/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl-error.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "target.h"
#include "output.h"
#include "sched-int.h"
#include "ggc.h"
#include "tree.h"
#include "vec.h"
#include "langhooks.h"
#include "rtlhooks-def.h"
#include "emit-rtl.h"
#include "ira.h"

#ifdef INSN_SCHEDULING
#include "sel-sched-ir.h"
#include "sel-sched-dump.h"
#include "sel-sched.h"
#include "dbgcnt.h"

/* Implementation of selective scheduling approach.
   The implementation below follows the original approach with the following
   changes:

   o the scheduler works after register allocation (but can also be tuned
   to work before RA);
   o some instructions are not copied or register renamed;
   o conditional jumps are not moved with code duplication;
   o several jumps in one parallel group are not supported;
   o when pipelining outer loops, code motion through inner loops
   is not supported;
   o control and data speculation are supported;
   o some improvements for better compile time/performance were made.

   Terminology
   ===========

   A vinsn, or virtual insn, is an insn with additional data characterizing
   its pattern, such as LHS, RHS, register sets used/set/clobbered, etc.
   Vinsns also act as smart pointers to save memory by reusing them in
   different expressions.  A vinsn is described by the vinsn_t type.

   An expression is a vinsn with additional data characterizing its properties
   at some point in the control flow graph.  The data may be its usefulness,
   priority, speculative status, whether it was renamed/substituted, etc.
   An expression is described by the expr_t type.

   An availability set (av_set) is a set of expressions at a given control
   flow point.  It is represented as av_set_t.  The expressions in av sets
   are kept sorted in terms of the expr_greater_p function, which allows
   truncating the set while keeping the best expressions.

   A fence is a point through which code motion is prohibited.  On each step,
   we gather a parallel group of insns at a fence.  It is possible to have
   multiple fences.  A fence is represented via fence_t.

   A boundary is the border between the fence group and the rest of the code.
   Currently, we never have more than one boundary per fence, as we finalize
   the fence group when a jump is scheduled.  A boundary is represented
   via bnd_t.

   High-level overview
   ===================

   The scheduler finds regions to schedule, schedules each one, and finalizes.
   The regions are formed starting from innermost loops, so that when an inner
   loop is pipelined, its prologue can be scheduled together with the yet
   unprocessed outer loop.  The remaining acyclic regions are found using
   extend_rgns: the blocks that are not yet allocated to any region are
   traversed in top-down order, and a block is added to the region to which
   all its predecessors belong; otherwise, the block starts its own region.

   The main scheduling loop (sel_sched_region_2) consists of just
   scheduling on each fence and updating fences.  For each fence,
   we fill a parallel group of insns (fill_insns) as long as insns can be
   added.  First, we compute the available exprs (av-set) at the boundary
   of the current group.  Second, we choose the best expression from it.
   If a stall is required to schedule any of the expressions, we advance
   the current cycle appropriately.  So, the final group does not exactly
   correspond to a VLIW word.  Third, we move the chosen expression to
   the boundary (move_op) and update the intermediate av sets and liveness
   sets.  We quit fill_insns either when no insns are left for scheduling
   or when we have scheduled enough insns to justify advancing the
   scheduling point.
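   Schematically, the filling step looks roughly like this (an illustrative
   sketch only; the real loop in fill_insns below also handles stalls,
   liveness updates, and group finalization):

     while (boundaries of the fence group are left)
       {
         av = compute_av_set (bnd);        <- available expressions
         expr = find_best_expr (av, bnd);  <- ranking and DFA lookahead
         if (!expr)
           advance_one_cycle (fence);      <- stall, or quit if nothing is left
         else
           move_op (expr, bnd);            <- code motion and bookkeeping
       }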

   Computing available expressions
   ===============================

   The computation (compute_av_set) is a bottom-up traversal.  At each insn,
   we move the union of its successors' sets through it via
   moveup_expr_set.  Dependent expressions are removed.  Local
   transformations (substitution, speculation) are applied to move more
   exprs.  Then the expr corresponding to the current insn is added.
   The result is saved on each basic block header.

   When traversing the CFG, we move down for no more than max_ws insns.
   Also, we do not move down to ineligible successors (is_ineligible_successor),
   which include moving along a back-edge, moving to already scheduled code,
   and moving to another fence.  The first two restrictions are lifted during
   pipelining, which allows us to move insns along a back-edge.  We always have
   an acyclic region for scheduling because we forbid motion through fences.
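
   In outline (a simplified sketch only; the real code is compute_av_set and
   compute_av_set_inside_bb below, which also handle caching, the max_ws
   window limit, and successor filtering):

     compute_av (insn):
       if the av set saved at insn's bb header is valid (see global_level)
         return it
       av := union of compute_av (succ) over all eligible successors
       moveup_expr_set (av, insn)  -- drop dependent exprs, transform others
       av_set_add (&av, INSN_EXPR (insn))
       return av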

   Choosing the best expression
   ============================

   We sort the final availability set via sel_rank_for_schedule, then we remove
   expressions which are not yet ready (tick_check_p) or whose destination
   registers cannot be used.  For some of them, we choose another register via
   find_best_reg.  To do this, we run find_used_regs to calculate the set of
   registers which cannot be used.  The find_used_regs function performs
   a traversal of the code motion paths for an expr.  We consider for renaming
   only registers which are from the same regclass as the original one and
   whose use does not interfere with any live ranges.  Finally, we convert
   the resulting set to the ready list format and use max_issue and reorder*
   hooks similarly to the Haifa scheduler.
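
   In outline (an illustrative sketch; see find_best_expr and its helpers
   below for the real logic):

     sort vec_av_set using sel_rank_for_schedule;
     for each expr in vec_av_set:
       drop expr if !tick_check_p (expr)  -- not ready on this cycle
       drop expr if its target register is unavailable and find_best_reg
         could not choose another one
     convert the survivors to the ready list and let max_issue and the
     reorder* hooks pick the insn to schedule.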

   Scheduling the best expression
   ==============================

   We run the move_op routine to perform the same type of code motion path
   traversal as in find_used_regs.  (Both work via the same driver,
   code_motion_path_driver.)  When moving down the CFG, we look for the
   original instruction that gave birth to the chosen expression.  We undo
   the transformations performed on an expression via the history saved in it.
   When found, we remove the instruction or leave a reg-reg copy/speculation
   check if needed.  On the way up, we insert bookkeeping copies at each join
   point.  If a copy is not needed, it will be removed later during this
   traversal.  We update the saved av sets and liveness sets on the way up, too.
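
   A small hand-written example of bookkeeping (block and register numbers
   are arbitrary): suppose "r1 = r2 + r3" from block B3 is scheduled at a
   fence in block B1, and B3 has a second predecessor B2.  On the way up,
   a bookkeeping copy is inserted at the join point so that the value is
   still computed on the path through B2:

       B1: r1 = r2 + r3          <- scheduled at the fence
        \
         \     B2: ...
          \        r1 = r2 + r3  <- bookkeeping copy
           \      /
            \    /
             B3: ...             <- the original insn is removed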

   Finalizing the schedule
   =======================

   When pipelining, we reschedule the blocks from which insns were pipelined
   to get a tighter schedule.  On Itanium, we also perform bundling via
   the same routine from ia64.c.

   Dependence analysis changes
   ===========================

   We augmented sched-deps.c with hooks that get called when a particular
   dependence is found in a particular part of an insn.  Using these hooks, we
   can do several actions such as: determine whether an insn can be moved through
   another (has_dependence_p, moveup_expr); find out whether an insn can be
   scheduled on the current cycle (tick_check_p); find out registers that
   are set/used/clobbered by an insn and find out all the strange stuff that
   restricts its movement, like SCHED_GROUP_P or CANT_MOVE (done in
   init_global_and_expr_for_insn).

   Initialization changes
   ======================

   There are parts of haifa-sched.c, sched-deps.c, and sched-rgn.c that are
   reused in all of the schedulers.  We have split the initialization of such
   shared data into different functions, prefixed with the scheduler type and
   suffixed with the type of data initialized: {,sel_,haifa_}sched_{init,finish},
   sched_rgn_init/finish, sched_deps_init/finish, sched_init_{luids/bbs}, etc.
   The same splitting is done with the current_sched_info structure:
   dependence-related parts are in sched_deps_info, the common part is in
   common_sched_info, and the haifa/sel/etc. part is in current_sched_info.

   Target contexts
   ===============

   As we now have multiple-point scheduling, this would not work with backends
   which save some of the scheduler state to use it in the target hooks.
   For this purpose, we introduce the concept of target contexts, which
   encapsulate such information.  The backend should implement simple routines
   for allocating/freeing/setting such a context.  The scheduler calls these
   as target hooks and handles the target context as an opaque pointer (similar
   to the DFA state type, state_t).
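
   For example, a backend keeping its scheduling state in static variables
   could support this roughly as follows (a minimal sketch only, not taken
   from any particular backend; the hook names below are the ones declared
   in target.def, while foo_* and the context layout are made up):

     static void *
     foo_alloc_sched_context (void)
     {
       return xmalloc (sizeof (struct foo_sched_context));
     }

     static void
     foo_init_sched_context (void *ctx, bool clean_p)
     {
       ...fill *ctx with either the initial or the current state...
     }

     static void
     foo_set_sched_context (void *ctx)
     {
       ...copy *ctx back into the backend's static variables...
     }

   plus the corresponding clear/free hooks, registered via the
   TARGET_SCHED_{ALLOC,INIT,SET,CLEAR,FREE}_SCHED_CONTEXT macros.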

   Various speedups
   ================

   As the correct data dependence graph is not supported during scheduling
   (which is to be changed in the mid-term), we cache as much of the dependence
   analysis results as possible to avoid reanalyzing.  This includes: bitmap
   caches on each insn in the region's insn stream saying yes/no for a query
   with a pair of UIDs; hashtables with the previously done transformations on
   each insn in the stream; and a vector keeping a history of transformations
   on each expr.

   Also, we try to minimize the dependence context used on each fence to check
   whether a given expression is ready for scheduling by removing from it
   insns that have definitely completed execution.  The results of
   tick_check_p checks are also cached in a vector on each fence.

   We keep a valid liveness set on each insn in a region to avoid the high
   cost of recomputation on large basic blocks.

   Finally, we try to minimize the number of needed updates to the availability
   sets.  The updates happen in two cases: when fill_insns terminates,
   we advance all fences and increase the stage number to show that the region
   has changed and the sets are to be recomputed; and when the next iteration
   of a loop in fill_insns happens (but this one reuses the av sets saved
   on bb headers).  Thus, we try to break the fill_insns loop only when
   a "significant" number of insns from the current scheduling window has
   been scheduled.  This should be made a target param.


   TODO: correctly support the data dependence graph at all stages and get rid
   of all caches.  This should speed up the scheduler.
   TODO: implement moving cond jumps with bookkeeping copies on both targets.
   TODO: tune the scheduler before RA so it does not create too many pseudos.


   References:
   S.-M. Moon and K. Ebcioglu.  Parallelizing nonnumerical code with
   selective scheduling and software pipelining.
   ACM TOPLAS, Vol. 19, No. 6, pages 853--898, Nov. 1997.

   Andrey Belevantsev, Maxim Kuvyrkov, Vladimir Makarov, Dmitry Melnik,
   and Dmitry Zhurikhin.  An interblock VLIW-targeted instruction scheduler
   for GCC.  In Proceedings of GCC Developers' Summit 2006.

   Arutyun Avetisyan, Andrey Belevantsev, and Dmitry Melnik.  GCC Instruction
   Scheduler and Software Pipeliner on the Itanium Platform.  EPIC-7 Workshop.
   http://rogue.colorado.edu/EPIC7/.

*/

/* True when pipelining is enabled.  */
bool pipelining_p;

/* True if bookkeeping is enabled.  */
bool bookkeeping_p;

/* Maximum number of insns that are eligible for renaming.  */
int max_insns_to_rename;


/* Definitions of local types and macros.  */

/* Represents possible outcomes of moving an expression through an insn.  */
enum MOVEUP_EXPR_CODE
  {
    /* The expression is not changed.  */
    MOVEUP_EXPR_SAME,

    /* Not changed, but requires a new destination register.  */
    MOVEUP_EXPR_AS_RHS,

    /* Cannot be moved.  */
    MOVEUP_EXPR_NULL,

    /* Changed (substituted or speculated).  */
    MOVEUP_EXPR_CHANGED
  };
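
/* For instance (hand-written examples with arbitrary registers): moving
   "r1 = r2 + r3" through the unrelated "r4 = r5" gives MOVEUP_EXPR_SAME;
   moving it through "r1 = r5", which only clobbers the destination, gives
   MOVEUP_EXPR_AS_RHS; moving it through the copy "r2 = r6" substitutes the
   rhs and gives MOVEUP_EXPR_CHANGED ("r1 = r6 + r3"); moving it through
   "r3 = r1 * 2" is impossible, MOVEUP_EXPR_NULL.  */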

/* The container to be passed into rtx search & replace functions.  */
struct rtx_search_arg
{
  /* What we are searching for.  */
  rtx x;

  /* The occurrence counter.  */
  int n;
};

typedef struct rtx_search_arg *rtx_search_arg_p;

/* This struct contains precomputed hard reg sets that are needed when
   computing registers available for renaming.  */
struct hard_regs_data
{
  /* For every mode, this stores registers available for use with
     that mode.  */
  HARD_REG_SET regs_for_mode[NUM_MACHINE_MODES];

  /* True when regs_for_mode[mode] is initialized.  */
  bool regs_for_mode_ok[NUM_MACHINE_MODES];

  /* For every register, this stores the regs that it is ok to rename it
     into.  The register in question is always included; if it is not set,
     the whole set has not been computed yet.  */
  HARD_REG_SET regs_for_rename[FIRST_PSEUDO_REGISTER];

  /* For every mode, this stores registers not available due to
     call clobbering.  */
  HARD_REG_SET regs_for_call_clobbered[NUM_MACHINE_MODES];

  /* All registers that are used or call used.  */
  HARD_REG_SET regs_ever_used;

#ifdef STACK_REGS
  /* Stack registers.  */
  HARD_REG_SET stack_regs;
#endif
};

/* Holds the results of computing the hard registers that are available
   for renaming and those that are unavailable.  */
struct reg_rename
{
  /* These are unavailable due to calls crossing, globalness, etc.  */
  HARD_REG_SET unavailable_hard_regs;

  /* These are *available* for renaming.  */
  HARD_REG_SET available_for_renaming;

  /* Whether this code motion path crosses a call.  */
  bool crosses_call;
};

/* A global structure that contains the needed information about hard
   regs.  */
static struct hard_regs_data sel_hrd;


/* This structure holds local data used in code_motion_path_driver hooks on
   the same or adjacent levels of recursion.  Here we keep those parameters
   that are not used in the code_motion_path_driver routine itself, but only
   in its hooks.  Moreover, all parameters that can be modified in hooks are
   in this structure, so all other parameters passed explicitly to hooks are
   read-only.  */
struct cmpd_local_params
{
  /* Local params used in move_op_* functions.  */

  /* Edges for bookkeeping generation.  */
  edge e1, e2;

  /* C_EXPR merged from all successors and locally allocated temporary C_EXPR.  */
  expr_t c_expr_merged, c_expr_local;

  /* Local params used in fur_* functions.  */
  /* Copy of the ORIGINAL_INSN list, stores the original insns already
     found before entering the current level of code_motion_path_driver.  */
  def_list_t old_original_insns;

  /* Local params used in move_op_* functions.  */
  /* True when we have removed the last insn in the block which was
     also a boundary.  Do not update anything or create bookkeeping copies.  */
  BOOL_BITFIELD removed_last_insn : 1;
};

/* Stores the static parameters for move_op_* calls.  */
struct moveop_static_params
{
  /* Destination register.  */
  rtx dest;

  /* Current C_EXPR.  */
  expr_t c_expr;

  /* The UID of expr_vliw which is to be moved up.  If we find other exprs,
     they are to be removed.  */
  int uid;

#ifdef ENABLE_CHECKING
  /* This is initialized to the insn on which the driver stopped its traversal.  */
  insn_t failed_insn;
#endif

  /* True if we scheduled an insn with a different register.  */
  bool was_renamed;
};

/* Stores the static parameters for fur_* calls.  */
struct fur_static_params
{
  /* Set of registers unavailable on the code motion path.  */
  regset used_regs;

  /* Pointer to the list of original insn definitions.  */
  def_list_t *original_insns;

  /* True if a code motion path contains a CALL insn.  */
  bool crosses_call;
};

typedef struct fur_static_params *fur_static_params_p;
typedef struct cmpd_local_params *cmpd_local_params_p;
typedef struct moveop_static_params *moveop_static_params_p;

/* Set of hooks and parameters that determine behaviour specific to the
   move_op or find_used_regs functions.  */
struct code_motion_path_driver_info_def
{
  /* Called on entering the basic block.  */
  int (*on_enter) (insn_t, cmpd_local_params_p, void *, bool);

  /* Called when the original expr is found.  */
  void (*orig_expr_found) (insn_t, expr_t, cmpd_local_params_p, void *);

  /* Called while descending the current basic block if the current insn is
     not the original EXPR we're searching for.  */
  bool (*orig_expr_not_found) (insn_t, av_set_t, void *);

  /* Function to merge C_EXPRes from different successors.  */
  void (*merge_succs) (insn_t, insn_t, int, cmpd_local_params_p, void *);

  /* Function to finalize merge from different successors and possibly
     deallocate temporary data structures used for merging.  */
  void (*after_merge_succs) (cmpd_local_params_p, void *);

  /* Called on the backward stage of recursion to do moveup_expr.
     Used only with move_op_*.  */
  void (*ascend) (insn_t, void *);

  /* Called on the ascending pass, before returning from the current basic
     block or from the whole traversal.  */
  void (*at_first_insn) (insn_t, cmpd_local_params_p, void *);

  /* When processing successors in move_op we need only descend into
     SUCCS_NORMAL successors, while in find_used_regs we need SUCCS_ALL.  */
  int succ_flags;

  /* The routine name to print in dumps ("move_op" or "find_used_regs").  */
  const char *routine_name;
};

/* Global pointer to current hooks, either points to MOVE_OP_HOOKS or
   FUR_HOOKS.  */
struct code_motion_path_driver_info_def *code_motion_path_driver_info;

/* Set of hooks for performing move_op and find_used_regs routines with
   code_motion_path_driver.  */
extern struct code_motion_path_driver_info_def move_op_hooks, fur_hooks;

/* True if/when we want to emulate the Haifa scheduler in the common code.
   This is used in sched_rgn_local_init and in various places in
   sched-deps.c.  */
int sched_emulate_haifa_p;

/* GLOBAL_LEVEL is used to discard information stored in basic block headers
   av_sets.  The av_set of a bb header is valid if its (the bb header's) level
   is equal to GLOBAL_LEVEL, and invalid if it is less.  This is primarily
   used to advance the scheduling window.  */
int global_level;

/* Current fences.  */
flist_t fences;

/* True when separable insns should be scheduled as RHSes.  */
static bool enable_schedule_as_rhs_p;

/* Used in verify_target_availability to assert that the target reg is
   reported unavailable by both TARGET_UNAVAILABLE and find_used_regs only
   if we haven't scheduled anything on the previous fence.
   If scheduled_something_on_previous_fence is true, TARGET_UNAVAILABLE can
   have a more conservative value than the one returned by find_used_regs,
   thus we shouldn't assert that these values are equal.  */
static bool scheduled_something_on_previous_fence;

/* All newly emitted insns will have their uids greater than this value.  */
static int first_emitted_uid;

/* Set of basic blocks that are forced to start new ebbs.  This is a subset
   of all the ebb heads.  */
static bitmap_head _forced_ebb_heads;
bitmap_head *forced_ebb_heads = &_forced_ebb_heads;

/* Blocks that need to be rescheduled after pipelining.  */
bitmap blocks_to_reschedule = NULL;

/* True when the first lv set should be ignored when updating liveness.  */
static bool ignore_first = false;

/* Number of insns max_issue has initialized data structures for.  */
static int max_issue_size = 0;

/* Whether we can issue more instructions.  */
static int can_issue_more;

/* Maximum software lookahead window size, reduced when rescheduling after
   pipelining.  */
static int max_ws;

/* Number of insns scheduled in the current region.  */
static int num_insns_scheduled;

/* A vector of expressions, used for sorting them.  */
static vec<expr_t> vec_av_set = vNULL;

/* A vector of vinsns is used to hold temporary lists of vinsns.  */
typedef vec<vinsn_t> vinsn_vec_t;

/* This vector has the exprs which may still be present in av_sets, but
   actually can't be moved up due to bookkeeping created during code motion
   to another fence.  See the comment near the call to
   update_and_record_unavailable_insns for a detailed explanation.  */
static vinsn_vec_t vec_bookkeeping_blocked_vinsns = vinsn_vec_t();

/* This vector has vinsns which are scheduled with renaming on the first fence
   and then seen on the second.  For expressions with such vinsns, target
   availability information may be wrong.  */
static vinsn_vec_t vec_target_unavailable_vinsns = vinsn_vec_t();

/* Vector to store temporary nops inserted in move_op to prevent removal
   of empty bbs.  */
static vec<insn_t> vec_temp_moveop_nops = vNULL;

/* These bitmaps record original instructions scheduled on the current
   iteration and bookkeeping copies created by them.  */
static bitmap current_originators = NULL;
static bitmap current_copies = NULL;

/* This bitmap marks the blocks visited by code_motion_path_driver so we don't
   visit them afterwards.  */
static bitmap code_motion_visited_blocks = NULL;

/* Variables to accumulate different statistics.  */

/* The number of bookkeeping copies created.  */
static int stat_bookkeeping_copies;

/* The number of insns that required bookkeeping for their scheduling.  */
static int stat_insns_needed_bookkeeping;

/* The number of insns that got renamed.  */
static int stat_renamed_scheduled;

/* The number of substitutions made during scheduling.  */
static int stat_substitutions_total;


/* Forward declarations of static functions.  */
static bool rtx_ok_for_substitution_p (rtx, rtx);
static int sel_rank_for_schedule (const void *, const void *);
static av_set_t find_sequential_best_exprs (bnd_t, expr_t, bool);
static basic_block find_block_for_bookkeeping (edge e1, edge e2, bool lax);

static rtx get_dest_from_orig_ops (av_set_t);
static basic_block generate_bookkeeping_insn (expr_t, edge, edge);
static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *,
                            def_list_t *);
static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t, bool*);
static int code_motion_path_driver (insn_t, av_set_t, ilist_t,
                                    cmpd_local_params_p, void *);
static void sel_sched_region_1 (void);
static void sel_sched_region_2 (int);
static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool);

static void debug_state (state_t);


/* Functions that work with fences.  */

/* Advance one cycle on FENCE.  */
static void
advance_one_cycle (fence_t fence)
{
  unsigned i;
  int cycle;
  rtx insn;

  advance_state (FENCE_STATE (fence));
  cycle = ++FENCE_CYCLE (fence);
  FENCE_ISSUED_INSNS (fence) = 0;
  FENCE_STARTS_CYCLE_P (fence) = 1;
  can_issue_more = issue_rate;
  FENCE_ISSUE_MORE (fence) = can_issue_more;

  for (i = 0; vec_safe_iterate (FENCE_EXECUTING_INSNS (fence), i, &insn); )
    {
      if (INSN_READY_CYCLE (insn) < cycle)
        {
          remove_from_deps (FENCE_DC (fence), insn);
          FENCE_EXECUTING_INSNS (fence)->unordered_remove (i);
          continue;
        }
      i++;
    }
  if (sched_verbose >= 2)
    {
      sel_print ("Finished a cycle.  Current cycle = %d\n", FENCE_CYCLE (fence));
      debug_state (FENCE_STATE (fence));
    }
}

/* Returns true when SUCC is in a fallthru bb of INSN, possibly
   skipping empty basic blocks.  */
static bool
in_fallthru_bb_p (rtx insn, rtx succ)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  edge e;

  if (bb == BLOCK_FOR_INSN (succ))
    return true;

  e = find_fallthru_edge_from (bb);
  if (e)
    bb = e->dest;
  else
    return false;

  while (sel_bb_empty_p (bb))
    bb = bb->next_bb;

  return bb == BLOCK_FOR_INSN (succ);
}

/* Construct successor fences from OLD_FENCEs and put them in NEW_FENCES.
   When a successor will continue an ebb, transfer all parameters of a fence
   to the new fence.  ORIG_MAX_SEQNO is the maximal seqno before this round
   of scheduling, helping to distinguish between the old and the new code.  */
static void
extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
			 int orig_max_seqno)
{
  bool was_here_p = false;
  insn_t insn = NULL_RTX;
  insn_t succ;
  succ_iterator si;
  ilist_iterator ii;
  fence_t fence = FLIST_FENCE (old_fences);
  basic_block bb;

  /* Get the only element of FENCE_BNDS (fence).  */
  FOR_EACH_INSN (insn, ii, FENCE_BNDS (fence))
    {
      gcc_assert (!was_here_p);
      was_here_p = true;
    }
  gcc_assert (was_here_p && insn != NULL_RTX);

  /* When in the "middle" of the block, just move this fence
     to the new list.  */
  bb = BLOCK_FOR_INSN (insn);
  if (! sel_bb_end_p (insn)
      || (single_succ_p (bb)
          && single_pred_p (single_succ (bb))))
    {
      insn_t succ;

      succ = (sel_bb_end_p (insn)
              ? sel_bb_head (single_succ (bb))
              : NEXT_INSN (insn));

      if (INSN_SEQNO (succ) > 0
          && INSN_SEQNO (succ) <= orig_max_seqno
          && INSN_SCHED_TIMES (succ) <= 0)
        {
          FENCE_INSN (fence) = succ;
          move_fence_to_fences (old_fences, new_fences);

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state continue)\n",
                       INSN_UID (insn), INSN_UID (succ), BLOCK_NUM (succ));
        }
      return;
    }

  /* Otherwise copy fence's structures to (possibly) multiple successors.  */
  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      int seqno = INSN_SEQNO (succ);

      if (0 < seqno && seqno <= orig_max_seqno
          && (pipelining_p || INSN_SCHED_TIMES (succ) <= 0))
        {
          bool b = (in_same_ebb_p (insn, succ)
                    || in_fallthru_bb_p (insn, succ));

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state %s)\n",
                       INSN_UID (insn), INSN_UID (succ),
                       BLOCK_NUM (succ), b ? "continue" : "reset");

          if (b)
            add_dirty_fence_to_fences (new_fences, succ, fence);
          else
            {
              /* Mark block of the SUCC as head of the new ebb.  */
              bitmap_set_bit (forced_ebb_heads, BLOCK_NUM (succ));
              add_clean_fence_to_fences (new_fences, succ, fence);
            }
        }
    }
}


/* Functions to support substitution.  */

/* Returns whether INSN with dependence status DS is eligible for
   substitution, i.e. it's a copy operation x := y, and an RHS that is
   moved up through this insn should be substituted.  */
static bool
can_substitute_through_p (insn_t insn, ds_t ds)
{
  /* We can substitute only true dependencies.  */
  if ((ds & DEP_OUTPUT)
      || (ds & DEP_ANTI)
      || ! INSN_RHS (insn)
      || ! INSN_LHS (insn))
    return false;

  /* Now we just need to make sure the INSN_RHS consists of only one
     simple REG rtx.  */
  if (REG_P (INSN_LHS (insn))
      && REG_P (INSN_RHS (insn)))
    return true;
  return false;
}
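
/* For example (a hand-written illustration with arbitrary registers):
   when "r1 = r2 + r3" is moved up through the copy "r2 = r4", the only
   dependence is a true dependence on r2, so substitute_reg_in_expr can
   rewrite the expression to "r1 = r4 + r3" and the motion succeeds.  */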

/* Substitute all occurrences of INSN's destination in EXPR's vinsn with
   INSN's source (if INSN is eligible for substitution).  Returns TRUE if
   the substitution was actually performed, FALSE otherwise.  Substitution
   might not be performed because either EXPR's vinsn doesn't contain INSN's
   destination or the resulting insn is invalid for the target machine.
   When UNDO is true, perform unsubstitution instead (the difference is in
   the part of rtx on which validate_replace_rtx is called).  */
static bool
substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo)
{
  rtx *where;
  bool new_insn_valid;
  vinsn_t *vi = &EXPR_VINSN (expr);
  bool has_rhs = VINSN_RHS (*vi) != NULL;
  rtx old, new_rtx;

  /* Do not try to replace in SET_DEST.  Although we'll choose a new
     register for the RHS, we don't want to change the RHS' original reg.
     If the insn is not a SET, we may still be able to substitute something
     in it, and if we're here (don't have deps), it doesn't write INSN's
     dest.  */
  where = (has_rhs
	   ? &VINSN_RHS (*vi)
	   : &PATTERN (VINSN_INSN_RTX (*vi)));
  old = undo ? INSN_RHS (insn) : INSN_LHS (insn);

  /* Substitute if INSN has a form of x:=y and LHS(INSN) occurs in *VI.  */
  if (rtx_ok_for_substitution_p (old, *where))
    {
      rtx new_insn;
      rtx *where_replace;

      /* We should copy these rtxes before substitution.  */
      new_rtx = copy_rtx (undo ? INSN_LHS (insn) : INSN_RHS (insn));
      new_insn = create_copy_of_insn_rtx (VINSN_INSN_RTX (*vi));

      /* Where we'll replace.
         WHERE_REPLACE should point inside NEW_INSN, so INSN_RHS couldn't be
	 used instead of SET_SRC.  */
      where_replace = (has_rhs
		       ? &SET_SRC (PATTERN (new_insn))
		       : &PATTERN (new_insn));

      new_insn_valid
        = validate_replace_rtx_part_nosimplify (old, new_rtx, where_replace,
                                                new_insn);

      /* ??? Actually, the constrain_operands result depends upon the choice
         of the destination register.  E.g. if we allow a single register to
	 be an rhs, and if we try to move dx=ax (as an rhs) through ax=dx,
	 we'll end up with the invalid insn dx=dx, so we'll lose this rhs
	 here.  We just can't come up with a significant testcase for this,
	 so we leave it for now.  */
      if (new_insn_valid)
	{
	  change_vinsn_in_expr (expr,
				create_vinsn_from_insn_rtx (new_insn, false));

	  /* Do not allow clobbering the address register of speculative
             insns.  */
	  if ((EXPR_SPEC_DONE_DS (expr) & SPECULATIVE)
              && register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
					 expr_dest_reg (expr)))
	    EXPR_TARGET_AVAILABLE (expr) = false;

	  return true;
	}
      else
        return false;
    }
  else
    return false;
}

/* Helper function for count_occurrences_equiv.  */
static int
count_occurrences_1 (rtx *cur_rtx, void *arg)
{
  rtx_search_arg_p p = (rtx_search_arg_p) arg;

  if (REG_P (*cur_rtx) && REGNO (*cur_rtx) == REGNO (p->x))
    {
      /* Bail out if the mode is different or more than one register is
         used.  */
      if (GET_MODE (*cur_rtx) != GET_MODE (p->x)
          || (HARD_REGISTER_P (*cur_rtx)
	      && hard_regno_nregs[REGNO (*cur_rtx)][GET_MODE (*cur_rtx)] > 1))
        {
          p->n = 0;
          return 1;
        }

      p->n++;

      /* Do not traverse subexprs.  */
      return -1;
    }

  if (GET_CODE (*cur_rtx) == SUBREG
      && (!REG_P (SUBREG_REG (*cur_rtx))
	  || REGNO (SUBREG_REG (*cur_rtx)) == REGNO (p->x)))
    {
      /* ??? Do not support substituting regs inside subregs.  In that case,
         simplify_subreg will be called by validate_replace_rtx, and
         unsubstitution will fail later.  */
      p->n = 0;
      return 1;
    }

  /* Continue the search.  */
  return 0;
}

/* Return the number of places WHAT appears within WHERE.
   Bail out if we find a reference occupying several hard registers.  */
static int
count_occurrences_equiv (rtx what, rtx where)
{
  struct rtx_search_arg arg;

  gcc_assert (REG_P (what));
  arg.x = what;
  arg.n = 0;

  for_each_rtx (&where, &count_occurrences_1, (void *) &arg);

  return arg.n;
}
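
/* For instance (an illustrative description, not actual code from this
   file): for WHAT = (reg:SI 100) and WHERE = (set (reg:SI 101)
   (plus:SI (reg:SI 100) (reg:SI 100))), count_occurrences_equiv returns 2,
   while with WHERE containing (subreg:HI (reg:SI 100) 0) it returns 0,
   since substitution inside subregs is not supported.  */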

/* Returns TRUE if WHAT is found in WHERE rtx tree.  */
static bool
rtx_ok_for_substitution_p (rtx what, rtx where)
{
  return (count_occurrences_equiv (what, where) > 0);
}


/* Functions to support register renaming.  */

/* Substitute VI's set source with RHS_RTX.  Returns a newly created insn
   that has RHS_RTX as its source.  */
static rtx
create_insn_rtx_with_rhs (vinsn_t vi, rtx rhs_rtx)
{
  rtx lhs_rtx;
  rtx pattern;
  rtx insn_rtx;

  lhs_rtx = copy_rtx (VINSN_LHS (vi));

  pattern = gen_rtx_SET (VOIDmode, lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Returns whether INSN's src can be replaced with register number
   NEW_SRC_REG.  E.g. the following insn is valid for i386:

    (insn:HI 2205 6585 2207 727 ../../gcc/libiberty/regex.c:3337
      (set (mem/s:QI (plus:SI (plus:SI (reg/f:SI 7 sp)
			(reg:SI 0 ax [orig:770 c1 ] [770]))
		    (const_int 288 [0x120])) [0 str S1 A8])
	    (const_int 0 [0x0])) 43 {*movqi_1} (nil)
	(nil))

  But if we change (const_int 0 [0x0]) to (reg:QI 4 si), it will be invalid
  because of operand constraints:

    (define_insn "*movqi_1"
      [(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m")
	    (match_operand:QI 1 "general_operand"      " q,qn,qm,q,rn,qm,qn")
	    )]

  So we do constrain_operands here, before choosing NEW_SRC_REG as the best
  reg for the rhs.  */

static bool
replace_src_with_reg_ok_p (insn_t insn, rtx new_src_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  enum machine_mode mode;
  rtx dst_loc;
  bool res;

  gcc_assert (VINSN_SEPARABLE_P (vi));

  get_dest_and_mode (insn, &dst_loc, &mode);
  gcc_assert (mode == GET_MODE (new_src_reg));

  if (REG_P (dst_loc) && REGNO (new_src_reg) == REGNO (dst_loc))
    return true;

  /* See whether SET_SRC can be replaced with this register.  */
  validate_change (insn, &SET_SRC (PATTERN (insn)), new_src_reg, 1);
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}
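
/* Note the idiom above: validate_change with IN_GROUP = 1 only queues the
   replacement, verify_changes (0) re-recognizes the insn and checks its
   constraints, and cancel_changes (0) rolls the change back, so the insn
   is never modified permanently here.  */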

/* Returns whether INSN will still be valid after replacing its DEST with
   register NEW_REG.  */
static bool
replace_dest_with_reg_ok_p (insn_t insn, rtx new_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  bool res;

  /* We should deal here only with separable insns.  */
  gcc_assert (VINSN_SEPARABLE_P (vi));
  gcc_assert (GET_MODE (VINSN_LHS (vi)) == GET_MODE (new_reg));

  /* See whether SET_DEST can be replaced with this register.  */
  validate_change (insn, &SET_DEST (PATTERN (insn)), new_reg, 1);
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}

/* Create a pattern with rhs of VI and lhs of LHS_RTX.  */
static rtx
create_insn_rtx_with_lhs (vinsn_t vi, rtx lhs_rtx)
{
  rtx rhs_rtx;
  rtx pattern;
  rtx insn_rtx;

  rhs_rtx = copy_rtx (VINSN_RHS (vi));

  pattern = gen_rtx_SET (VOIDmode, lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Substitute the lhs in the given expression EXPR for the register NEW_REG.
   SET_DEST may be an arbitrary rtx, not only a register.  */
static void
replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg)
{
  rtx insn_rtx;
  vinsn_t vinsn;

  insn_rtx = create_insn_rtx_with_lhs (EXPR_VINSN (expr), new_reg);
  vinsn = create_vinsn_from_insn_rtx (insn_rtx, false);

  change_vinsn_in_expr (expr, vinsn);
  EXPR_WAS_RENAMED (expr) = 1;
  EXPR_TARGET_AVAILABLE (expr) = 1;
}

/* Returns whether VI writes either one of the USED_REGS registers or,
   if a register is a hard one, one of the UNAVAILABLE_HARD_REGS registers.  */
static bool
vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs,
                            HARD_REG_SET unavailable_hard_regs)
{
  unsigned regno;
  reg_set_iterator rsi;

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
	return true;
    }

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
	return true;
    }

  return false;
}

/* Returns the register class of the output register in INSN.
   Returns NO_REGS for call insns because some targets have constraints on
   the destination register of a call insn.

   Code adopted from regrename.c::build_def_use.  */
static enum reg_class
get_reg_class (rtx insn)
{
  int alt, i, n_ops;

  extract_insn (insn);
  if (! constrain_operands (1))
    fatal_insn_not_found (insn);
  preprocess_constraints ();
  alt = which_alternative;
  n_ops = recog_data.n_operands;

  for (i = 0; i < n_ops; ++i)
    {
      int matches = recog_op_alt[i][alt].matches;
      if (matches >= 0)
	recog_op_alt[i][alt].cl = recog_op_alt[matches][alt].cl;
    }

  if (asm_noperands (PATTERN (insn)) > 0)
    {
      for (i = 0; i < n_ops; i++)
	if (recog_data.operand_type[i] == OP_OUT)
	  {
	    rtx *loc = recog_data.operand_loc[i];
	    rtx op = *loc;
	    enum reg_class cl = recog_op_alt[i][alt].cl;

	    if (REG_P (op)
		&& REGNO (op) == ORIGINAL_REGNO (op))
	      continue;

	    return cl;
	  }
    }
  else if (!CALL_P (insn))
    {
      for (i = 0; i < n_ops + recog_data.n_dups; i++)
       {
	 int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops];
	 enum reg_class cl = recog_op_alt[opn][alt].cl;

	 if (recog_data.operand_type[opn] == OP_OUT ||
	     recog_data.operand_type[opn] == OP_INOUT)
	   return cl;
       }
    }

/*  Insns like
    (insn (set (reg:CCZ 17 flags) (compare:CCZ ...)))
    may result in returning NO_REGS, because flags is written implicitly
    by the CMP insn, which has no OP_OUT | OP_INOUT operands.  */
  return NO_REGS;
}

#ifdef HARD_REGNO_RENAME_OK
/* Calculate HARD_REGNO_RENAME_OK data for REGNO.  */
static void
init_hard_regno_rename (int regno)
{
  int cur_reg;

  SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], regno);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      /* We are not interested in renaming in other regs.  */
      if (!TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg))
        continue;

      if (HARD_REGNO_RENAME_OK (regno, cur_reg))
        SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], cur_reg);
    }
}
#endif

/* A wrapper around HARD_REGNO_RENAME_OK that will look into the hard regs
   data first.  */
static inline bool
sel_hard_regno_rename_ok (int from ATTRIBUTE_UNUSED, int to ATTRIBUTE_UNUSED)
{
#ifdef HARD_REGNO_RENAME_OK
  /* Check whether this is all calculated.  */
  if (TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], from))
    return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);

  init_hard_regno_rename (from);

  return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);
#else
  return true;
#endif
}

/* Calculate the set of registers that are capable of holding MODE.  */
static void
init_regs_for_mode (enum machine_mode mode)
{
  int cur_reg;

  CLEAR_HARD_REG_SET (sel_hrd.regs_for_mode[mode]);
  CLEAR_HARD_REG_SET (sel_hrd.regs_for_call_clobbered[mode]);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      int nregs = hard_regno_nregs[cur_reg][mode];
      int i;

      for (i = nregs - 1; i >= 0; --i)
        if (fixed_regs[cur_reg + i]
            || global_regs[cur_reg + i]
            /* Can't use regs which aren't saved by
               the prologue.  */
            || !TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg + i)
	    /* Can't use regs with non-null REG_BASE_VALUE, because adjusting
	       it affects aliasing globally and invalidates all AV sets.  */
	    || get_reg_base_value (cur_reg + i)
#ifdef LEAF_REGISTERS
            /* We can't use a non-leaf register if we're in a
               leaf function.  */
            || (crtl->is_leaf
                && !LEAF_REGISTERS[cur_reg + i])
#endif
            )
          break;

      if (i >= 0)
        continue;

      /* See whether it accepts all modes that occur in
         original insns.  */
      if (! HARD_REGNO_MODE_OK (cur_reg, mode))
        continue;

      if (HARD_REGNO_CALL_PART_CLOBBERED (cur_reg, mode))
        SET_HARD_REG_BIT (sel_hrd.regs_for_call_clobbered[mode],
                          cur_reg);

      /* If the CUR_REG passed all the checks above,
         then it's ok.  */
      SET_HARD_REG_BIT (sel_hrd.regs_for_mode[mode], cur_reg);
    }

  sel_hrd.regs_for_mode_ok[mode] = true;
}

/* Init all register sets gathered in HRD.  */
static void
init_hard_regs_data (void)
{
  int cur_reg = 0;
  int cur_mode = 0;

  CLEAR_HARD_REG_SET (sel_hrd.regs_ever_used);
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    if (df_regs_ever_live_p (cur_reg) || call_used_regs[cur_reg])
      SET_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg);

  /* Initialize registers that are valid based on mode when this is
     really needed.  */
  for (cur_mode = 0; cur_mode < NUM_MACHINE_MODES; cur_mode++)
    sel_hrd.regs_for_mode_ok[cur_mode] = false;

  /* Mark that all HARD_REGNO_RENAME_OK is not calculated.  */
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    CLEAR_HARD_REG_SET (sel_hrd.regs_for_rename[cur_reg]);

#ifdef STACK_REGS
  CLEAR_HARD_REG_SET (sel_hrd.stack_regs);

  for (cur_reg = FIRST_STACK_REG; cur_reg <= LAST_STACK_REG; cur_reg++)
    SET_HARD_REG_BIT (sel_hrd.stack_regs, cur_reg);
#endif
}

/* Mark hardware regs in REG_RENAME_P that are not suitable
   for renaming the rhs in INSN due to hardware restrictions (register class,
   modes compatibility etc).  This doesn't affect the original insn's dest
   reg, if it isn't in USED_REGS.  DEF is a definition insn of the rhs for
   which the destination register is sought.  LHS (DEF->ORIG_INSN) may be
   REG or MEM.  Registers that are in used_regs are always marked in
   unavailable_hard_regs as well.  */

static void
mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
                            regset used_regs ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  enum reg_class cl = NO_REGS;
  rtx orig_dest;
  unsigned cur_reg, regno;
  hard_reg_set_iterator hrsi;

  gcc_assert (GET_CODE (PATTERN (def->orig_insn)) == SET);
  gcc_assert (reg_rename_p);

  orig_dest = SET_DEST (PATTERN (def->orig_insn));

  /* We have decided not to rename 'mem = something;' insns, as 'something'
     is usually a register.  */
  if (!REG_P (orig_dest))
    return;

  regno = REGNO (orig_dest);

  /* If before reload, don't try to work with pseudos.  */
  if (!reload_completed && !HARD_REGISTER_NUM_P (regno))
    return;

  if (reload_completed)
    cl = get_reg_class (def->orig_insn);

  /* Stop if the original register is one of the fixed_regs, global_regs or
     the frame pointer, or we could not discover its class.  */
  if (fixed_regs[regno]
      || global_regs[regno]
#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
      || (frame_pointer_needed && regno == HARD_FRAME_POINTER_REGNUM)
#else
      || (frame_pointer_needed && regno == FRAME_POINTER_REGNUM)
#endif
      || (reload_completed && cl == NO_REGS))
    {
      SET_HARD_REG_SET (reg_rename_p->unavailable_hard_regs);

      /* Give a chance for the original register, if it isn't in used_regs.  */
      if (!def->crosses_call)
        CLEAR_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno);

      return;
    }

  /* If something is allocated on the stack in this function, mark the frame
     pointer register as unavailable, considering also modes.
     FIXME: it is enough to do this once per all original defs.  */
  if (frame_pointer_needed)
    {
      add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
			   Pmode, FRAME_POINTER_REGNUM);

      if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
        add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
			     Pmode, HARD_FRAME_POINTER_REGNUM);
    }

#ifdef STACK_REGS
  /* For the stack registers the presence of FIRST_STACK_REG in USED_REGS
     is equivalent to all stack regs being in this set.
     I.e. no stack register can be renamed, and even if it's an original
     register here we make sure it won't be lifted over its previous def
     (its previous def will appear as if it's a FIRST_STACK_REG def).
     The HARD_REGNO_RENAME_OK covers other cases in the condition below.  */
  if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG)
      && REGNO_REG_SET_P (used_regs, FIRST_STACK_REG))
    IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
                      sel_hrd.stack_regs);
#endif

  /* If there's a call on this path, make regs from call_used_reg_set
     unavailable.  */
  if (def->crosses_call)
    IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
                      call_used_reg_set);

  /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and crosses_call,
     but not register classes.  */
  if (!reload_completed)
    return;

  /* Leave regs as 'available' only from the current
     register class.  */
  COPY_HARD_REG_SET (reg_rename_p->available_for_renaming,
                     reg_class_contents[cl]);

  mode = GET_MODE (orig_dest);

  /* Leave only registers available for this mode.  */
  if (!sel_hrd.regs_for_mode_ok[mode])
    init_regs_for_mode (mode);
  AND_HARD_REG_SET (reg_rename_p->available_for_renaming,
                    sel_hrd.regs_for_mode[mode]);

  /* Exclude registers that are partially call clobbered.  */
  if (def->crosses_call
      && ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))
    AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
                            sel_hrd.regs_for_call_clobbered[mode]);

  /* Leave only those that are ok to rename.  */
  EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
                                  0, cur_reg, hrsi)
    {
      int nregs;
      int i;

      nregs = hard_regno_nregs[cur_reg][mode];
      gcc_assert (nregs > 0);

      for (i = nregs - 1; i >= 0; --i)
        if (! sel_hard_regno_rename_ok (regno + i, cur_reg + i))
          break;

      if (i >= 0)
        CLEAR_HARD_REG_BIT (reg_rename_p->available_for_renaming,
                            cur_reg);
    }

  AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
                          reg_rename_p->unavailable_hard_regs);

  /* Regno is always ok from the renaming point of view, but it really
     could be in *unavailable_hard_regs already, so set it here instead
     of there.  */
  SET_HARD_REG_BIT (reg_rename_p->available_for_renaming, regno);
}
1329 
1330 /* reg_rename_tick[REG1] > reg_rename_tick[REG2] if REG1 was chosen as the
1331    best register more recently than REG2.  */
1332 static int reg_rename_tick[FIRST_PSEUDO_REGISTER];
1333 
1334 /* Indicates the number of times renaming happened before the current one.  */
1335 static int reg_rename_this_tick;
1336 
1337 /* Choose the register among free, that is suitable for storing
1338    the rhs value.
1339 
1340    ORIGINAL_INSNS is the list of insns where the operation (rhs)
1341    originally appears.  There could be multiple original operations
1342    for single rhs since we moving it up and merging along different
1343    paths.
1344 
1345    Some code is adapted from regrename.c (regrename_optimize).
1346    If original register is available, function returns it.
1347    Otherwise it performs the checks, so the new register should
1348    comply with the following:
1349     - it should not violate any live ranges (such registers are in
1350       REG_RENAME_P->available_for_renaming set);
1351     - it should not be in the HARD_REGS_USED regset;
1352     - it should be in the class compatible with original uses;
1353     - it should not be clobbered through reference with different mode;
1354     - if we're in the leaf function, then the new register should
1355       not be in the LEAF_REGISTERS;
1356     - etc.
1357 
1358    If several registers meet the conditions, the register with smallest
1359    tick is returned to achieve more even register allocation.
1360 
1361    If original register seems to be ok, we set *IS_ORIG_REG_P_PTR to true.
1362 
1363    If no register satisfies the above conditions, NULL_RTX is returned.  */
1364 static rtx
choose_best_reg_1(HARD_REG_SET hard_regs_used,struct reg_rename * reg_rename_p,def_list_t original_insns,bool * is_orig_reg_p_ptr)1365 choose_best_reg_1 (HARD_REG_SET hard_regs_used,
1366                    struct reg_rename *reg_rename_p,
1367                    def_list_t original_insns, bool *is_orig_reg_p_ptr)
1368 {
1369   int best_new_reg;
1370   unsigned cur_reg;
1371   enum machine_mode mode = VOIDmode;
1372   unsigned regno, i, n;
1373   hard_reg_set_iterator hrsi;
1374   def_list_iterator di;
1375   def_t def;
1376 
1377   /* If original register is available, return it.  */
1378   *is_orig_reg_p_ptr = true;
1379 
1380   FOR_EACH_DEF (def, di, original_insns)
1381     {
1382       rtx orig_dest = SET_DEST (PATTERN (def->orig_insn));
1383 
1384       gcc_assert (REG_P (orig_dest));
1385 
1386       /* Check that all original operations have the same mode.
1387          This is done for the next loop; if we'd return from this
1388          loop, we'd check only part of them, but in this case
1389          it doesn't matter.  */
1390       if (mode == VOIDmode)
1391         mode = GET_MODE (orig_dest);
1392       gcc_assert (mode == GET_MODE (orig_dest));
1393 
1394       regno = REGNO (orig_dest);
1395       for (i = 0, n = hard_regno_nregs[regno][mode]; i < n; i++)
1396         if (TEST_HARD_REG_BIT (hard_regs_used, regno + i))
1397           break;
1398 
1399       /* All hard registers are available.  */
1400       if (i == n)
1401         {
1402           gcc_assert (mode != VOIDmode);
1403 
1404           /* Hard registers should not be shared.  */
1405           return gen_rtx_REG (mode, regno);
1406         }
1407     }
1408 
1409   *is_orig_reg_p_ptr = false;
1410   best_new_reg = -1;
1411 
1412   /* Among all available regs choose the register that was
1413      allocated earliest.  */
1414   EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
1415                                   0, cur_reg, hrsi)
1416     if (! TEST_HARD_REG_BIT (hard_regs_used, cur_reg))
1417       {
1418 	/* Check that all hard regs for mode are available.  */
1419 	for (i = 1, n = hard_regno_nregs[cur_reg][mode]; i < n; i++)
1420 	  if (TEST_HARD_REG_BIT (hard_regs_used, cur_reg + i)
1421 	      || !TEST_HARD_REG_BIT (reg_rename_p->available_for_renaming,
1422 				     cur_reg + i))
1423 	    break;
1424 
1425 	if (i < n)
1426 	  continue;
1427 
1428         /* All hard registers are available.  */
1429         if (best_new_reg < 0
1430             || reg_rename_tick[cur_reg] < reg_rename_tick[best_new_reg])
1431           {
1432             best_new_reg = cur_reg;
1433 
1434             /* Return immediately when we know there's no better reg.  */
1435             if (! reg_rename_tick[best_new_reg])
1436               break;
1437           }
1438       }
1439 
1440   if (best_new_reg >= 0)
1441     {
1442       /* Use the check from the above loop.  */
1443       gcc_assert (mode != VOIDmode);
1444       return gen_rtx_REG (mode, best_new_reg);
1445     }
1446 
1447   return NULL_RTX;
1448 }
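
/* Editorial illustration (not part of the original sources; register
   numbers are hypothetical): if the expr originally sets r5 but r5 is in
   HARD_REGS_USED, while r6 and r7 are both available for renaming with
   reg_rename_tick[r6] == 4 and reg_rename_tick[r7] == 2, the loop in
   choose_best_reg_1 picks r7, the register allocated earliest; a tick of
   zero would stop the search immediately, as no better choice can
   exist.  */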
1449 
1450 /* A wrapper around choose_best_reg_1 () to verify that we make correct
1451    assumptions about available registers in the function.  */
1452 static rtx
1453 choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p,
1454                  def_list_t original_insns, bool *is_orig_reg_p_ptr)
1455 {
1456   rtx best_reg = choose_best_reg_1 (hard_regs_used, reg_rename_p,
1457                                     original_insns, is_orig_reg_p_ptr);
1458 
1459   /* FIXME loop over hard_regno_nregs here.  */
1460   gcc_assert (best_reg == NULL_RTX
1461 	      || TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, REGNO (best_reg)));
1462 
1463   return best_reg;
1464 }
1465 
1466 /* Choose the pseudo register for storing the rhs value.  As this is
1467    supposed to work before reload, we return either the original register
1468    or create a new one.  The parameters are the same as in the
1469    choose_best_reg_1 function, except that USED_REGS may contain pseudos.
1470    If we work with hard regs, check also REG_RENAME_P->UNAVAILABLE_HARD_REGS.
1471 
1472    TODO: take into account register pressure while doing this.  Up to this
1473    moment, this function would never return NULL for pseudos, but we should
1474    not rely on this.  */
1475 static rtx
1476 choose_best_pseudo_reg (regset used_regs,
1477                         struct reg_rename *reg_rename_p,
1478                         def_list_t original_insns, bool *is_orig_reg_p_ptr)
1479 {
1480   def_list_iterator i;
1481   def_t def;
1482   enum machine_mode mode = VOIDmode;
1483   bool bad_hard_regs = false;
1484 
1485   /* We should not use this after reload.  */
1486   gcc_assert (!reload_completed);
1487 
1488   /* If original register is available, return it.  */
1489   *is_orig_reg_p_ptr = true;
1490 
1491   FOR_EACH_DEF (def, i, original_insns)
1492     {
1493       rtx dest = SET_DEST (PATTERN (def->orig_insn));
1494       int orig_regno;
1495 
1496       gcc_assert (REG_P (dest));
1497 
1498       /* Check that all original operations have the same mode.  */
1499       if (mode == VOIDmode)
1500         mode = GET_MODE (dest);
1501       else
1502         gcc_assert (mode == GET_MODE (dest));
1503       orig_regno = REGNO (dest);
1504 
1505       if (!REGNO_REG_SET_P (used_regs, orig_regno))
1506         {
1507           if (orig_regno < FIRST_PSEUDO_REGISTER)
1508             {
1509               gcc_assert (df_regs_ever_live_p (orig_regno));
1510 
1511               /* For hard registers, we have to check hardware imposed
1512                  limitations (frame/stack registers, calls crossed).  */
1513               if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
1514                                       orig_regno))
1515 		{
1516 		  /* Don't let register cross a call if it doesn't already
1517 		     cross one.  This condition is written in accordance with
1518 		     that in sched-deps.c sched_analyze_reg().  */
1519 		  if (!reg_rename_p->crosses_call
1520 		      || REG_N_CALLS_CROSSED (orig_regno) > 0)
1521 		    return gen_rtx_REG (mode, orig_regno);
1522 		}
1523 
1524               bad_hard_regs = true;
1525             }
1526           else
1527             return dest;
1528         }
1529      }
1530 
1531   *is_orig_reg_p_ptr = false;
1532 
1533   /* We had some original hard registers that couldn't be used.
1534      Those were likely special.  Don't try to create a pseudo.  */
1535   if (bad_hard_regs)
1536     return NULL_RTX;
1537 
1538   /* We haven't found a register from original operations.  Get a new one.
1539      FIXME: control register pressure somehow.  */
1540   {
1541     rtx new_reg = gen_reg_rtx (mode);
1542 
1543     gcc_assert (mode != VOIDmode);
1544 
1545     max_regno = max_reg_num ();
1546     maybe_extend_reg_info_p ();
1547     REG_N_CALLS_CROSSED (REGNO (new_reg)) = reg_rename_p->crosses_call ? 1 : 0;
1548 
1549     return new_reg;
1550   }
1551 }
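
/* Editorial sketch of the decision above (hypothetical numbers): if the
   only original destination is hard register 5, which is not in USED_REGS
   but would start crossing a call after the motion
   (REG_RENAME_P->crosses_call set while REG_N_CALLS_CROSSED (5) == 0),
   it is rejected, BAD_HARD_REGS is set, and NULL_RTX is returned rather
   than a fresh pseudo.  An original pseudo outside USED_REGS is returned
   as-is, and a freshly created pseudo has its REG_N_CALLS_CROSSED seeded
   from REG_RENAME_P->crosses_call.  */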
1552 
1553 /* Verify that EXPR_TARGET_AVAILABLE of EXPR is consistent with the
1554    liveness in USED_REGS and with REG_RENAME_P->UNAVAILABLE_HARD_REGS.  */
1555 static void
1556 verify_target_availability (expr_t expr, regset used_regs,
1557 			    struct reg_rename *reg_rename_p)
1558 {
1559   unsigned n, i, regno;
1560   enum machine_mode mode;
1561   bool target_available, live_available, hard_available;
1562 
1563   if (!REG_P (EXPR_LHS (expr)) || EXPR_TARGET_AVAILABLE (expr) < 0)
1564     return;
1565 
1566   regno = expr_dest_regno (expr);
1567   mode = GET_MODE (EXPR_LHS (expr));
1568   target_available = EXPR_TARGET_AVAILABLE (expr) == 1;
1569   n = HARD_REGISTER_NUM_P (regno) ? hard_regno_nregs[regno][mode] : 1;
1570 
1571   live_available = hard_available = true;
1572   for (i = 0; i < n; i++)
1573     {
1574       if (bitmap_bit_p (used_regs, regno + i))
1575         live_available = false;
1576       if (TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno + i))
1577         hard_available = false;
1578     }
1579 
1580   /* When target is not available, it may be due to hard register
1581      restrictions, e.g. crosses calls, so we check hard_available too.  */
1582   if (target_available)
1583     gcc_assert (live_available);
1584   else
1585     /* Check only if we haven't scheduled something on the previous fence,
1586        because due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues
1587        and having more than one fence, we may end up having targ_un in a
1588        block in whose successors the target register is actually available.
1589 
1590        The last condition handles the case when a dependence from a call insn
1591        was created in sched-deps.c for insns with destination registers that
1592        never crossed a call before, but do cross one after our code motion.
1593 
1594        FIXME: in the latter case, we just uselessly called find_used_regs,
1595        because we can't move this expression with any other register
1596        as well.  */
1597     gcc_assert (scheduled_something_on_previous_fence || !live_available
1598 		|| !hard_available
1599 		|| (!reload_completed && reg_rename_p->crosses_call
1600 		    && REG_N_CALLS_CROSSED (regno) == 0));
1601 }
1602 
1603 /* Collect unavailable registers due to liveness for EXPR from BNDS
1604    into USED_REGS.  Save additional information about available
1605    registers and unavailable due to hardware restriction registers
1606    into REG_RENAME_P structure.  Save original insns into ORIGINAL_INSNS
1607    list.  */
1608 static void
1609 collect_unavailable_regs_from_bnds (expr_t expr, blist_t bnds, regset used_regs,
1610 				    struct reg_rename *reg_rename_p,
1611 				    def_list_t *original_insns)
1612 {
1613   for (; bnds; bnds = BLIST_NEXT (bnds))
1614     {
1615       bool res;
1616       av_set_t orig_ops = NULL;
1617       bnd_t bnd = BLIST_BND (bnds);
1618 
1619       /* If the chosen best expr doesn't belong to current boundary,
1620 	 skip it.  */
1621       if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr)))
1622 	continue;
1623 
1624       /* Put in ORIG_OPS all exprs from this boundary that became
1625 	 RES on top.  */
1626       orig_ops = find_sequential_best_exprs (bnd, expr, false);
1627 
1628       /* Compute used regs and OR it into the USED_REGS.  */
1629       res = find_used_regs (BND_TO (bnd), orig_ops, used_regs,
1630 			    reg_rename_p, original_insns);
1631 
1632       /* FIXME: the assert is true until we'd have several boundaries.  */
1633       gcc_assert (res);
1634       av_set_clear (&orig_ops);
1635     }
1636 }
1637 
1638 /* Return TRUE if it is possible to replace LHSes of ORIG_INSNS with BEST_REG.
1639    If BEST_REG is valid, replace LHS of EXPR with it.  */
1640 static bool
1641 try_replace_dest_reg (ilist_t orig_insns, rtx best_reg, expr_t expr)
1642 {
1643   /* Try whether we'll be able to generate the insn
1644      'dest := best_reg' at the place of the original operation.  */
1645   for (; orig_insns; orig_insns = ILIST_NEXT (orig_insns))
1646     {
1647       insn_t orig_insn = DEF_LIST_DEF (orig_insns)->orig_insn;
1648 
1649       gcc_assert (EXPR_SEPARABLE_P (INSN_EXPR (orig_insn)));
1650 
1651       if (REGNO (best_reg) != REGNO (INSN_LHS (orig_insn))
1652 	  && (! replace_src_with_reg_ok_p (orig_insn, best_reg)
1653 	      || ! replace_dest_with_reg_ok_p (orig_insn, best_reg)))
1654 	return false;
1655     }
1656 
1657   /* Make sure that EXPR has the right destination
1658      register.  */
1659   if (expr_dest_regno (expr) != REGNO (best_reg))
1660     replace_dest_with_reg_in_expr (expr, best_reg);
1661   else
1662     EXPR_TARGET_AVAILABLE (expr) = 1;
1663 
1664   return true;
1665 }
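
/* Editorial example (hypothetical insn and registers): with a single
   original insn "r5 = r6 + 1" and BEST_REG r7, the loop above checks
   that r7 can serve both as the source of the copy "r5 := r7" emitted
   at the original place and as the new destination of the operation
   itself; only if both replacements are representable is EXPR's LHS
   rewritten to r7.  */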
1666 
1667 /* Select and assign best register to EXPR searching from BNDS.
1668    Set *IS_ORIG_REG_P to TRUE if original register was selected.
1669    Return FALSE if no register can be chosen, which could happen when:
1670    * EXPR_SEPARABLE_P is true but we were unable to find a suitable register;
1671    * EXPR_SEPARABLE_P is false but the insn sets/clobbers one of the registers
1672      that are used on the moving path.  */
1673 static bool
1674 find_best_reg_for_expr (expr_t expr, blist_t bnds, bool *is_orig_reg_p)
1675 {
1676   static struct reg_rename reg_rename_data;
1677 
1678   regset used_regs;
1679   def_list_t original_insns = NULL;
1680   bool reg_ok;
1681 
1682   *is_orig_reg_p = false;
1683 
1684   /* Don't bother to do anything if this insn doesn't set any registers.  */
1685   if (bitmap_empty_p (VINSN_REG_SETS (EXPR_VINSN (expr)))
1686       && bitmap_empty_p (VINSN_REG_CLOBBERS (EXPR_VINSN (expr))))
1687     return true;
1688 
1689   used_regs = get_clear_regset_from_pool ();
1690   CLEAR_HARD_REG_SET (reg_rename_data.unavailable_hard_regs);
1691 
1692   collect_unavailable_regs_from_bnds (expr, bnds, used_regs, &reg_rename_data,
1693 				      &original_insns);
1694 
1695 #ifdef ENABLE_CHECKING
1696   /* If after reload, make sure we're working with hard regs here.  */
1697   if (reload_completed)
1698     {
1699       reg_set_iterator rsi;
1700       unsigned i;
1701 
1702       EXECUTE_IF_SET_IN_REG_SET (used_regs, FIRST_PSEUDO_REGISTER, i, rsi)
1703         gcc_unreachable ();
1704     }
1705 #endif
1706 
1707   if (EXPR_SEPARABLE_P (expr))
1708     {
1709       rtx best_reg = NULL_RTX;
1710       /* Check that we have computed availability of a target register
1711 	 correctly.  */
1712       verify_target_availability (expr, used_regs, &reg_rename_data);
1713 
1714       /* Turn everything into hard regs after reload.  */
1715       if (reload_completed)
1716 	{
1717 	  HARD_REG_SET hard_regs_used;
1718 	  REG_SET_TO_HARD_REG_SET (hard_regs_used, used_regs);
1719 
1720 	  /* Join hard registers unavailable due to register class
1721 	     restrictions and live range intersection.  */
1722 	  IOR_HARD_REG_SET (hard_regs_used,
1723 			    reg_rename_data.unavailable_hard_regs);
1724 
1725 	  best_reg = choose_best_reg (hard_regs_used, &reg_rename_data,
1726 				      original_insns, is_orig_reg_p);
1727 	}
1728       else
1729 	best_reg = choose_best_pseudo_reg (used_regs, &reg_rename_data,
1730 					   original_insns, is_orig_reg_p);
1731 
1732       if (!best_reg)
1733 	reg_ok = false;
1734       else if (*is_orig_reg_p)
1735 	{
1736 	  /* In case of unification BEST_REG may be different from EXPR's LHS
1737 	     when EXPR's LHS is unavailable, and there is another LHS among
1738 	     ORIGINAL_INSNS.  */
1739 	  reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
1740 	}
1741       else
1742 	{
1743 	  /* Forbid renaming of low-cost insns.  */
1744 	  if (sel_vinsn_cost (EXPR_VINSN (expr)) < 2)
1745 	    reg_ok = false;
1746 	  else
1747 	    reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
1748 	}
1749     }
1750   else
1751     {
1752       /* If !EXPR_SCHEDULE_AS_RHS (EXPR), just make sure INSN doesn't set
1753 	 any of the HARD_REGS_USED set.  */
1754       if (vinsn_writes_one_of_regs_p (EXPR_VINSN (expr), used_regs,
1755 				      reg_rename_data.unavailable_hard_regs))
1756 	{
1757 	  reg_ok = false;
1758 	  gcc_assert (EXPR_TARGET_AVAILABLE (expr) <= 0);
1759 	}
1760       else
1761 	{
1762 	  reg_ok = true;
1763 	  gcc_assert (EXPR_TARGET_AVAILABLE (expr) != 0);
1764 	}
1765     }
1766 
1767   ilist_clear (&original_insns);
1768   return_regset_to_pool (used_regs);
1769 
1770   return reg_ok;
1771 }
1772 
1773 
1774 /* Return true if the dependence described by DS can be overcome.  */
1775 static bool
1776 can_speculate_dep_p (ds_t ds)
1777 {
1778   if (spec_info == NULL)
1779     return false;
1780 
1781   /* Leave only speculative data.  */
1782   ds &= SPECULATIVE;
1783 
1784   if (ds == 0)
1785     return false;
1786 
1787   {
1788     /* FIXME: make sched-deps.c produce only those non-hard dependencies
1789        that we can overcome.  */
1790     ds_t spec_mask = spec_info->mask;
1791 
1792     if ((ds & spec_mask) != ds)
1793       return false;
1794   }
1795 
1796   if (ds_weak (ds) < spec_info->data_weakness_cutoff)
1797     return false;
1798 
1799   return true;
1800 }
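
/* Editorial sketch (not from the original sources): a dependence with DS
   containing only BEGIN_DATA can be overcome if BEGIN_DATA is within
   spec_info->mask and ds_weak (DS), the estimated probability that the
   speculation succeeds, is at least spec_info->data_weakness_cutoff.
   A hard dependence has no SPECULATIVE bits, is zeroed by the
   "ds &= SPECULATIVE" step above, and thus can never be overcome.  */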
1801 
1802 /* Get a speculation check instruction.
1803    C_EXPR is a speculative expression,
1804    CHECK_DS describes speculations that should be checked,
1805    ORIG_INSN is the original non-speculative insn in the stream.  */
1806 static insn_t
1807 create_speculation_check (expr_t c_expr, ds_t check_ds, insn_t orig_insn)
1808 {
1809   rtx check_pattern;
1810   rtx insn_rtx;
1811   insn_t insn;
1812   basic_block recovery_block;
1813   rtx label;
1814 
1815   /* Create a recovery block if the target is going to emit a branchy
1816      check, or if ORIG_INSN was speculative already.  */
1817   if (targetm.sched.needs_block_p (check_ds)
1818       || EXPR_SPEC_DONE_DS (INSN_EXPR (orig_insn)) != 0)
1819     {
1820       recovery_block = sel_create_recovery_block (orig_insn);
1821       label = BB_HEAD (recovery_block);
1822     }
1823   else
1824     {
1825       recovery_block = NULL;
1826       label = NULL_RTX;
1827     }
1828 
1829   /* Get pattern of the check.  */
1830   check_pattern = targetm.sched.gen_spec_check (EXPR_INSN_RTX (c_expr), label,
1831 						check_ds);
1832 
1833   gcc_assert (check_pattern != NULL);
1834 
1835   /* Emit check.  */
1836   insn_rtx = create_insn_rtx_from_pattern (check_pattern, label);
1837 
1838   insn = sel_gen_insn_from_rtx_after (insn_rtx, INSN_EXPR (orig_insn),
1839 				      INSN_SEQNO (orig_insn), orig_insn);
1840 
1841   /* Make the check non-speculative.  */
1842   EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
1843   INSN_SPEC_CHECKED_DS (insn) = check_ds;
1844 
1845   /* Decrease priority of check by difference of load/check instruction
1846      latencies.  */
1847   EXPR_PRIORITY (INSN_EXPR (insn)) -= (sel_vinsn_cost (INSN_VINSN (orig_insn))
1848 				       - sel_vinsn_cost (INSN_VINSN (insn)));
1849 
1850   /* Emit copy of original insn (though with replaced target register,
1851      if needed) to the recovery block.  */
1852   if (recovery_block != NULL)
1853     {
1854       rtx twin_rtx;
1855 
1856       twin_rtx = copy_rtx (PATTERN (EXPR_INSN_RTX (c_expr)));
1857       twin_rtx = create_insn_rtx_from_pattern (twin_rtx, NULL_RTX);
1858       sel_gen_recovery_insn_from_rtx_after (twin_rtx,
1859 					    INSN_EXPR (orig_insn),
1860 					    INSN_SEQNO (insn),
1861 					    bb_note (recovery_block));
1862     }
1863 
1864   /* If we've generated a data speculation check, make sure
1865      that all the bookkeeping instructions we'll create during
1866      this move_op () will allocate an ALAT entry so that the
1867      check won't fail.
1868      In case of control speculation we must convert C_EXPR to control
1869      speculative mode, because failing to do so will bring us an exception
1870      thrown by the non-control-speculative load.  */
1871   check_ds = ds_get_max_dep_weak (check_ds);
1872   speculate_expr (c_expr, check_ds);
1873 
1874   return insn;
1875 }
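
/* Editorial sketch: on a target with branchy checks, the sequence above
   turns a data-speculative load "r1 = [mem]" that was moved up early
   into a check insn at the original point, whose recovery block holds
   the twin copy of the load; if the speculation fails at run time, the
   check branches to the recovery block and re-executes the load.  */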
1876 
1877 /* True when INSN is a "regN = regN" copy.  */
1878 static bool
1879 identical_copy_p (rtx insn)
1880 {
1881   rtx lhs, rhs, pat;
1882 
1883   pat = PATTERN (insn);
1884 
1885   if (GET_CODE (pat) != SET)
1886     return false;
1887 
1888   lhs = SET_DEST (pat);
1889   if (!REG_P (lhs))
1890     return false;
1891 
1892   rhs = SET_SRC (pat);
1893   if (!REG_P (rhs))
1894     return false;
1895 
1896   return REGNO (lhs) == REGNO (rhs);
1897 }
1898 
1899 /* Undo all transformations on *AV_PTR that were done when
1900    moving through INSN.  */
1901 static void
1902 undo_transformations (av_set_t *av_ptr, rtx insn)
1903 {
1904   av_set_iterator av_iter;
1905   expr_t expr;
1906   av_set_t new_set = NULL;
1907 
1908   /* First, kill any EXPR that uses registers set by INSN.  This is
1909      required for correctness.  */
1910   FOR_EACH_EXPR_1 (expr, av_iter, av_ptr)
1911     if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (expr))
1912         && bitmap_intersect_p (INSN_REG_SETS (insn),
1913                                VINSN_REG_USES (EXPR_VINSN (expr)))
1914         /* When an insn looks like 'r1 = r1', we could substitute through
1915            it, but the above condition will still hold.  This happened with
1916            gcc.c-torture/execute/961125-1.c.  */
1917         && !identical_copy_p (insn))
1918       {
1919         if (sched_verbose >= 6)
1920           sel_print ("Expr %d removed due to use/set conflict\n",
1921                      INSN_UID (EXPR_INSN_RTX (expr)));
1922         av_set_iter_remove (&av_iter);
1923       }
1924 
1925   /* Undo transformations looking at the history vector.  */
1926   FOR_EACH_EXPR (expr, av_iter, *av_ptr)
1927     {
1928       int index = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr),
1929                                         insn, EXPR_VINSN (expr), true);
1930 
1931       if (index >= 0)
1932         {
1933           expr_history_def *phist;
1934 
1935           phist = &EXPR_HISTORY_OF_CHANGES (expr)[index];
1936 
1937           switch (phist->type)
1938             {
1939             case TRANS_SPECULATION:
1940               {
1941                 ds_t old_ds, new_ds;
1942 
1943                 /* Compute the difference between old and new speculative
1944                    statuses: that's what we need to check.
1945                    Earlier we used to assert that the status will really
1946                    change.  This no longer works because only the probability
1947                    bits in the status may have changed during compute_av_set,
1948                    and in the case of merging different probabilities of the
1949                    same speculative status along different paths we do not
1950                    record this in the history vector.  */
1951                 old_ds = phist->spec_ds;
1952                 new_ds = EXPR_SPEC_DONE_DS (expr);
1953 
1954                 old_ds &= SPECULATIVE;
1955                 new_ds &= SPECULATIVE;
1956                 new_ds &= ~old_ds;
1957 
1958                 EXPR_SPEC_TO_CHECK_DS (expr) |= new_ds;
1959                 break;
1960               }
1961             case TRANS_SUBSTITUTION:
1962               {
1963                 expr_def _tmp_expr, *tmp_expr = &_tmp_expr;
1964                 vinsn_t new_vi;
1965                 bool add = true;
1966 
1967                 new_vi = phist->old_expr_vinsn;
1968 
1969                 gcc_assert (VINSN_SEPARABLE_P (new_vi)
1970                             == EXPR_SEPARABLE_P (expr));
1971                 copy_expr (tmp_expr, expr);
1972 
1973                 if (vinsn_equal_p (phist->new_expr_vinsn,
1974                                    EXPR_VINSN (tmp_expr)))
1975                   change_vinsn_in_expr (tmp_expr, new_vi);
1976                 else
1977                   /* This happens when we're unsubstituting on a bookkeeping
1978                      copy, which was in turn substituted.  The history is wrong
1979                      in this case.  Do it the hard way.  */
1980                   add = substitute_reg_in_expr (tmp_expr, insn, true);
1981                 if (add)
1982                   av_set_add (&new_set, tmp_expr);
1983                 clear_expr (tmp_expr);
1984                 break;
1985               }
1986             default:
1987               gcc_unreachable ();
1988             }
1989         }
1990 
1991     }
1992 
1993   av_set_union_and_clear (av_ptr, &new_set, NULL);
1994 }
1995 
1996 
1997 /* Moveup_* helpers for code motion and computing av sets.  */
1998 
1999 /* Propagates EXPR inside an insn group through THROUGH_INSN.
2000    The difference from the function below is that only substitution is
2001    performed.  */
2002 static enum MOVEUP_EXPR_CODE
2003 moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn)
2004 {
2005   vinsn_t vi = EXPR_VINSN (expr);
2006   ds_t *has_dep_p;
2007   ds_t full_ds;
2008 
2009   /* Do this only inside insn group.  */
2010   gcc_assert (INSN_SCHED_CYCLE (through_insn) > 0);
2011 
2012   full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
2013   if (full_ds == 0)
2014     return MOVEUP_EXPR_SAME;
2015 
2016   /* Substitution is the only transformation possible in this case.  */
2017   if (has_dep_p[DEPS_IN_RHS])
2018     {
2019       /* Can't substitute UNIQUE VINSNs.  */
2020       gcc_assert (!VINSN_UNIQUE_P (vi));
2021 
2022       if (can_substitute_through_p (through_insn,
2023                                     has_dep_p[DEPS_IN_RHS])
2024           && substitute_reg_in_expr (expr, through_insn, false))
2025         {
2026           EXPR_WAS_SUBSTITUTED (expr) = true;
2027           return MOVEUP_EXPR_CHANGED;
2028         }
2029 
2030       /* Don't care about this, as even true dependencies may be allowed
2031          in an insn group.  */
2032       return MOVEUP_EXPR_SAME;
2033     }
2034 
2035   /* This can catch output dependencies in COND_EXECs.  */
2036   if (has_dep_p[DEPS_IN_INSN])
2037     return MOVEUP_EXPR_NULL;
2038 
2039   /* This is either an output or an anti dependence, which usually has
2040      a zero latency.  Allow this here; if we're wrong, tick_check_p
2041      will fix it.  */
2042   gcc_assert (has_dep_p[DEPS_IN_LHS]);
2043   return MOVEUP_EXPR_AS_RHS;
2044 }
2045 
2046 /* True when a trapping EXPR cannot be moved through THROUGH_INSN.  */
2047 #define CANT_MOVE_TRAPPING(expr, through_insn)                \
2048   (VINSN_MAY_TRAP_P (EXPR_VINSN (expr))                       \
2049    && !sel_insn_has_single_succ_p ((through_insn), SUCCS_ALL) \
2050    && !sel_insn_is_speculation_check (through_insn))
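
/* For example (editorial note): a possibly-trapping load cannot be moved
   up through a conditional jump, since THROUGH_INSN then has more than
   one successor and the load would execute on paths where it previously
   did not; motion through a speculation check or within a
   single-successor region remains allowed.  */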
2051 
2052 /* True when a conflict on a target register was found during moveup_expr.  */
2053 static bool was_target_conflict = false;
2054 
2055 /* Return true when moving a debug INSN across THROUGH_INSN will
2056    create a bookkeeping block.  We don't want to create such blocks,
2057    for they would cause codegen differences between compilations with
2058    and without debug info.  */
2059 
2060 static bool
2061 moving_insn_creates_bookkeeping_block_p (insn_t insn,
2062 					 insn_t through_insn)
2063 {
2064   basic_block bbi, bbt;
2065   edge e1, e2;
2066   edge_iterator ei1, ei2;
2067 
2068   if (!bookkeeping_can_be_created_if_moved_through_p (through_insn))
2069     {
2070       if (sched_verbose >= 9)
2071 	sel_print ("no bookkeeping required: ");
2072       return FALSE;
2073     }
2074 
2075   bbi = BLOCK_FOR_INSN (insn);
2076 
2077   if (EDGE_COUNT (bbi->preds) == 1)
2078     {
2079       if (sched_verbose >= 9)
2080 	sel_print ("only one pred edge: ");
2081       return TRUE;
2082     }
2083 
2084   bbt = BLOCK_FOR_INSN (through_insn);
2085 
2086   FOR_EACH_EDGE (e1, ei1, bbt->succs)
2087     {
2088       FOR_EACH_EDGE (e2, ei2, bbi->preds)
2089 	{
2090 	  if (find_block_for_bookkeeping (e1, e2, TRUE))
2091 	    {
2092 	      if (sched_verbose >= 9)
2093 		sel_print ("found existing block: ");
2094 	      return FALSE;
2095 	    }
2096 	}
2097     }
2098 
2099   if (sched_verbose >= 9)
2100     sel_print ("would create bookkeeping block: ");
2101 
2102   return TRUE;
2103 }
2104 
2105 /* Return true when the conflict with newly created implicit clobbers
2106    between EXPR and THROUGH_INSN is found because of renaming.  */
2107 static bool
2108 implicit_clobber_conflict_p (insn_t through_insn, expr_t expr)
2109 {
2110   HARD_REG_SET temp;
2111   rtx insn, reg, rhs, pat;
2112   hard_reg_set_iterator hrsi;
2113   unsigned regno;
2114   bool valid;
2115 
2116   /* Make a new pseudo register.  */
2117   reg = gen_reg_rtx (GET_MODE (EXPR_LHS (expr)));
2118   max_regno = max_reg_num ();
2119   maybe_extend_reg_info_p ();
2120 
2121   /* Validate a change and bail out early.  */
2122   insn = EXPR_INSN_RTX (expr);
2123   validate_change (insn, &SET_DEST (PATTERN (insn)), reg, true);
2124   valid = verify_changes (0);
2125   cancel_changes (0);
2126   if (!valid)
2127     {
2128       if (sched_verbose >= 6)
2129 	sel_print ("implicit clobbers failed validation, ");
2130       return true;
2131     }
2132 
2133   /* Make a new insn with it.  */
2134   rhs = copy_rtx (VINSN_RHS (EXPR_VINSN (expr)));
2135   pat = gen_rtx_SET (VOIDmode, reg, rhs);
2136   start_sequence ();
2137   insn = emit_insn (pat);
2138   end_sequence ();
2139 
2140   /* Calculate implicit clobbers.  */
2141   extract_insn (insn);
2142   preprocess_constraints ();
2143   ira_implicitly_set_insn_hard_regs (&temp);
2144   AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2145 
2146   /* If any implicit clobber registers intersect with regular ones in
2147      through_insn, we have a dependency and thus bail out.  */
2148   EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi)
2149     {
2150       vinsn_t vi = INSN_VINSN (through_insn);
2151       if (bitmap_bit_p (VINSN_REG_SETS (vi), regno)
2152 	  || bitmap_bit_p (VINSN_REG_CLOBBERS (vi), regno)
2153 	  || bitmap_bit_p (VINSN_REG_USES (vi), regno))
2154 	return true;
2155     }
2156 
2157   return false;
2158 }
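
/* Editorial illustration: suppose that on some target an insn such as a
   division implicitly clobbers a hard register once its destination is
   renamed to a pseudo.  The function above re-recognizes a copy of the
   insn with a fresh pseudo destination, collects the implicitly set
   hard registers, and reports a conflict whenever one of them is used,
   set, or clobbered by THROUGH_INSN.  */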
2159 
2160 /* Modifies EXPR so it can be moved through the THROUGH_INSN,
2161    performing necessary transformations.  Record the type of transformation
2162    made in PTRANS_TYPE, when it is not NULL.  When INSIDE_INSN_GROUP,
2163    permit all dependencies except true ones, and try to remove those
2164    too via forward substitution.  All cases when a non-eliminable
2165    non-zero cost dependency exists inside an insn group will be fixed
2166    in tick_check_p instead.  */
2167 static enum MOVEUP_EXPR_CODE
2168 moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
2169             enum local_trans_type *ptrans_type)
2170 {
2171   vinsn_t vi = EXPR_VINSN (expr);
2172   insn_t insn = VINSN_INSN_RTX (vi);
2173   bool was_changed = false;
2174   bool as_rhs = false;
2175   ds_t *has_dep_p;
2176   ds_t full_ds;
2177 
2178   /* ??? We use dependencies of non-debug insns on debug insns to
2179      indicate that the debug insns need to be reset if the non-debug
2180      insn is pulled ahead of them.  It's hard to figure out how to
2181      introduce such a notion in sel-sched, but it already fails to
2182      support debug insns in other ways, so we just go ahead and
2183      let the debug insns go corrupt for now.  */
2184   if (DEBUG_INSN_P (through_insn) && !DEBUG_INSN_P (insn))
2185     return MOVEUP_EXPR_SAME;
2186 
2187   /* When inside_insn_group, delegate to the helper.  */
2188   if (inside_insn_group)
2189     return moveup_expr_inside_insn_group (expr, through_insn);
2190 
2191   /* Deal with unique insns and control dependencies.  */
2192   if (VINSN_UNIQUE_P (vi))
2193     {
2194       /* We can move jumps without side effects or jumps that are
2195 	 mutually exclusive with instruction THROUGH_INSN (in all cases where
2196 	 dependencies allow us to do so and the jump is not speculative).  */
2197       if (control_flow_insn_p (insn))
2198         {
2199           basic_block fallthru_bb;
2200 
2201           /* Do not move checks and do not move jumps through other
2202              jumps.  */
2203           if (control_flow_insn_p (through_insn)
2204               || sel_insn_is_speculation_check (insn))
2205             return MOVEUP_EXPR_NULL;
2206 
2207           /* Don't move jumps through CFG joins.  */
2208           if (bookkeeping_can_be_created_if_moved_through_p (through_insn))
2209             return MOVEUP_EXPR_NULL;
2210 
2211           /* The jump should have a clear fallthru block, and
2212              this block should be in the current region.  */
2213           if ((fallthru_bb = fallthru_bb_of_jump (insn)) == NULL
2214               || ! in_current_region_p (fallthru_bb))
2215             return MOVEUP_EXPR_NULL;
2216 
2217           /* And it should be mutually exclusive with through_insn.  */
2218           if (! sched_insns_conditions_mutex_p (insn, through_insn)
2219 	      && ! DEBUG_INSN_P (through_insn))
2220             return MOVEUP_EXPR_NULL;
2221         }
2222 
2223       /* Don't move what we can't move.  */
2224       if (EXPR_CANT_MOVE (expr)
2225 	  && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn))
2226 	return MOVEUP_EXPR_NULL;
2227 
2228       /* Don't move SCHED_GROUP instruction through anything.
2229          If we don't force this, then it will be possible to start
2230          scheduling a sched_group before all its dependencies are
2231          resolved.
2232          ??? Haifa deals with this issue by delaying the SCHED_GROUP
2233          as late as possible through rank_for_schedule.  */
2234       if (SCHED_GROUP_P (insn))
2235 	return MOVEUP_EXPR_NULL;
2236     }
2237   else
2238     gcc_assert (!control_flow_insn_p (insn));
2239 
2240   /* Don't move debug insns if this would require bookkeeping.  */
2241   if (DEBUG_INSN_P (insn)
2242       && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn)
2243       && moving_insn_creates_bookkeeping_block_p (insn, through_insn))
2244     return MOVEUP_EXPR_NULL;
2245 
2246   /* Deal with data dependencies.  */
2247   was_target_conflict = false;
2248   full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
2249   if (full_ds == 0)
2250     {
2251       if (!CANT_MOVE_TRAPPING (expr, through_insn))
2252 	return MOVEUP_EXPR_SAME;
2253     }
2254   else
2255     {
2256       /* We can move a UNIQUE insn up only as a whole and unchanged,
2257          so it shouldn't have any dependencies.  */
2258       if (VINSN_UNIQUE_P (vi))
2259 	return MOVEUP_EXPR_NULL;
2260     }
2261 
2262   if (full_ds != 0 && can_speculate_dep_p (full_ds))
2263     {
2264       int res;
2265 
2266       res = speculate_expr (expr, full_ds);
2267       if (res >= 0)
2268 	{
2269           /* Speculation was successful.  */
2270           full_ds = 0;
2271           was_changed = (res > 0);
2272           if (res == 2)
2273             was_target_conflict = true;
2274           if (ptrans_type)
2275             *ptrans_type = TRANS_SPECULATION;
2276 	  sel_clear_has_dependence ();
2277 	}
2278     }
2279 
2280   if (has_dep_p[DEPS_IN_INSN])
2281     /* We have some dependency that cannot be discarded.  */
2282     return MOVEUP_EXPR_NULL;
2283 
2284   if (has_dep_p[DEPS_IN_LHS])
2285     {
2286       /* Only separable insns can be moved up with the new register.
2287          In any case, we should mark the original register as
2288          unavailable.  */
2289       if (!enable_schedule_as_rhs_p || !EXPR_SEPARABLE_P (expr))
2290         return MOVEUP_EXPR_NULL;
2291 
2292       /* When renaming a hard register to a pseudo before reload, extra
2293 	 dependencies can occur from the implicit clobbers of the insn.
2294 	 Filter out such cases here.  */
2295       if (!reload_completed && REG_P (EXPR_LHS (expr))
2296 	  && HARD_REGISTER_P (EXPR_LHS (expr))
2297 	  && implicit_clobber_conflict_p (through_insn, expr))
2298 	{
2299 	  if (sched_verbose >= 6)
2300 	    sel_print ("implicit clobbers conflict detected, ");
2301 	  return MOVEUP_EXPR_NULL;
2302 	}
2303       EXPR_TARGET_AVAILABLE (expr) = false;
2304       was_target_conflict = true;
2305       as_rhs = true;
2306     }
2307 
2308   /* At this point we have either separable insns, that will be lifted
2309      up only as RHSes, or non-separable insns with no dependency in lhs.
2310      If dependency is in RHS, then try to perform substitution and move up
2311      substituted RHS:
2312 
2313       Ex. 1:				  Ex.2
2314 	y = x;				    y = x;
2315 	z = y*2;			    y = y*2;
2316 
2317     In Ex.1, x*2 can be substituted for y*2, and the whole operation can be
2318     moved above the y=x assignment as z=x*2.
2319 
2320     In Ex.2, x*2 can likewise be substituted for y*2, but only the right
2321     hand side can be moved because of the output dependency.  The operation
2322     was cropped to its rhs above.  */
2323   if (has_dep_p[DEPS_IN_RHS])
2324     {
2325       ds_t *rhs_dsp = &has_dep_p[DEPS_IN_RHS];
2326 
2327       /* Can't substitute UNIQUE VINSNs.  */
2328       gcc_assert (!VINSN_UNIQUE_P (vi));
2329 
2330       if (can_speculate_dep_p (*rhs_dsp))
2331 	{
2332           int res;
2333 
2334           res = speculate_expr (expr, *rhs_dsp);
2335           if (res >= 0)
2336             {
2337               /* Speculation was successful.  */
2338               *rhs_dsp = 0;
2339               was_changed = (res > 0);
2340               if (res == 2)
2341                 was_target_conflict = true;
2342               if (ptrans_type)
2343                 *ptrans_type = TRANS_SPECULATION;
2344             }
2345 	  else
2346 	    return MOVEUP_EXPR_NULL;
2347 	}
2348       else if (can_substitute_through_p (through_insn,
2349                                          *rhs_dsp)
2350                && substitute_reg_in_expr (expr, through_insn, false))
2351 	{
2352           /* ??? We cannot perform substitution AND speculation on the same
2353              insn.  */
2354           gcc_assert (!was_changed);
2355           was_changed = true;
2356           if (ptrans_type)
2357             *ptrans_type = TRANS_SUBSTITUTION;
2358           EXPR_WAS_SUBSTITUTED (expr) = true;
2359 	}
2360       else
2361 	return MOVEUP_EXPR_NULL;
2362     }
2363 
2364   /* Don't move trapping insns through jumps.
2365      This check should be at the end to give a chance to control speculation
2366      to perform its duties.  */
2367   if (CANT_MOVE_TRAPPING (expr, through_insn))
2368     return MOVEUP_EXPR_NULL;
2369 
2370   return (was_changed
2371           ? MOVEUP_EXPR_CHANGED
2372           : (as_rhs
2373              ? MOVEUP_EXPR_AS_RHS
2374              : MOVEUP_EXPR_SAME));
2375 }
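
/* Editorial recap of the possible outcomes above: an expr with no
   dependence on THROUGH_INSN (and no trapping problem) propagates as
   MOVEUP_EXPR_SAME; a separable expr whose only dependence is on its own
   LHS loses its target register and moves as MOVEUP_EXPR_AS_RHS; a
   successful substitution or speculation yields MOVEUP_EXPR_CHANGED;
   anything with a non-eliminable dependence is MOVEUP_EXPR_NULL.  */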
2376 
2377 /* Try to look at bitmap caches for EXPR and INSN pair, return true
2378    if successful.  When INSIDE_INSN_GROUP, also try to ignore dependencies
2379    that can exist within a parallel group.  Write to RES the resulting
2380    code for moveup_expr.  */
2381 static bool
2382 try_bitmap_cache (expr_t expr, insn_t insn,
2383                   bool inside_insn_group,
2384                   enum MOVEUP_EXPR_CODE *res)
2385 {
2386   int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
2387 
2388   /* First check whether we've analyzed this situation already.  */
2389   if (bitmap_bit_p (INSN_ANALYZED_DEPS (insn), expr_uid))
2390     {
2391       if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
2392         {
2393           if (sched_verbose >= 6)
2394             sel_print ("removed (cached)\n");
2395           *res = MOVEUP_EXPR_NULL;
2396           return true;
2397         }
2398       else
2399         {
2400           if (sched_verbose >= 6)
2401             sel_print ("unchanged (cached)\n");
2402           *res = MOVEUP_EXPR_SAME;
2403           return true;
2404         }
2405     }
2406   else if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
2407     {
2408       if (inside_insn_group)
2409         {
2410           if (sched_verbose >= 6)
2411             sel_print ("unchanged (as RHS, cached, inside insn group)\n");
2412           *res = MOVEUP_EXPR_SAME;
2413           return true;
2414 
2415         }
2416       else
2417         EXPR_TARGET_AVAILABLE (expr) = false;
2418 
2419       /* This is the only case in which the propagation result can change
2420          over time, as we can dynamically switch off scheduling as RHS.
2421          In this case, just check the flag to reach the correct decision.  */
2422       if (enable_schedule_as_rhs_p)
2423         {
2424           if (sched_verbose >= 6)
2425             sel_print ("unchanged (as RHS, cached)\n");
2426           *res = MOVEUP_EXPR_AS_RHS;
2427           return true;
2428         }
2429       else
2430         {
2431           if (sched_verbose >= 6)
2432             sel_print ("removed (cached as RHS, but renaming"
2433                        " is now disabled)\n");
2434           *res = MOVEUP_EXPR_NULL;
2435           return true;
2436         }
2437     }
2438 
2439   return false;
2440 }
2441 
2442 /* Try to look at bitmap caches for EXPR and INSN pair, return true
2443    if successful.  Write to RES the resulting code for moveup_expr.  */
2444 static bool
2445 try_transformation_cache (expr_t expr, insn_t insn,
2446                           enum MOVEUP_EXPR_CODE *res)
2447 {
2448   struct transformed_insns *pti
2449     = (struct transformed_insns *)
2450     htab_find_with_hash (INSN_TRANSFORMED_INSNS (insn),
2451                          &EXPR_VINSN (expr),
2452                          VINSN_HASH_RTX (EXPR_VINSN (expr)));
2453   if (pti)
2454     {
2455       /* This EXPR was already moved through this insn and was
2456          changed as a result.  Fetch the proper data from
2457          the hashtable.  */
2458       insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2459                               INSN_UID (insn), pti->type,
2460                               pti->vinsn_old, pti->vinsn_new,
2461                               EXPR_SPEC_DONE_DS (expr));
2462 
2463       if (INSN_IN_STREAM_P (VINSN_INSN_RTX (pti->vinsn_new)))
2464         pti->vinsn_new = vinsn_copy (pti->vinsn_new, true);
2465       change_vinsn_in_expr (expr, pti->vinsn_new);
2466       if (pti->was_target_conflict)
2467         EXPR_TARGET_AVAILABLE (expr) = false;
2468       if (pti->type == TRANS_SPECULATION)
2469         {
2470           EXPR_SPEC_DONE_DS (expr) = pti->ds;
2471           EXPR_NEEDS_SPEC_CHECK_P (expr) |= pti->needs_check;
2472         }
2473 
2474       if (sched_verbose >= 6)
2475         {
2476           sel_print ("changed (cached): ");
2477           dump_expr (expr);
2478           sel_print ("\n");
2479         }
2480 
2481       *res = MOVEUP_EXPR_CHANGED;
2482       return true;
2483     }
2484 
2485   return false;
2486 }
2487 
2488 /* Update bitmap caches on INSN with result RES of propagating EXPR.  */
2489 static void
2490 update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group,
2491                      enum MOVEUP_EXPR_CODE res)
2492 {
2493   int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
2494 
2495   /* Do not cache the result of propagating jumps through an insn group,
2496      as it is always true, which is not useful outside the group.  */
2497   if (inside_insn_group)
2498     return;
2499 
2500   if (res == MOVEUP_EXPR_NULL)
2501     {
2502       bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2503       bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
2504     }
2505   else if (res == MOVEUP_EXPR_SAME)
2506     {
2507       bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2508       bitmap_clear_bit (INSN_FOUND_DEPS (insn), expr_uid);
2509     }
2510   else if (res == MOVEUP_EXPR_AS_RHS)
2511     {
2512       bitmap_clear_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2513       bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
2514     }
2515   else
2516     gcc_unreachable ();
2517 }
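
/* The two bitmaps thus encode three cached answers per (insn, expr)
   pair (editorial recap):

       ANALYZED_DEPS   FOUND_DEPS      cached result
             1              1          MOVEUP_EXPR_NULL
             1              0          MOVEUP_EXPR_SAME
             0              1          MOVEUP_EXPR_AS_RHS
             0              0          nothing cached

   MOVEUP_EXPR_CHANGED results live in the transformation hashtable
   instead, as they carry a new vinsn.  */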
2518 
2519 /* Update hashtable on INSN with changed EXPR, old EXPR_OLD_VINSN
2520    and transformation type TRANS_TYPE.  */
2521 static void
2522 update_transformation_cache (expr_t expr, insn_t insn,
2523                              bool inside_insn_group,
2524                              enum local_trans_type trans_type,
2525                              vinsn_t expr_old_vinsn)
2526 {
2527   struct transformed_insns *pti;
2528 
2529   if (inside_insn_group)
2530     return;
2531 
2532   pti = XNEW (struct transformed_insns);
2533   pti->vinsn_old = expr_old_vinsn;
2534   pti->vinsn_new = EXPR_VINSN (expr);
2535   pti->type = trans_type;
2536   pti->was_target_conflict = was_target_conflict;
2537   pti->ds = EXPR_SPEC_DONE_DS (expr);
2538   pti->needs_check = EXPR_NEEDS_SPEC_CHECK_P (expr);
2539   vinsn_attach (pti->vinsn_old);
2540   vinsn_attach (pti->vinsn_new);
2541   *((struct transformed_insns **)
2542     htab_find_slot_with_hash (INSN_TRANSFORMED_INSNS (insn),
2543                               pti, VINSN_HASH_RTX (expr_old_vinsn),
2544                               INSERT)) = pti;
2545 }
2546 
2547 /* Same as moveup_expr, but first looks up the result of
2548    transformation in caches.  */
2549 static enum MOVEUP_EXPR_CODE
2550 moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
2551 {
2552   enum MOVEUP_EXPR_CODE res;
2553   bool got_answer = false;
2554 
2555   if (sched_verbose >= 6)
2556     {
2557       sel_print ("Moving ");
2558       dump_expr (expr);
2559       sel_print (" through %d: ", INSN_UID (insn));
2560     }
2561 
2562   if (DEBUG_INSN_P (EXPR_INSN_RTX (expr))
2563       && (sel_bb_head (BLOCK_FOR_INSN (EXPR_INSN_RTX (expr)))
2564 	  == EXPR_INSN_RTX (expr)))
2565     /* Don't use cached information for debug insns that are heads of
2566        basic blocks.  */;
2567   else if (try_bitmap_cache (expr, insn, inside_insn_group, &res))
2568     /* When inside an insn group, we do not want to remove stores
2569        conflicting with previously issued loads.  */
2570     got_answer = ! inside_insn_group || res != MOVEUP_EXPR_NULL;
2571   else if (try_transformation_cache (expr, insn, &res))
2572     got_answer = true;
2573 
2574   if (! got_answer)
2575     {
2576       /* Invoke moveup_expr and record the results.  */
2577       vinsn_t expr_old_vinsn = EXPR_VINSN (expr);
2578       ds_t expr_old_spec_ds = EXPR_SPEC_DONE_DS (expr);
2579       int expr_uid = INSN_UID (VINSN_INSN_RTX (expr_old_vinsn));
2580       bool unique_p = VINSN_UNIQUE_P (expr_old_vinsn);
2581       enum local_trans_type trans_type = TRANS_SUBSTITUTION;
2582 
2583       /* ??? Invent something better than this.  We can't allow old_vinsn
2584          to go, we need it for the history vector.  */
2585       vinsn_attach (expr_old_vinsn);
2586 
2587       res = moveup_expr (expr, insn, inside_insn_group,
2588                          &trans_type);
2589       switch (res)
2590         {
2591         case MOVEUP_EXPR_NULL:
2592           update_bitmap_cache (expr, insn, inside_insn_group, res);
2593 	  if (sched_verbose >= 6)
2594             sel_print ("removed\n");
2595 	  break;
2596 
2597 	case MOVEUP_EXPR_SAME:
2598           update_bitmap_cache (expr, insn, inside_insn_group, res);
2599           if (sched_verbose >= 6)
2600             sel_print ("unchanged\n");
2601 	  break;
2602 
2603         case MOVEUP_EXPR_AS_RHS:
2604           gcc_assert (!unique_p || inside_insn_group);
2605           update_bitmap_cache (expr, insn, inside_insn_group, res);
2606 	  if (sched_verbose >= 6)
2607             sel_print ("unchanged (as RHS)\n");
2608 	  break;
2609 
2610 	case MOVEUP_EXPR_CHANGED:
2611           gcc_assert (INSN_UID (EXPR_INSN_RTX (expr)) != expr_uid
2612                       || EXPR_SPEC_DONE_DS (expr) != expr_old_spec_ds);
2613           insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2614                                   INSN_UID (insn), trans_type,
2615                                   expr_old_vinsn, EXPR_VINSN (expr),
2616                                   expr_old_spec_ds);
2617           update_transformation_cache (expr, insn, inside_insn_group,
2618                                        trans_type, expr_old_vinsn);
2619           if (sched_verbose >= 6)
2620             {
2621               sel_print ("changed: ");
2622               dump_expr (expr);
2623               sel_print ("\n");
2624             }
2625 	  break;
2626 	default:
2627 	  gcc_unreachable ();
2628         }
2629 
2630       vinsn_detach (expr_old_vinsn);
2631     }
2632 
2633   return res;
2634 }
2635 
2636 /* Moves an av set AVP up through INSN, performing necessary
2637    transformations.  */
2638 static void
2639 moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group)
2640 {
2641   av_set_iterator i;
2642   expr_t expr;
2643 
2644   FOR_EACH_EXPR_1 (expr, i, avp)
2645     {
2646 
2647       switch (moveup_expr_cached (expr, insn, inside_insn_group))
2648 	{
2649 	case MOVEUP_EXPR_SAME:
2650         case MOVEUP_EXPR_AS_RHS:
2651 	  break;
2652 
2653 	case MOVEUP_EXPR_NULL:
2654 	  av_set_iter_remove (&i);
2655 	  break;
2656 
2657 	case MOVEUP_EXPR_CHANGED:
2658           expr = merge_with_other_exprs (avp, &i, expr);
2659 	  break;
2660 
2661 	default:
2662 	  gcc_unreachable ();
2663 	}
2664     }
2665 }
2666 
2667 /* Moves AVP set along PATH.  */
2668 static void
2669 moveup_set_inside_insn_group (av_set_t *avp, ilist_t path)
2670 {
2671   int last_cycle;
2672 
2673   if (sched_verbose >= 6)
2674     sel_print ("Moving expressions up in the insn group...\n");
2675   if (! path)
2676     return;
2677   last_cycle = INSN_SCHED_CYCLE (ILIST_INSN (path));
2678   while (path
2679          && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
2680     {
2681       moveup_set_expr (avp, ILIST_INSN (path), true);
2682       path = ILIST_NEXT (path);
2683     }
2684 }
2685 
2686 /* Returns true if after moving EXPR along PATH it equals EXPR_VLIW.  */
2687 static bool
2688 equal_after_moveup_path_p (expr_t expr, ilist_t path, expr_t expr_vliw)
2689 {
2690   expr_def _tmp, *tmp = &_tmp;
2691   int last_cycle;
2692   bool res = true;
2693 
2694   copy_expr_onside (tmp, expr);
2695   last_cycle = path ? INSN_SCHED_CYCLE (ILIST_INSN (path)) : 0;
2696   while (path
2697          && res
2698          && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
2699     {
2700       res = (moveup_expr_cached (tmp, ILIST_INSN (path), true)
2701              != MOVEUP_EXPR_NULL);
2702       path = ILIST_NEXT (path);
2703     }
2704 
2705   if (res)
2706     {
2707       vinsn_t tmp_vinsn = EXPR_VINSN (tmp);
2708       vinsn_t expr_vliw_vinsn = EXPR_VINSN (expr_vliw);
2709 
2710       if (tmp_vinsn != expr_vliw_vinsn)
2711 	res = vinsn_equal_p (tmp_vinsn, expr_vliw_vinsn);
2712     }
2713 
2714   clear_expr (tmp);
2715   return res;
2716 }
2717 
2718 
2719 /* Functions that compute av and lv sets.  */
2720 
2721 /* Returns true if INSN is not a downward continuation of the given path P in
2722    the current stage.  */
2723 static bool
2724 is_ineligible_successor (insn_t insn, ilist_t p)
2725 {
2726   insn_t prev_insn;
2727 
2728   /* Check that insn is not deleted.  */
2729   if (PREV_INSN (insn) && NEXT_INSN (PREV_INSN (insn)) != insn)
2730     gcc_unreachable ();
2731   else if (NEXT_INSN (insn) && PREV_INSN (NEXT_INSN (insn)) != insn)
2732     gcc_unreachable ();
2733 
2734   /* If it's the first insn visited, then the successor is ok.  */
2735   if (!p)
2736     return false;
2737 
2738   prev_insn = ILIST_INSN (p);
2739 
2740   if (/* a backward edge.  */
2741       INSN_SEQNO (insn) < INSN_SEQNO (prev_insn)
2742       /* is already visited.  */
2743       || (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn)
2744 	  && (ilist_is_in_p (p, insn)
2745               /* We can reach another fence here and still seqno of insn
2746                  would be equal to seqno of prev_insn.  This is possible
2747                  when prev_insn is a previously created bookkeeping copy.
2748                  In that case it'd get a seqno of insn.  Thus, check here
2749                  whether insn is in current fence too.  */
2750               || IN_CURRENT_FENCE_P (insn)))
2751       /* Was already scheduled on this round.  */
2752       || (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn)
2753 	  && IN_CURRENT_FENCE_P (insn))
2754       /* An insn from another fence could also be
2755 	 scheduled earlier even if this insn is not in
2756 	 a fence list right now.  Check INSN_SCHED_TIMES instead.  */
2757       || (!pipelining_p
2758           && INSN_SCHED_TIMES (insn) > 0))
2759     return true;
2760   else
2761     return false;
2762 }
2763 
2764 /* Computes the av_set below the last bb insn INSN, doing all the 'dirty work'
2765    of handling multiple successors and properly merging their av_sets.  P is
2766    the current path traversed.  WS is the size of lookahead window.
2767    Return the av set computed.  */
2768 static av_set_t
2769 compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
2770 {
2771   struct succs_info *sinfo;
2772   av_set_t expr_in_all_succ_branches = NULL;
2773   int is;
2774   insn_t succ, zero_succ = NULL;
2775   av_set_t av1 = NULL;
2776 
2777   gcc_assert (sel_bb_end_p (insn));
2778 
2779   /* Find the different kinds of successors needed for correct computation
2780      of the SPEC and TARGET_AVAILABLE attributes.  */
2781   sinfo = compute_succs_info (insn, SUCCS_NORMAL);
2782 
2783   /* Debug output.  */
2784   if (sched_verbose >= 6)
2785     {
2786       sel_print ("successors of bb end (%d): ", INSN_UID (insn));
2787       dump_insn_vector (sinfo->succs_ok);
2788       sel_print ("\n");
2789       if (sinfo->succs_ok_n != sinfo->all_succs_n)
2790         sel_print ("real successors num: %d\n", sinfo->all_succs_n);
2791     }
2792 
2793   /* Add insn to the tail of current path.  */
2794   ilist_add (&p, insn);
2795 
2796   FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
2797     {
2798       av_set_t succ_set;
2799 
2800       /* We will edit SUCC_SET and EXPR_SPEC field of its elements.  */
2801       succ_set = compute_av_set_inside_bb (succ, p, ws, true);
2802 
2803       av_set_split_usefulness (succ_set,
2804                                sinfo->probs_ok[is],
2805                                sinfo->all_prob);
2806 
2807       if (sinfo->all_succs_n > 1)
2808 	{
2809           /* Find EXPR'es that came from *all* successors and save them
2810              into expr_in_all_succ_branches.  This set will be used later
2811              for calculating speculation attributes of EXPR'es.  */
2812           if (is == 0)
2813             {
2814               expr_in_all_succ_branches = av_set_copy (succ_set);
2815 
2816               /* Remember the first successor for later. */
2817               zero_succ = succ;
2818             }
2819           else
2820             {
2821               av_set_iterator i;
2822               expr_t expr;
2823 
2824               FOR_EACH_EXPR_1 (expr, i, &expr_in_all_succ_branches)
2825                 if (!av_set_is_in_p (succ_set, EXPR_VINSN (expr)))
2826                   av_set_iter_remove (&i);
2827             }
2828 	}
2829 
2830       /* Union the av_sets.  Check liveness restrictions on target registers
2831          in special case of two successors.  */
2832       if (sinfo->succs_ok_n == 2 && is == 1)
2833         {
2834           basic_block bb0 = BLOCK_FOR_INSN (zero_succ);
2835           basic_block bb1 = BLOCK_FOR_INSN (succ);
2836 
2837           gcc_assert (BB_LV_SET_VALID_P (bb0) && BB_LV_SET_VALID_P (bb1));
2838           av_set_union_and_live (&av1, &succ_set,
2839                                  BB_LV_SET (bb0),
2840                                  BB_LV_SET (bb1),
2841                                  insn);
2842         }
2843       else
2844         av_set_union_and_clear (&av1, &succ_set, insn);
2845     }
2846 
2847   /* Check liveness restrictions the hard way when there are more than
2848      two successors.  */
2849   if (sinfo->succs_ok_n > 2)
2850     FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
2851       {
2852         basic_block succ_bb = BLOCK_FOR_INSN (succ);
2853 
2854         gcc_assert (BB_LV_SET_VALID_P (succ_bb));
2855         mark_unavailable_targets (av1, BB_AV_SET (succ_bb),
2856                                   BB_LV_SET (succ_bb));
2857       }
2858 
2859   /* Finally, check liveness restrictions on paths leaving the region.  */
2860   if (sinfo->all_succs_n > sinfo->succs_ok_n)
2861     FOR_EACH_VEC_ELT (sinfo->succs_other, is, succ)
2862       mark_unavailable_targets
2863         (av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (succ)));
2864 
2865   if (sinfo->all_succs_n > 1)
2866     {
2867       av_set_iterator i;
2868       expr_t expr;
2869 
2870       /* Increase the spec attribute of all EXPR'es that didn't come
2871 	 from all successors.  */
2872       FOR_EACH_EXPR (expr, i, av1)
2873 	if (!av_set_is_in_p (expr_in_all_succ_branches, EXPR_VINSN (expr)))
2874 	  EXPR_SPEC (expr)++;
2875 
2876       av_set_clear (&expr_in_all_succ_branches);
2877 
2878       /* Do not move conditional branches through other
2879 	 conditional branches.  So, remove all conditional
2880 	 branches from av_set if current operator is a conditional
2881 	 branch.  */
2882       av_set_substract_cond_branches (&av1);
2883     }
2884 
2885   ilist_remove (&p);
2886   free_succs_info (sinfo);
2887 
2888   if (sched_verbose >= 6)
2889     {
2890       sel_print ("av_succs (%d): ", INSN_UID (insn));
2891       dump_av_set (av1);
2892       sel_print ("\n");
2893     }
2894 
2895   return av1;
2896 }
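
/* Editorial example of the EXPR_SPEC update above: if INSN ends a block
   with two successors and an expression is available on only one of
   them, it is absent from EXPR_IN_ALL_SUCC_BRANCHES, so its EXPR_SPEC is
   incremented, recording one more point of control speculation on the
   way up to INSN.  */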
2897 
2898 /* This function computes av_set for the FIRST_INSN by dragging valid
2899    av_set through all basic block insns either from the end of basic block
2900    (computed using compute_av_set_at_bb_end) or from the insn on which
2901    MAX_WS was exceeded.  It uses compute_av_set_at_bb_end to compute the
2902    av_set below the basic block and to handle conditional branches.
2903    FIRST_INSN - the basic block head, P - path consisting of the insns
2904    traversed on the way to the FIRST_INSN (the path is sparse, only bb heads
2905    and bb ends are added to the path), WS - current window size,
2906    NEED_COPY_P - true if we'll make a copy of av_set before returning it.  */
2907 static av_set_t
2908 compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
2909 			  bool need_copy_p)
2910 {
2911   insn_t cur_insn;
2912   int end_ws = ws;
2913   insn_t bb_end = sel_bb_end (BLOCK_FOR_INSN (first_insn));
2914   insn_t after_bb_end = NEXT_INSN (bb_end);
2915   insn_t last_insn;
2916   av_set_t av = NULL;
2917   basic_block cur_bb = BLOCK_FOR_INSN (first_insn);
2918 
2919   /* Return NULL if insn is not on the legitimate downward path.  */
2920   if (is_ineligible_successor (first_insn, p))
2921     {
2922       if (sched_verbose >= 6)
2923         sel_print ("Insn %d is ineligible_successor\n", INSN_UID (first_insn));
2924 
2925       return NULL;
2926     }
2927 
2928   /* If insn already has valid av(insn) computed, just return it.  */
2929   if (AV_SET_VALID_P (first_insn))
2930     {
2931       av_set_t av_set;
2932 
2933       if (sel_bb_head_p (first_insn))
2934 	av_set = BB_AV_SET (BLOCK_FOR_INSN (first_insn));
2935       else
2936 	av_set = NULL;
2937 
2938       if (sched_verbose >= 6)
2939         {
2940           sel_print ("Insn %d has a valid av set: ", INSN_UID (first_insn));
2941           dump_av_set (av_set);
2942           sel_print ("\n");
2943         }
2944 
2945       return need_copy_p ? av_set_copy (av_set) : av_set;
2946     }
2947 
2948   ilist_add (&p, first_insn);
2949 
2950   /* As a result, after this loop completes, LAST_INSN will be the insn
2951      whose av_set we start the backward computation from: that set is
2952      either NULL, because the window size was exceeded on LAST_INSN, or
2953      the valid av_set computed by compute_av_set_at_bb_end below the
2954      last insn of the basic block.  */
2955   for (last_insn = first_insn; last_insn != after_bb_end;
2956        last_insn = NEXT_INSN (last_insn))
2957     {
2958       /* We may encounter valid av_set not only on bb_head, but also on
2959 	 those insns on which previously MAX_WS was exceeded.  */
2960       if (AV_SET_VALID_P (last_insn))
2961 	{
2962           if (sched_verbose >= 6)
2963             sel_print ("Insn %d has a valid empty av set\n", INSN_UID (last_insn));
2964 	  break;
2965 	}
2966 
2967       /* The special case: the last insn of the BB may be an
2968          ineligible_successor due to its SEQ_NO, which was set on it
2969	 when it was emitted as bookkeeping.  */
2970       if (last_insn != first_insn
2971           && is_ineligible_successor (last_insn, p))
2972 	{
2973           if (sched_verbose >= 6)
2974             sel_print ("Insn %d is ineligible_successor\n", INSN_UID (last_insn));
2975 	  break;
2976 	}
2977 
2978       if (DEBUG_INSN_P (last_insn))
2979 	continue;
2980 
2981       if (end_ws > max_ws)
2982 	{
2983 	  /* We can reach max lookahead size at bb_header, so clean av_set
2984 	     first.  */
2985 	  INSN_WS_LEVEL (last_insn) = global_level;
2986 
2987 	  if (sched_verbose >= 6)
2988             sel_print ("Insn %d is beyond the software lookahead window size\n",
2989                        INSN_UID (last_insn));
2990 	  break;
2991 	}
2992 
2993       end_ws++;
2994     }
2995 
2996   /* Get the valid av_set into AV above the LAST_INSN to start backward
2997      computation from.  It either will be empty av_set or av_set computed from
2998      the successors on the last insn of the current bb.  */
2999   if (last_insn != after_bb_end)
3000     {
3001       av = NULL;
3002 
3003       /* This is needed only to obtain av_sets that are identical to
3004          those computed by the old compute_av_set version.  */
3005       if (last_insn == first_insn && !INSN_NOP_P (last_insn))
3006         av_set_add (&av, INSN_EXPR (last_insn));
3007     }
3008   else
3009     /* END_WS is always already increased by 1 if LAST_INSN == AFTER_BB_END.  */
3010     av = compute_av_set_at_bb_end (bb_end, p, end_ws);
3011 
3012   /* Compute av_set in AV starting from below the LAST_INSN up to
3013      location above the FIRST_INSN.  */
3014   for (cur_insn = PREV_INSN (last_insn); cur_insn != PREV_INSN (first_insn);
3015        cur_insn = PREV_INSN (cur_insn))
3016     if (!INSN_NOP_P (cur_insn))
3017       {
3018         expr_t expr;
3019 
3020         moveup_set_expr (&av, cur_insn, false);
3021 
3022         /* If the expression for CUR_INSN is already in the set,
3023            replace it by the new one.  */
3024         expr = av_set_lookup (av, INSN_VINSN (cur_insn));
3025         if (expr != NULL)
3026           {
3027             clear_expr (expr);
3028             copy_expr (expr, INSN_EXPR (cur_insn));
3029           }
3030         else
3031           av_set_add (&av, INSN_EXPR (cur_insn));
3032       }
3033 
3034   /* Clear stale bb_av_set.  */
3035   if (sel_bb_head_p (first_insn))
3036     {
3037       av_set_clear (&BB_AV_SET (cur_bb));
3038       BB_AV_SET (cur_bb) = need_copy_p ? av_set_copy (av) : av;
3039       BB_AV_LEVEL (cur_bb) = global_level;
3040     }
3041 
3042   if (sched_verbose >= 6)
3043     {
3044       sel_print ("Computed av set for insn %d: ", INSN_UID (first_insn));
3045       dump_av_set (av);
3046       sel_print ("\n");
3047     }
3048 
3049   ilist_remove (&p);
3050   return av;
3051 }
3052 
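/* Illustration (a hand-worked sketch, not part of the pass): suppose a
   block contains

     i1:  r1 = r2 + 1
     i2:  r3 = r1 * 2

   and the av_set just below i2 is { r3 = r1 * 2 }.  Walking backwards,
   moveup_set_expr may substitute through i1, so that above i1 the set
   reads { r3 = (r2 + 1) * 2 }, and i1's own expression r1 = r2 + 1 is
   added by av_set_add.  Expressions whose dependences can be resolved
   neither by substitution nor by renaming are dropped on the way up.  */
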
3053 /* Compute av set before INSN.
3054    INSN - the current operation (actual rtx INSN)
3055    P - the current path, which is list of insns visited so far
3056    WS - software lookahead window size.
3057    UNIQUE_P - TRUE if the returned av_set will be changed, hence
3058    if we want to save the computed av_set in s_i_d, we should make a copy of it.
3059 
3060    In the resulting set we will have only expressions that don't have delay
3061    stalls and nonsubstitutable dependences.  */
3062 static av_set_t
3063 compute_av_set (insn_t insn, ilist_t p, int ws, bool unique_p)
3064 {
3065   return compute_av_set_inside_bb (insn, p, ws, unique_p);
3066 }
3067 
3068 /* Propagate a liveness set LV through INSN.  */
3069 static void
3070 propagate_lv_set (regset lv, insn_t insn)
3071 {
3072   gcc_assert (INSN_P (insn));
3073 
3074   if (INSN_NOP_P (insn))
3075     return;
3076 
3077   df_simulate_one_insn_backwards (BLOCK_FOR_INSN (insn), insn, lv);
3078 }
3079 
3080 /* Return the liveness set at the end of BB.  */
3081 static regset
3082 compute_live_after_bb (basic_block bb)
3083 {
3084   edge e;
3085   edge_iterator ei;
3086   regset lv = get_clear_regset_from_pool ();
3087 
3088   gcc_assert (!ignore_first);
3089 
3090   FOR_EACH_EDGE (e, ei, bb->succs)
3091     if (sel_bb_empty_p (e->dest))
3092       {
3093         if (! BB_LV_SET_VALID_P (e->dest))
3094           {
3095             gcc_unreachable ();  /* The fallback code below is dead.  */
3096             gcc_assert (BB_LV_SET (e->dest) == NULL);
3097             BB_LV_SET (e->dest) = compute_live_after_bb (e->dest);
3098             BB_LV_SET_VALID_P (e->dest) = true;
3099           }
3100         IOR_REG_SET (lv, BB_LV_SET (e->dest));
3101       }
3102     else
3103       IOR_REG_SET (lv, compute_live (sel_bb_head (e->dest)));
3104 
3105   return lv;
3106 }
3107 
3108 /* Compute the set of all live registers at the point before INSN and save
3109    it at INSN if INSN is bb header.  */
3110 regset
3111 compute_live (insn_t insn)
3112 {
3113   basic_block bb = BLOCK_FOR_INSN (insn);
3114   insn_t final, temp;
3115   regset lv;
3116 
3117   /* Return the valid set if we're already on it.  */
3118   if (!ignore_first)
3119     {
3120       regset src = NULL;
3121 
3122       if (sel_bb_head_p (insn) && BB_LV_SET_VALID_P (bb))
3123         src = BB_LV_SET (bb);
3124       else
3125         {
3126           gcc_assert (in_current_region_p (bb));
3127           if (INSN_LIVE_VALID_P (insn))
3128             src = INSN_LIVE (insn);
3129         }
3130 
3131       if (src)
3132 	{
3133 	  lv = get_regset_from_pool ();
3134 	  COPY_REG_SET (lv, src);
3135 
3136           if (sel_bb_head_p (insn) && ! BB_LV_SET_VALID_P (bb))
3137             {
3138               COPY_REG_SET (BB_LV_SET (bb), lv);
3139               BB_LV_SET_VALID_P (bb) = true;
3140             }
3141 
3142 	  return_regset_to_pool (lv);
3143 	  return lv;
3144 	}
3145     }
3146 
3147   /* We've skipped the wrong lv_set.  Don't skip the right one.  */
3148   ignore_first = false;
3149   gcc_assert (in_current_region_p (bb));
3150 
3151   /* Find a valid LV set in this block or below, if needed.
3152      Start searching from the next insn: either ignore_first is true, or
3153      INSN doesn't have a correct live set.  */
3154   temp = NEXT_INSN (insn);
3155   final = NEXT_INSN (BB_END (bb));
3156   while (temp != final && ! INSN_LIVE_VALID_P (temp))
3157     temp = NEXT_INSN (temp);
3158   if (temp == final)
3159     {
3160       lv = compute_live_after_bb (bb);
3161       temp = PREV_INSN (temp);
3162     }
3163   else
3164     {
3165       lv = get_regset_from_pool ();
3166       COPY_REG_SET (lv, INSN_LIVE (temp));
3167     }
3168 
3169   /* Put correct lv sets on the insns which have bad sets.  */
3170   final = PREV_INSN (insn);
3171   while (temp != final)
3172     {
3173       propagate_lv_set (lv, temp);
3174       COPY_REG_SET (INSN_LIVE (temp), lv);
3175       INSN_LIVE_VALID_P (temp) = true;
3176       temp = PREV_INSN (temp);
3177     }
3178 
3179   /* Also save the set in the BB, if INSN is a bb header.  */
3180   if (sel_bb_head_p (insn))
3181     {
3182       basic_block bb = BLOCK_FOR_INSN (insn);
3183 
3184       COPY_REG_SET (BB_LV_SET (bb), lv);
3185       BB_LV_SET_VALID_P (bb) = true;
3186     }
3187 
3188   /* We return LV to the pool, but will not clear it there.  Thus we can
3189      legitimately use LV till the next use of regset_pool_get ().  */
3190   return_regset_to_pool (lv);
3191   return lv;
3192 }
3193 
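/* Usage sketch (illustrative): compute_live returns its regset to the
   pool before returning, so the result stays usable only until the
   next regset_pool_get ().  A caller that needs the set to survive
   further pool traffic should copy it first:

     regset live = get_regset_from_pool ();
     COPY_REG_SET (live, compute_live (insn));
     ...
     return_regset_to_pool (live);

   compute_live_below_insn below needs no such copy because it IORs
   each successor's set into REGS immediately.  */
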
3194 /* Update liveness sets for INSN.  */
3195 static inline void
3196 update_liveness_on_insn (rtx insn)
3197 {
3198   ignore_first = true;
3199   compute_live (insn);
3200 }
3201 
3202 /* Compute liveness below INSN and write it into REGS.  */
3203 static inline void
3204 compute_live_below_insn (rtx insn, regset regs)
3205 {
3206   rtx succ;
3207   succ_iterator si;
3208 
3209   FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
3210     IOR_REG_SET (regs, compute_live (succ));
3211 }
3212 
3213 /* Update the data gathered in av and lv sets starting from INSN.  */
3214 static void
3215 update_data_sets (rtx insn)
3216 {
3217   update_liveness_on_insn (insn);
3218   if (sel_bb_head_p (insn))
3219     {
3220       gcc_assert (AV_LEVEL (insn) != 0);
3221       BB_AV_LEVEL (BLOCK_FOR_INSN (insn)) = -1;
3222       compute_av_set (insn, NULL, 0, 0);
3223     }
3224 }
3225 
3226 
3227 /* Helper for move_op () and find_used_regs ().
3228    Return speculation type for which a check should be created on the place
3229    of INSN.  EXPR is one of the original ops we are searching for.  */
3230 static ds_t
3231 get_spec_check_type_for_insn (insn_t insn, expr_t expr)
3232 {
3233   ds_t to_check_ds;
3234   ds_t already_checked_ds = EXPR_SPEC_DONE_DS (INSN_EXPR (insn));
3235 
3236   to_check_ds = EXPR_SPEC_TO_CHECK_DS (expr);
3237 
3238   if (targetm.sched.get_insn_checked_ds)
3239     already_checked_ds |= targetm.sched.get_insn_checked_ds (insn);
3240 
3241   if (spec_info != NULL
3242       && (spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL))
3243     already_checked_ds |= BEGIN_CONTROL;
3244 
3245   already_checked_ds = ds_get_speculation_types (already_checked_ds);
3246 
3247   to_check_ds &= ~already_checked_ds;
3248 
3249   return to_check_ds;
3250 }
3251 
3252 /* Find the set of registers that are unavailable for storing exprs
3253    while moving ORIG_OPS up on the path starting from INSN due to
3254    liveness (USED_REGS) or hardware restrictions (REG_RENAME_P).
3255 
3256    All the original operations found during the traversal are saved in the
3257    ORIGINAL_INSNS list.
3258 
3259    REG_RENAME_P denotes the set of hardware registers that
3260    cannot be used for renaming due to register class restrictions,
3261    mode restrictions and the like (the register we'll choose should be of
3262    a class compatible with the original uses, shouldn't be in
3263    call_used_regs, should be HARD_REGNO_RENAME_OK etc).
3264 
3265    Returns TRUE if we've found all original insns, FALSE otherwise.
3266 
3267    This function utilizes code_motion_path_driver (formerly find_used_regs_1)
3268    to traverse the code motion paths.  This helper function finds registers
3269    that are not available for storing exprs while moving ORIG_OPS up on the
3270    path starting from INSN.  A register is considered used on the moving
3271    path if one of the following conditions is not satisfied:
3272 
3273       (1) a register not set or read on any path from xi to an instance of
3274 	  the original operation,
3275       (2) not among the live registers of the point immediately following the
3276           first original operation on a given downward path, except for the
3277 	  original target register of the operation,
3278       (3) not live on the other path of any conditional branch that is passed
3279 	  by the operation, in case original operations are not present on
3280 	  both paths of the conditional branch.
3281 
3285    REG_RENAME_P->CROSSES_CALL is true if there is a call insn on the path
3286    from INSN to original insn. In this case CALL_USED_REG_SET will be added
3287    to unavailable hard regs at the point original operation is found.  */
3288 
3289 static bool
3290 find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
3291 		struct reg_rename  *reg_rename_p, def_list_t *original_insns)
3292 {
3293   def_list_iterator i;
3294   def_t def;
3295   int res;
3296   bool needs_spec_check_p = false;
3297   expr_t expr;
3298   av_set_iterator expr_iter;
3299   struct fur_static_params sparams;
3300   struct cmpd_local_params lparams;
3301 
3302   /* We haven't visited any blocks yet.  */
3303   bitmap_clear (code_motion_visited_blocks);
3304 
3305   /* Init parameters for code_motion_path_driver.  */
3306   sparams.crosses_call = false;
3307   sparams.original_insns = original_insns;
3308   sparams.used_regs = used_regs;
3309 
3310   /* Set the appropriate hooks and data.  */
3311   code_motion_path_driver_info = &fur_hooks;
3312 
3313   res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
3314 
3315   reg_rename_p->crosses_call |= sparams.crosses_call;
3316 
3317   gcc_assert (res == 1);
3318   gcc_assert (original_insns && *original_insns);
3319 
3320   /* ??? We calculate whether an expression needs a check when computing
3321      av sets.  This information is not as precise as it could be due to
3322      merging this bit in merge_expr.  We can do better in find_used_regs,
3323      but we want to avoid multiple traversals of the same code motion
3324      paths.  */
3325   FOR_EACH_EXPR (expr, expr_iter, orig_ops)
3326     needs_spec_check_p |= EXPR_NEEDS_SPEC_CHECK_P (expr);
3327 
3328   /* Mark hardware regs in REG_RENAME_P that are not suitable
3329      for renaming expr in INSN due to hardware restrictions (register class,
3330      modes compatibility etc).  */
3331   FOR_EACH_DEF (def, i, *original_insns)
3332     {
3333       vinsn_t vinsn = INSN_VINSN (def->orig_insn);
3334 
3335       if (VINSN_SEPARABLE_P (vinsn))
3336 	mark_unavailable_hard_regs (def, reg_rename_p, used_regs);
3337 
3338       /* Do not allow clobbering of ld.[sa] address in case some of the
3339          original operations need a check.  */
3340       if (needs_spec_check_p)
3341 	IOR_REG_SET (used_regs, VINSN_REG_USES (vinsn));
3342     }
3343 
3344   return true;
3345 }
3346 
3347 
3348 /* Functions to choose the best insn from available ones.  */
3349 
3350 /* Adjusts the priority for EXPR using the backend *_adjust_priority hook.  */
3351 static int
3352 sel_target_adjust_priority (expr_t expr)
3353 {
3354   int priority = EXPR_PRIORITY (expr);
3355   int new_priority;
3356 
3357   if (targetm.sched.adjust_priority)
3358     new_priority = targetm.sched.adjust_priority (EXPR_INSN_RTX (expr), priority);
3359   else
3360     new_priority = priority;
3361 
3362   /* If the priority has changed, adjust EXPR_PRIORITY_ADJ accordingly.  */
3363   EXPR_PRIORITY_ADJ (expr) = new_priority - EXPR_PRIORITY (expr);
3364 
3365   gcc_assert (EXPR_PRIORITY_ADJ (expr) >= 0);
3366 
3367   if (sched_verbose >= 4)
3368     sel_print ("sel_target_adjust_priority: insn %d,  %d+%d = %d.\n",
3369 	       INSN_UID (EXPR_INSN_RTX (expr)), EXPR_PRIORITY (expr),
3370 	       EXPR_PRIORITY_ADJ (expr), new_priority);
3371 
3372   return new_priority;
3373 }
3374 
3375 /* Rank two available exprs for scheduling.  Never return 0 here.  */
3376 static int
3377 sel_rank_for_schedule (const void *x, const void *y)
3378 {
3379   expr_t tmp = *(const expr_t *) y;
3380   expr_t tmp2 = *(const expr_t *) x;
3381   insn_t tmp_insn, tmp2_insn;
3382   vinsn_t tmp_vinsn, tmp2_vinsn;
3383   int val;
3384 
3385   tmp_vinsn = EXPR_VINSN (tmp);
3386   tmp2_vinsn = EXPR_VINSN (tmp2);
3387   tmp_insn = EXPR_INSN_RTX (tmp);
3388   tmp2_insn = EXPR_INSN_RTX (tmp2);
3389 
3390   /* Schedule debug insns as early as possible.  */
3391   if (DEBUG_INSN_P (tmp_insn) && !DEBUG_INSN_P (tmp2_insn))
3392     return -1;
3393   else if (DEBUG_INSN_P (tmp2_insn))
3394     return 1;
3395 
3396   /* Prefer SCHED_GROUP_P insns to any others.  */
3397   if (SCHED_GROUP_P (tmp_insn) != SCHED_GROUP_P (tmp2_insn))
3398     {
3399       if (VINSN_UNIQUE_P (tmp_vinsn) && VINSN_UNIQUE_P (tmp2_vinsn))
3400         return SCHED_GROUP_P (tmp2_insn) ? 1 : -1;
3401 
3402       /* Now uniqueness means SCHED_GROUP_P is set, because schedule groups
3403          cannot be cloned.  */
3404       if (VINSN_UNIQUE_P (tmp2_vinsn))
3405         return 1;
3406       return -1;
3407     }
3408 
3409   /* Discourage scheduling of speculative checks.  */
3410   val = (sel_insn_is_speculation_check (tmp_insn)
3411 	 - sel_insn_is_speculation_check (tmp2_insn));
3412   if (val)
3413     return val;
3414 
3415   /* Prefer a not-yet-scheduled insn over a scheduled one.  */
3416   if (EXPR_SCHED_TIMES (tmp) > 0 || EXPR_SCHED_TIMES (tmp2) > 0)
3417     {
3418       val = EXPR_SCHED_TIMES (tmp) - EXPR_SCHED_TIMES (tmp2);
3419       if (val)
3420 	return val;
3421     }
3422 
3423   /* Prefer a jump over a non-jump instruction.  */
3424   if (control_flow_insn_p (tmp_insn) && !control_flow_insn_p (tmp2_insn))
3425     return -1;
3426   else if (control_flow_insn_p (tmp2_insn) && !control_flow_insn_p (tmp_insn))
3427     return 1;
3428 
3429   /* Prefer an expr with greater priority.  */
3430   if (EXPR_USEFULNESS (tmp) != 0 && EXPR_USEFULNESS (tmp2) != 0)
3431     {
3432       int p2 = EXPR_PRIORITY (tmp2) + EXPR_PRIORITY_ADJ (tmp2),
3433           p1 = EXPR_PRIORITY (tmp) + EXPR_PRIORITY_ADJ (tmp);
3434 
3435       val = p2 * EXPR_USEFULNESS (tmp2) - p1 * EXPR_USEFULNESS (tmp);
3436     }
3437   else
3438     val = EXPR_PRIORITY (tmp2) - EXPR_PRIORITY (tmp)
3439 	  + EXPR_PRIORITY_ADJ (tmp2) - EXPR_PRIORITY_ADJ (tmp);
3440   if (val)
3441     return val;
3442 
3443   if (spec_info != NULL && spec_info->mask != 0)
3444     /* This code was taken from haifa-sched.c: rank_for_schedule ().  */
3445     {
3446       ds_t ds1, ds2;
3447       dw_t dw1, dw2;
3448       int dw;
3449 
3450       ds1 = EXPR_SPEC_DONE_DS (tmp);
3451       if (ds1)
3452 	dw1 = ds_weak (ds1);
3453       else
3454 	dw1 = NO_DEP_WEAK;
3455 
3456       ds2 = EXPR_SPEC_DONE_DS (tmp2);
3457       if (ds2)
3458 	dw2 = ds_weak (ds2);
3459       else
3460 	dw2 = NO_DEP_WEAK;
3461 
3462       dw = dw2 - dw1;
3463       if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
3464 	return dw;
3465     }
3466 
3467   /* Prefer an old insn to a bookkeeping insn.  */
3468   if (INSN_UID (tmp_insn) < first_emitted_uid
3469       && INSN_UID (tmp2_insn) >= first_emitted_uid)
3470     return -1;
3471   if (INSN_UID (tmp_insn) >= first_emitted_uid
3472       && INSN_UID (tmp2_insn) < first_emitted_uid)
3473     return 1;
3474 
3475   /* Prefer an insn with smaller UID, as a last resort.
3476      We can't safely use INSN_LUID as it is defined only for those insns
3477      that are in the stream.  */
3478   return INSN_UID (tmp_insn) - INSN_UID (tmp2_insn);
3479 }
3480 
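/* A note on the comparator's orientation (derived from the code above,
   for the reader's convenience): TMP is taken from Y and TMP2 from X,
   so a preferred expression compares as greater and migrates towards
   the tail of the sorted vector.  E.g. with EXPR_SCHED_TIMES (a) == 0
   and EXPR_SCHED_TIMES (b) == 2, comparing A (as X) against B (as Y)
   yields 2 - 0 > 0, placing A after B -- and fill_vec_av_set below
   deliberately walks the vector from the tail, so A is visited
   first.  */
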
3481 /* Filter out expressions from av set pointed to by AV_PTR
3482    that are pipelined too many times.  */
3483 static void
3484 process_pipelined_exprs (av_set_t *av_ptr)
3485 {
3486   expr_t expr;
3487   av_set_iterator si;
3488 
3489   /* Don't pipeline already pipelined code as that would increase the
3490      number of unnecessary register moves.  */
3491   FOR_EACH_EXPR_1 (expr, si, av_ptr)
3492     {
3493       if (EXPR_SCHED_TIMES (expr)
3494 	  >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES))
3495 	av_set_iter_remove (&si);
3496     }
3497 }
3498 
3499 /* Filter speculative insns from AV_PTR if we don't want them.  */
3500 static void
3501 process_spec_exprs (av_set_t *av_ptr)
3502 {
3503   bool try_data_p = true;
3504   bool try_control_p = true;
3505   expr_t expr;
3506   av_set_iterator si;
3507 
3508   if (spec_info == NULL)
3509     return;
3510 
3511   /* Scan *AV_PTR to find out if we want to consider speculative
3512      instructions for scheduling.  */
3513   FOR_EACH_EXPR_1 (expr, si, av_ptr)
3514     {
3515       ds_t ds;
3516 
3517       ds = EXPR_SPEC_DONE_DS (expr);
3518 
3519       /* The probability of success is too low - don't speculate.  */
3520       if ((ds & SPECULATIVE)
3521           && (ds_weak (ds) < spec_info->data_weakness_cutoff
3522               || EXPR_USEFULNESS (expr) < spec_info->control_weakness_cutoff
3523 	      || (pipelining_p && false  /* This clause is disabled.  */
3524 		  && (ds & DATA_SPEC)
3525 		  && (ds & CONTROL_SPEC))))
3526         {
3527           av_set_iter_remove (&si);
3528           continue;
3529         }
3530 
3531       if ((spec_info->flags & PREFER_NON_DATA_SPEC)
3532           && !(ds & BEGIN_DATA))
3533         try_data_p = false;
3534 
3535       if ((spec_info->flags & PREFER_NON_CONTROL_SPEC)
3536           && !(ds & BEGIN_CONTROL))
3537         try_control_p = false;
3538     }
3539 
3540   FOR_EACH_EXPR_1 (expr, si, av_ptr)
3541     {
3542       ds_t ds;
3543 
3544       ds = EXPR_SPEC_DONE_DS (expr);
3545 
3546       if (ds & SPECULATIVE)
3547         {
3548           if ((ds & BEGIN_DATA) && !try_data_p)
3549             /* We don't want any data speculative instructions right
3550                now.  */
3551             av_set_iter_remove (&si);
3552 
3553           if ((ds & BEGIN_CONTROL) && !try_control_p)
3554             /* We don't want any control speculative instructions right
3555                now.  */
3556             av_set_iter_remove (&si);
3557         }
3558     }
3559 }
3560 
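/* Net effect of the two scans above (a summary, no new behavior): with
   PREFER_NON_DATA_SPEC set, data-speculative expressions survive only
   when every expression in *AV_PTR is data speculative -- as soon as
   one non-data-speculative candidate exists, all BEGIN_DATA ones are
   removed.  PREFER_NON_CONTROL_SPEC works likewise for control
   speculation.  */
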
3561 /* Search for any use-like insns in AV_PTR and decide on scheduling
3562    them.  Return one when found, and NULL otherwise.
3563    Note that we check here whether a USE could be scheduled to avoid
3564    an infinite loop later.  */
3565 static expr_t
3566 process_use_exprs (av_set_t *av_ptr)
3567 {
3568   expr_t expr;
3569   av_set_iterator si;
3570   bool uses_present_p = false;
3571   bool try_uses_p = true;
3572 
3573   FOR_EACH_EXPR_1 (expr, si, av_ptr)
3574     {
3575       /* This will also initialize INSN_CODE for later use.  */
3576       if (recog_memoized (EXPR_INSN_RTX (expr)) < 0)
3577         {
3578           /* If we have a USE in *AV_PTR that was not scheduled yet,
3579              do so now, as it can only do good.  */
3580           if (EXPR_SCHED_TIMES (expr) <= 0)
3581             {
3582               if (EXPR_TARGET_AVAILABLE (expr) == 1)
3583                 return expr;
3584 
3585               av_set_iter_remove (&si);
3586             }
3587           else
3588             {
3589               gcc_assert (pipelining_p);
3590 
3591               uses_present_p = true;
3592             }
3593         }
3594       else
3595         try_uses_p = false;
3596     }
3597 
3598   if (uses_present_p)
3599     {
3600       /* If we don't want to schedule any USEs right now and we have some
3601          in *AV_PTR, remove them, else just return the first one found.  */
3602       if (!try_uses_p)
3603         {
3604           FOR_EACH_EXPR_1 (expr, si, av_ptr)
3605             if (INSN_CODE (EXPR_INSN_RTX (expr)) < 0)
3606               av_set_iter_remove (&si);
3607         }
3608       else
3609         {
3610           FOR_EACH_EXPR_1 (expr, si, av_ptr)
3611             {
3612               gcc_assert (INSN_CODE (EXPR_INSN_RTX (expr)) < 0);
3613 
3614               if (EXPR_TARGET_AVAILABLE (expr) == 1)
3615                 return expr;
3616 
3617               av_set_iter_remove (&si);
3618             }
3619         }
3620     }
3621 
3622   return NULL;
3623 }
3624 
3625 /* Lookup EXPR in VINSN_VEC and return TRUE if found.  Also check patterns from
3626    EXPR's history of changes.  */
3627 static bool
3628 vinsn_vec_has_expr_p (vinsn_vec_t vinsn_vec, expr_t expr)
3629 {
3630   vinsn_t vinsn, expr_vinsn;
3631   int n;
3632   unsigned i;
3633 
3634   /* Start with checking expr itself and then proceed with all the old forms
3635      of expr taken from its history vector.  */
3636   for (i = 0, expr_vinsn = EXPR_VINSN (expr);
3637        expr_vinsn;
3638        expr_vinsn = (i < EXPR_HISTORY_OF_CHANGES (expr).length ()
3639 		     ? EXPR_HISTORY_OF_CHANGES (expr)[i++].old_expr_vinsn
3640 		     : NULL))
3641     FOR_EACH_VEC_ELT (vinsn_vec, n, vinsn)
3642       if (VINSN_SEPARABLE_P (vinsn))
3643 	{
3644 	  if (vinsn_equal_p (vinsn, expr_vinsn))
3645 	    return true;
3646 	}
3647       else
3648 	{
3649 	  /* For non-separable instructions, the blocking insn can have
3650 	     another pattern due to substitution, and we can't choose
3651 	     different register as in the above case.  Check all registers
3652 	     being written instead.  */
3653 	  if (bitmap_intersect_p (VINSN_REG_SETS (vinsn),
3654 				  VINSN_REG_SETS (expr_vinsn)))
3655 	    return true;
3656 	}
3657 
3658   return false;
3659 }
3660 
3661 #ifdef ENABLE_CHECKING
3662 /* Return true if either of expressions from ORIG_OPS can be blocked
3663    by previously created bookkeeping code.  STATIC_PARAMS points to static
3664    parameters of move_op.  */
3665 static bool
3666 av_set_could_be_blocked_by_bookkeeping_p (av_set_t orig_ops, void *static_params)
3667 {
3668   expr_t expr;
3669   av_set_iterator iter;
3670   moveop_static_params_p sparams;
3671 
3672   /* This checks that expressions in ORIG_OPS are not blocked by bookkeeping
3673      created while scheduling on another fence.  */
3674   FOR_EACH_EXPR (expr, iter, orig_ops)
3675     if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
3676       return true;
3677 
3678   gcc_assert (code_motion_path_driver_info == &move_op_hooks);
3679   sparams = (moveop_static_params_p) static_params;
3680 
3681   /* Expressions can be also blocked by bookkeeping created during current
3682      move_op.  */
3683   if (bitmap_bit_p (current_copies, INSN_UID (sparams->failed_insn)))
3684     FOR_EACH_EXPR (expr, iter, orig_ops)
3685       if (moveup_expr_cached (expr, sparams->failed_insn, false) != MOVEUP_EXPR_NULL)
3686         return true;
3687 
3688   /* Expressions in ORIG_OPS may have wrong destination register due to
3689      renaming.  Check with the right register instead.  */
3690   if (sparams->dest && REG_P (sparams->dest))
3691     {
3692       rtx reg = sparams->dest;
3693       vinsn_t failed_vinsn = INSN_VINSN (sparams->failed_insn);
3694 
3695       if (register_unavailable_p (VINSN_REG_SETS (failed_vinsn), reg)
3696 	  || register_unavailable_p (VINSN_REG_USES (failed_vinsn), reg)
3697 	  || register_unavailable_p (VINSN_REG_CLOBBERS (failed_vinsn), reg))
3698 	return true;
3699     }
3700 
3701   return false;
3702 }
3703 #endif
3704 
3705 /* Clear VINSN_VEC and detach vinsns.  */
3706 static void
3707 vinsn_vec_clear (vinsn_vec_t *vinsn_vec)
3708 {
3709   unsigned len = vinsn_vec->length ();
3710   if (len > 0)
3711     {
3712       vinsn_t vinsn;
3713       int n;
3714 
3715       FOR_EACH_VEC_ELT (*vinsn_vec, n, vinsn)
3716         vinsn_detach (vinsn);
3717       vinsn_vec->block_remove (0, len);
3718     }
3719 }
3720 
3721 /* Add the vinsn of EXPR to the VINSN_VEC.  */
3722 static void
3723 vinsn_vec_add (vinsn_vec_t *vinsn_vec, expr_t expr)
3724 {
3725   vinsn_attach (EXPR_VINSN (expr));
3726   vinsn_vec->safe_push (EXPR_VINSN (expr));
3727 }
3728 
3729 /* Free the vector representing blocked expressions.  */
3730 static void
3731 vinsn_vec_free (vinsn_vec_t &vinsn_vec)
3732 {
3733   vinsn_vec.release ();
3734 }
3735 
3736 /* Increase EXPR_PRIORITY_ADJ for INSN by AMOUNT.  */
3737 
3738 void sel_add_to_insn_priority (rtx insn, int amount)
3739 {
3740   EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) += amount;
3741 
3742   if (sched_verbose >= 2)
3743     sel_print ("sel_add_to_insn_priority: insn %d, by %d (now %d+%d).\n",
3744 	       INSN_UID (insn), amount, EXPR_PRIORITY (INSN_EXPR (insn)),
3745 	       EXPR_PRIORITY_ADJ (INSN_EXPR (insn)));
3746 }
3747 
3748 /* Turn AV into a vector, filter inappropriate insns and sort it.  Return
3749    true if there is something to schedule.  BNDS and FENCE are current
3750    boundaries and fence, respectively.  If we need to stall for some cycles
3751    before an expr from AV would become available, write this number to
3752    *PNEED_STALL.  */
3753 static bool
3754 fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
3755                  int *pneed_stall)
3756 {
3757   av_set_iterator si;
3758   expr_t expr;
3759   int sched_next_worked = 0, stalled, n;
3760   static int av_max_prio, est_ticks_till_branch;
3761   int min_need_stall = -1;
3762   deps_t dc = BND_DC (BLIST_BND (bnds));
3763 
3764   /* Bail out early when the ready list contained only USEs/CLOBBERs that are
3765      already scheduled.  */
3766   if (av == NULL)
3767     return false;
3768 
3769   /* Empty the vector left over from the previous scheduling step.  */
3770   if (vec_av_set.length () > 0)
3771     vec_av_set.block_remove (0, vec_av_set.length ());
3772 
3773   /* Turn the set into a vector for sorting and call sel_target_adjust_priority
3774      for each insn.  */
3775   gcc_assert (vec_av_set.is_empty ());
3776   FOR_EACH_EXPR (expr, si, av)
3777     {
3778       vec_av_set.safe_push (expr);
3779 
3780       gcc_assert (EXPR_PRIORITY_ADJ (expr) == 0 || *pneed_stall);
3781 
3782       /* Adjust priority using target backend hook.  */
3783       sel_target_adjust_priority (expr);
3784     }
3785 
3786   /* Sort the vector.  */
3787   vec_av_set.qsort (sel_rank_for_schedule);
3788 
3789   /* We record maximal priority of insns in av set for current instruction
3790      group.  */
3791   if (FENCE_STARTS_CYCLE_P (fence))
3792     av_max_prio = est_ticks_till_branch = INT_MIN;
3793 
3794   /* Filter out inappropriate expressions.  Loop's direction is reversed to
3795      visit "best" instructions first.  We assume that vec::unordered_remove
3796      moves last element in place of one being deleted.  */
3797   for (n = vec_av_set.length () - 1, stalled = 0; n >= 0; n--)
3798     {
3799       expr_t expr = vec_av_set[n];
3800       insn_t insn = EXPR_INSN_RTX (expr);
3801       signed char target_available;
3802       bool is_orig_reg_p = true;
3803       int need_cycles, new_prio;
3804 
3805       /* Don't allow any insns other than from SCHED_GROUP if we have one.  */
3806       if (FENCE_SCHED_NEXT (fence) && insn != FENCE_SCHED_NEXT (fence))
3807         {
3808           vec_av_set.unordered_remove (n);
3809           continue;
3810         }
3811 
3812       /* Count the sched_next insns we see (just in case there
3813          could be several).  */
3814       if (FENCE_SCHED_NEXT (fence))
3815         sched_next_worked++;
3816 
3817       /* Check all liveness requirements and try renaming.
3818          FIXME: try to minimize calls to this.  */
3819       target_available = EXPR_TARGET_AVAILABLE (expr);
3820 
3821       /* If insn was already scheduled on the current fence,
3822 	 set TARGET_AVAILABLE to -1 no matter what expr's attribute says.  */
3823       if (vinsn_vec_has_expr_p (vec_target_unavailable_vinsns, expr))
3824 	target_available = -1;
3825 
3826       /* If the availability of the EXPR is invalidated by the insertion of
3827 	 bookkeeping earlier, make sure that we won't choose this expr for
3828 	 scheduling if it's not separable, and if it is separable, then
3829 	 we have to recompute the set of available registers for it.  */
3830       if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
3831 	{
3832           vec_av_set.unordered_remove (n);
3833           if (sched_verbose >= 4)
3834             sel_print ("Expr %d is blocked by bookkeeping inserted earlier\n",
3835                        INSN_UID (insn));
3836           continue;
3837         }
3838 
3839       if (target_available == true)
3840 	{
3841           /* Do nothing -- we can use an existing register.  */
3842 	  is_orig_reg_p = EXPR_SEPARABLE_P (expr);
3843         }
3844       else if (/* Non-separable instruction will never
3845                   get another register. */
3846                (target_available == false
3847                 && !EXPR_SEPARABLE_P (expr))
3848                /* Don't try to find a register for low-priority expression.  */
3849                || (int) vec_av_set.length () - 1 - n >= max_insns_to_rename
3850                /* ??? FIXME: Don't try to rename data speculation.  */
3851                || (EXPR_SPEC_DONE_DS (expr) & BEGIN_DATA)
3852                || ! find_best_reg_for_expr (expr, bnds, &is_orig_reg_p))
3853         {
3854           vec_av_set.unordered_remove (n);
3855           if (sched_verbose >= 4)
3856             sel_print ("Expr %d has no suitable target register\n",
3857                        INSN_UID (insn));
3858           continue;
3859         }
3860 
3861       /* Filter expressions that need to be renamed or speculated when
3862 	 pipelining, because compensating register copies or speculation
3863 	 checks are likely to be placed near the beginning of the loop,
3864 	 causing a stall.  */
3865       if (pipelining_p && EXPR_ORIG_SCHED_CYCLE (expr) > 0
3866 	  && (!is_orig_reg_p || EXPR_SPEC_DONE_DS (expr) != 0))
3867 	{
3868 	  /* Estimation of number of cycles until loop branch for
3869 	     renaming/speculation to be successful.  */
3870 	  int need_n_ticks_till_branch = sel_vinsn_cost (EXPR_VINSN (expr));
3871 
3872 	  if ((int) current_loop_nest->ninsns < 9)
3873 	    {
3874 	      vec_av_set.unordered_remove (n);
3875 	      if (sched_verbose >= 4)
3876 		sel_print ("Pipelining expr %d will likely cause stall\n",
3877 			   INSN_UID (insn));
3878 	      continue;
3879 	    }
3880 
3881 	  if ((int) current_loop_nest->ninsns - num_insns_scheduled
3882 	      < need_n_ticks_till_branch * issue_rate / 2
3883 	      && est_ticks_till_branch < need_n_ticks_till_branch)
3884 	     {
3885 	       vec_av_set.unordered_remove (n);
3886 	       if (sched_verbose >= 4)
3887 		 sel_print ("Pipelining expr %d will likely cause stall\n",
3888 			    INSN_UID (insn));
3889 	       continue;
3890 	     }
3891 	}
3892 
3893       /* We want to schedule speculation checks as late as possible.  Discard
3894 	 them from av set if there are instructions with higher priority.  */
3895       if (sel_insn_is_speculation_check (insn)
3896 	  && EXPR_PRIORITY (expr) < av_max_prio)
3897 	{
3898           stalled++;
3899           min_need_stall = min_need_stall < 0 ? 1 : MIN (min_need_stall, 1);
3900           vec_av_set.unordered_remove (n);
3901 	  if (sched_verbose >= 4)
3902 	    sel_print ("Delaying speculation check %d until its first use\n",
3903 		       INSN_UID (insn));
3904 	  continue;
3905 	}
3906 
3907       /* Ignore EXPRs available from pipelining to update AV_MAX_PRIO.  */
3908       if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3909 	av_max_prio = MAX (av_max_prio, EXPR_PRIORITY (expr));
3910 
3911       /* Don't allow any insns whose data is not yet ready.
3912          Check first whether we've already tried them and failed.  */
3913       if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
3914 	{
3915           need_cycles = (FENCE_READY_TICKS (fence)[INSN_UID (insn)]
3916 			 - FENCE_CYCLE (fence));
3917 	  if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3918 	    est_ticks_till_branch = MAX (est_ticks_till_branch,
3919 					 EXPR_PRIORITY (expr) + need_cycles);
3920 
3921 	  if (need_cycles > 0)
3922 	    {
3923 	      stalled++;
3924 	      min_need_stall = (min_need_stall < 0
3925 				? need_cycles
3926 				: MIN (min_need_stall, need_cycles));
3927 	      vec_av_set.unordered_remove (n);
3928 
3929 	      if (sched_verbose >= 4)
3930 		sel_print ("Expr %d is not ready until cycle %d (cached)\n",
3931 			   INSN_UID (insn),
3932 			   FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
3933 	      continue;
3934 	    }
3935 	}
3936 
3937       /* Now resort to dependence analysis to find whether EXPR might be
3938          stalled due to dependencies from FENCE's context.  */
3939       need_cycles = tick_check_p (expr, dc, fence);
3940       new_prio = EXPR_PRIORITY (expr) + EXPR_PRIORITY_ADJ (expr) + need_cycles;
3941 
3942       if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3943 	est_ticks_till_branch = MAX (est_ticks_till_branch,
3944 				     new_prio);
3945 
3946       if (need_cycles > 0)
3947         {
3948           if (INSN_UID (insn) >= FENCE_READY_TICKS_SIZE (fence))
3949             {
3950               int new_size = INSN_UID (insn) * 3 / 2;
3951 
3952               FENCE_READY_TICKS (fence)
3953                 = (int *) xrecalloc (FENCE_READY_TICKS (fence),
3954                                      new_size, FENCE_READY_TICKS_SIZE (fence),
3955                                      sizeof (int));
3956             }
3957           FENCE_READY_TICKS (fence)[INSN_UID (insn)]
3958             = FENCE_CYCLE (fence) + need_cycles;
3959 
3960           stalled++;
3961           min_need_stall = (min_need_stall < 0
3962                             ? need_cycles
3963                             : MIN (min_need_stall, need_cycles));
3964 
3965           vec_av_set.unordered_remove (n);
3966 
3967           if (sched_verbose >= 4)
3968             sel_print ("Expr %d is not ready yet until cycle %d\n",
3969                        INSN_UID (insn),
3970                        FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
3971           continue;
3972         }
3973 
3974       if (sched_verbose >= 4)
3975         sel_print ("Expr %d is ok\n", INSN_UID (insn));
3976       min_need_stall = 0;
3977     }
3978 
3979   /* Clear SCHED_NEXT.  */
3980   if (FENCE_SCHED_NEXT (fence))
3981     {
3982       gcc_assert (sched_next_worked == 1);
3983       FENCE_SCHED_NEXT (fence) = NULL_RTX;
3984     }
3985 
3986   /* No need to stall if this variable was not initialized.  */
3987   if (min_need_stall < 0)
3988     min_need_stall = 0;
3989 
3990   if (vec_av_set.is_empty ())
3991     {
3992       /* We need to set *pneed_stall here, because later we skip this code
3993          when ready list is empty.  */
3994       *pneed_stall = min_need_stall;
3995       return false;
3996     }
3997   else
3998     gcc_assert (min_need_stall == 0);
3999 
4000   /* Sort the vector.  */
4001   vec_av_set.qsort (sel_rank_for_schedule);
4002 
4003   if (sched_verbose >= 4)
4004     {
4005       sel_print ("Total ready exprs: %d, stalled: %d\n",
4006                  vec_av_set.length (), stalled);
4007       sel_print ("Sorted av set (%d): ", vec_av_set.length ());
4008       FOR_EACH_VEC_ELT (vec_av_set, n, expr)
4009         dump_expr (expr);
4010       sel_print ("\n");
4011     }
4012 
4013   *pneed_stall = 0;
4014   return true;
4015 }
4016 
4017 /* Convert a vectored and sorted av set to the ready list that
4018    the rest of the backend wants to see.  */
4019 static void
4020 convert_vec_av_set_to_ready (void)
4021 {
4022   int n;
4023   expr_t expr;
4024 
4025   /* Allocate and fill the ready list from the sorted vector.  */
4026   ready.n_ready = vec_av_set.length ();
4027   ready.first = ready.n_ready - 1;
4028 
4029   gcc_assert (ready.n_ready > 0);
4030 
4031   if (ready.n_ready > max_issue_size)
4032     {
4033       max_issue_size = ready.n_ready;
4034       sched_extend_ready_list (ready.n_ready);
4035     }
4036 
4037   FOR_EACH_VEC_ELT (vec_av_set, n, expr)
4038     {
4039       vinsn_t vi = EXPR_VINSN (expr);
4040       insn_t insn = VINSN_INSN_RTX (vi);
4041 
4042       ready_try[n] = 0;
4043       ready.vec[n] = insn;
4044     }
4045 }
4046 
4047 /* Initialize ready list from *AV_PTR for the max_issue () call.
4048    If any unrecognizable insn found in *AV_PTR, return it (and skip
4049    max_issue).  BND and FENCE are current boundary and fence,
4050    respectively.  If we need to stall for some cycles before an expr
4051    from *AV_PTR would become available, write this number to *PNEED_STALL.  */
4052 static expr_t
4053 fill_ready_list (av_set_t *av_ptr, blist_t bnds, fence_t fence,
4054                  int *pneed_stall)
4055 {
4056   expr_t expr;
4057 
4058   /* We do not support multiple boundaries per fence.  */
4059   gcc_assert (BLIST_NEXT (bnds) == NULL);
4060 
4061   /* Process expressions requiring special handling first, i.e. pipelined,
4062      speculative and recog() < 0 expressions.  */
4063   process_pipelined_exprs (av_ptr);
4064   process_spec_exprs (av_ptr);
4065 
4066   /* A USE could be scheduled immediately.  */
4067   expr = process_use_exprs (av_ptr);
4068   if (expr)
4069     {
4070       *pneed_stall = 0;
4071       return expr;
4072     }
4073 
4074   /* Turn the av set to a vector for sorting.  */
4075   if (! fill_vec_av_set (*av_ptr, bnds, fence, pneed_stall))
4076     {
4077       ready.n_ready = 0;
4078       return NULL;
4079     }
4080 
4081   /* Build the final ready list.  */
4082   convert_vec_av_set_to_ready ();
4083   return NULL;
4084 }
4085 
4086 /* Wrapper for dfa_new_cycle ().  Returns TRUE if cycle was advanced.  */
4087 static bool
4088 sel_dfa_new_cycle (insn_t insn, fence_t fence)
4089 {
4090   int last_scheduled_cycle = FENCE_LAST_SCHEDULED_INSN (fence)
4091                              ? INSN_SCHED_CYCLE (FENCE_LAST_SCHEDULED_INSN (fence))
4092                              : FENCE_CYCLE (fence) - 1;
4093   bool res = false;
4094   int sort_p = 0;
4095 
4096   if (!targetm.sched.dfa_new_cycle)
4097     return false;
4098 
4099   memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4100 
4101   while (!sort_p && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
4102                                                  insn, last_scheduled_cycle,
4103                                                  FENCE_CYCLE (fence), &sort_p))
4104     {
4105       memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
4106       advance_one_cycle (fence);
4107       memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4108       res = true;
4109     }
4110 
4111   return res;
4112 }
4113 
4114 /* Invoke reorder* target hooks on the ready list.  Return the number of insns
4115    we can issue.  FENCE is the current fence.  */
4116 static int
4117 invoke_reorder_hooks (fence_t fence)
4118 {
4119   int issue_more;
4120   bool ran_hook = false;
4121 
4122   /* Call the reorder hook at the beginning of the cycle, and call
4123      the reorder2 hook in the middle of the cycle.  */
4124   if (FENCE_ISSUED_INSNS (fence) == 0)
4125     {
4126       if (targetm.sched.reorder
4127           && !SCHED_GROUP_P (ready_element (&ready, 0))
4128           && ready.n_ready > 1)
4129         {
4130           /* Don't give reorder the most prioritized insn as it can break
4131              pipelining.  */
4132           if (pipelining_p)
4133             --ready.n_ready;
4134 
4135           issue_more
4136             = targetm.sched.reorder (sched_dump, sched_verbose,
4137                                      ready_lastpos (&ready),
4138                                      &ready.n_ready, FENCE_CYCLE (fence));
4139 
4140           if (pipelining_p)
4141             ++ready.n_ready;
4142 
4143           ran_hook = true;
4144         }
4145       else
4146         /* Initialize can_issue_more for variable_issue.  */
4147         issue_more = issue_rate;
4148     }
4149   else if (targetm.sched.reorder2
4150            && !SCHED_GROUP_P (ready_element (&ready, 0)))
4151     {
4152       if (ready.n_ready == 1)
4153         issue_more =
4154           targetm.sched.reorder2 (sched_dump, sched_verbose,
4155                                   ready_lastpos (&ready),
4156                                   &ready.n_ready, FENCE_CYCLE (fence));
4157       else
4158         {
4159           if (pipelining_p)
4160             --ready.n_ready;
4161 
4162           issue_more =
4163             targetm.sched.reorder2 (sched_dump, sched_verbose,
4164                                     ready.n_ready
4165                                     ? ready_lastpos (&ready) : NULL,
4166                                     &ready.n_ready, FENCE_CYCLE (fence));
4167 
4168           if (pipelining_p)
4169             ++ready.n_ready;
4170         }
4171 
4172       ran_hook = true;
4173     }
4174   else
4175     issue_more = FENCE_ISSUE_MORE (fence);
4176 
4177   /* Ensure that ready list and vec_av_set are in line with each other,
4178      i.e. vec_av_set[i] == ready_element (&ready, i).  */
4179   if (issue_more && ran_hook)
4180     {
4181       int i, j, n;
4182       rtx *arr = ready.vec;
4183       expr_t *vec = vec_av_set.address ();
4184 
4185       for (i = 0, n = ready.n_ready; i < n; i++)
4186         if (EXPR_INSN_RTX (vec[i]) != arr[i])
4187           {
4188             expr_t tmp;
4189 
4190             for (j = i; j < n; j++)
4191               if (EXPR_INSN_RTX (vec[j]) == arr[i])
4192                 break;
4193             gcc_assert (j < n);
4194 
4195             tmp = vec[i];
4196             vec[i] = vec[j];
4197             vec[j] = tmp;
4198           }
4199     }
4200 
4201   return issue_more;
4202 }
4203 
4204 /* Return an EXPR corresponding to INDEX element of ready list, if
4205    FOLLOW_READY_ELEMENT is true (i.e., an expr of
4206    ready_element (&ready, INDEX) will be returned), and to INDEX element of
4207    ready.vec otherwise.  */
4208 static inline expr_t
4209 find_expr_for_ready (int index, bool follow_ready_element)
4210 {
4211   expr_t expr;
4212   int real_index;
4213 
4214   real_index = follow_ready_element ? ready.first - index : index;
4215 
4216   expr = vec_av_set[real_index];
4217   gcc_assert (ready.vec[real_index] == EXPR_INSN_RTX (expr));
4218 
4219   return expr;
4220 }
4221 
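/* Correspondence between the two orderings (illustrative): after
   convert_vec_av_set_to_ready, READY.FIRST == READY.N_READY - 1, so

     ready_element (&ready, i)  <->  vec_av_set[ready.first - i]
     ready.vec[i]               <->  vec_av_set[i]

   and in particular ready_element (&ready, 0), the best candidate,
   corresponds to the expression at the tail of VEC_AV_SET.  This is
   exactly the mapping REAL_INDEX computes above.  */
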
4222 /* Calculate insns worth trying via lookahead_guard hook.  Return a number
4223    of such insns found.  */
4224 static int
4225 invoke_dfa_lookahead_guard (void)
4226 {
4227   int i, n;
4228   bool have_hook
4229     = targetm.sched.first_cycle_multipass_dfa_lookahead_guard != NULL;
4230 
4231   if (sched_verbose >= 2)
4232     sel_print ("ready after reorder: ");
4233 
4234   for (i = 0, n = 0; i < ready.n_ready; i++)
4235     {
4236       expr_t expr;
4237       insn_t insn;
4238       int r;
4239 
4240       /* In this loop insn is Ith element of the ready list given by
4241          ready_element, not Ith element of ready.vec.  */
4242       insn = ready_element (&ready, i);
4243 
4244       if (! have_hook || i == 0)
4245         r = 0;
4246       else
4247         r = !targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn);
4248 
4249       gcc_assert (INSN_CODE (insn) >= 0);
4250 
4251       /* Only insns with ready_try = 0 can get here
4252          from fill_ready_list.  */
4253       gcc_assert (ready_try [i] == 0);
4254       ready_try[i] = r;
4255       if (!r)
4256         n++;
4257 
4258       expr = find_expr_for_ready (i, true);
4259 
4260       if (sched_verbose >= 2)
4261         {
4262           dump_vinsn (EXPR_VINSN (expr));
4263           sel_print (":%d; ", ready_try[i]);
4264         }
4265     }
4266 
4267   if (sched_verbose >= 2)
4268     sel_print ("\n");
4269   return n;
4270 }
4271 
4272 /* Calculate the number of privileged insns and return it.  */
4273 static int
4274 calculate_privileged_insns (void)
4275 {
4276   expr_t cur_expr, min_spec_expr = NULL;
4277   int privileged_n = 0, i;
4278 
4279   for (i = 0; i < ready.n_ready; i++)
4280     {
4281       if (ready_try[i])
4282         continue;
4283 
4284       if (! min_spec_expr)
4285 	min_spec_expr = find_expr_for_ready (i, true);
4286 
4287       cur_expr = find_expr_for_ready (i, true);
4288 
4289       if (EXPR_SPEC (cur_expr) > EXPR_SPEC (min_spec_expr))
4290         break;
4291 
4292       ++privileged_n;
4293     }
4294 
4295   if (i == ready.n_ready)
4296     privileged_n = 0;
4297 
4298   if (sched_verbose >= 2)
4299     sel_print ("privileged_n: %d insns with SPEC %d\n",
4300                privileged_n, privileged_n ? EXPR_SPEC (min_spec_expr) : -1);
4301   return privileged_n;
4302 }
4303 
4304 /* Call the rest of the hooks after the choice was made.  Return
4305    the number of insns that still can be issued given that the current
4306    number is ISSUE_MORE.  FENCE and BEST_INSN are the current fence
4307    and the insn chosen for scheduling, respectively.  */
4308 static int
4309 invoke_aftermath_hooks (fence_t fence, rtx best_insn, int issue_more)
4310 {
4311   gcc_assert (INSN_P (best_insn));
4312 
4313   /* First, call dfa_new_cycle, and then variable_issue, if available.  */
4314   sel_dfa_new_cycle (best_insn, fence);
4315 
4316   if (targetm.sched.variable_issue)
4317     {
4318       memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4319       issue_more =
4320         targetm.sched.variable_issue (sched_dump, sched_verbose, best_insn,
4321                                       issue_more);
4322       memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
4323     }
4324   else if (GET_CODE (PATTERN (best_insn)) != USE
4325            && GET_CODE (PATTERN (best_insn)) != CLOBBER)
4326     issue_more--;
4327 
4328   return issue_more;
4329 }
4330 
4331 /* Estimate the cost of issuing INSN on DFA state STATE.  */
4332 static int
4333 estimate_insn_cost (rtx insn, state_t state)
4334 {
4335   static state_t temp = NULL;
4336   int cost;
4337 
4338   if (!temp)
4339     temp = xmalloc (dfa_state_size);
4340 
4341   memcpy (temp, state, dfa_state_size);
4342   cost = state_transition (temp, insn);
4343 
4344   if (cost < 0)
4345     return 0;
4346   else if (cost == 0)
4347     return 1;
4348   return cost;
4349 }
4350 
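/* Probing sketch (illustrative): estimate_insn_cost works on a scratch
   copy of the DFA state, so it can ask "how many cycles until INSN
   could issue?" without committing anything:

     int cost = estimate_insn_cost (insn, FENCE_STATE (fence));
     if (cost == 0)
       ... INSN fits into the current cycle ...
     else
       ... INSN needs COST more cycle(s) ...

   The mapping follows state_transition's convention as used here: a
   negative return means the insn issues in the current state (cost 0),
   and zero is treated as "next cycle" (cost 1).  */
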
4351 /* Return the cost of issuing EXPR on the FENCE as estimated by DFA.
4352    This function properly handles ASMs, USEs etc.  */
4353 static int
4354 get_expr_cost (expr_t expr, fence_t fence)
4355 {
4356   rtx insn = EXPR_INSN_RTX (expr);
4357 
4358   if (recog_memoized (insn) < 0)
4359     {
4360       if (!FENCE_STARTS_CYCLE_P (fence)
4361 	  && INSN_ASM_P (insn))
4362 	/* This is an asm insn that we tried to issue on a cycle
4363 	   other than the first.  Issue it on the next cycle.  */
4364 	return 1;
4365       else
4366 	/* A USE insn, or something else we don't need to
4367 	   understand.  We can't pass these directly to
4368 	   state_transition because it will trigger a
4369 	   fatal error for unrecognizable insns.  */
4370 	return 0;
4371     }
4372   else
4373     return estimate_insn_cost (insn, FENCE_STATE (fence));
4374 }
4375 
4376 /* Find the best insn for scheduling, either via max_issue or just take
4377    the most prioritized available.  */
4378 static int
4379 choose_best_insn (fence_t fence, int privileged_n, int *index)
4380 {
4381   int can_issue = 0;
4382 
4383   if (dfa_lookahead > 0)
4384     {
4385       cycle_issued_insns = FENCE_ISSUED_INSNS (fence);
4386       /* TODO: pass equivalent of first_cycle_insn_p to max_issue ().  */
4387       can_issue = max_issue (&ready, privileged_n,
4388                              FENCE_STATE (fence), true, index);
4389       if (sched_verbose >= 2)
4390         sel_print ("max_issue: we can issue %d insns, already did %d insns\n",
4391                    can_issue, FENCE_ISSUED_INSNS (fence));
4392     }
4393   else
4394     {
4395       /* We can't use max_issue; just return the first available element.  */
4396       int i;
4397 
4398       for (i = 0; i < ready.n_ready; i++)
4399 	{
4400 	  expr_t expr = find_expr_for_ready (i, true);
4401 
4402 	  if (get_expr_cost (expr, fence) < 1)
4403 	    {
4404 	      can_issue = can_issue_more;
4405 	      *index = i;
4406 
4407 	      if (sched_verbose >= 2)
4408 		sel_print ("using %dth insn from the ready list\n", i + 1);
4409 
4410 	      break;
4411 	    }
4412 	}
4413 
4414       if (i == ready.n_ready)
4415 	{
4416 	  can_issue = 0;
4417 	  *index = -1;
4418 	}
4419     }
4420 
4421   return can_issue;
4422 }
4423 
4424 /* Choose the best expr from *AV_VLIW_PTR and a suitable register for it.
4425    BNDS and FENCE are current boundaries and scheduling fence respectively.
4426    Return the expr found, or NULL if nothing can be issued at the moment.
4427    Write to PNEED_STALL the number of cycles to stall if no expr was found.  */
4428 static expr_t
4429 find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
4430                 int *pneed_stall)
4431 {
4432   expr_t best;
4433 
4434   /* Choose the best insn for scheduling via:
4435      1) sorting the ready list based on priority;
4436      2) calling the reorder hook;
4437      3) calling max_issue.  */
4438   best = fill_ready_list (av_vliw_ptr, bnds, fence, pneed_stall);
4439   if (best == NULL && ready.n_ready > 0)
4440     {
4441       int privileged_n, index;
4442 
4443       can_issue_more = invoke_reorder_hooks (fence);
4444       if (can_issue_more > 0)
4445         {
4446           /* Try choosing the best insn until we find one that can be
4447              scheduled despite liveness restrictions on its destination register.
4448              In the future, we'd like to choose once and then just probe insns
4449              in the order of their priority.  */
4450           invoke_dfa_lookahead_guard ();
4451           privileged_n = calculate_privileged_insns ();
4452           can_issue_more = choose_best_insn (fence, privileged_n, &index);
4453           if (can_issue_more)
4454             best = find_expr_for_ready (index, true);
4455         }
4456       /* We had some available insns, so if we can't issue them,
4457          we have a stall.  */
4458       if (can_issue_more == 0)
4459         {
4460           best = NULL;
4461           *pneed_stall = 1;
4462         }
4463     }
4464 
4465   if (best != NULL)
4466     {
4467       can_issue_more = invoke_aftermath_hooks (fence, EXPR_INSN_RTX (best),
4468                                                can_issue_more);
4469       if (targetm.sched.variable_issue
4470 	  && can_issue_more == 0)
4471         *pneed_stall = 1;
4472     }
4473 
4474   if (sched_verbose >= 2)
4475     {
4476       if (best != NULL)
4477         {
4478           sel_print ("Best expression (vliw form): ");
4479           dump_expr (best);
4480           sel_print ("; cycle %d\n", FENCE_CYCLE (fence));
4481         }
4482       else
4483         sel_print ("No best expr found!\n");
4484     }
4485 
4486   return best;
4487 }
4488 
4489 
4490 /* Functions that implement the core of the scheduler.  */
4491 
4492 
4493 /* Emit an instruction from EXPR with SEQNO and VINSN after
4494    PLACE_TO_INSERT.  */
4495 static insn_t
4496 emit_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
4497                            insn_t place_to_insert)
4498 {
4499   /* This assert fails when we have identical instructions,
4500      one of which dominates the other.  In this case move_op ()
4501      finds the first instruction and doesn't search for the second one.
4502      The solution would be to compute the av_set after the first found
4503      insn and, if the insn is present in that set, continue searching.
4504      For now we work around this issue in move_op.  */
4505   gcc_assert (!INSN_IN_STREAM_P (EXPR_INSN_RTX (expr)));
4506 
4507   if (EXPR_WAS_RENAMED (expr))
4508     {
4509       unsigned regno = expr_dest_regno (expr);
4510 
4511       if (HARD_REGISTER_NUM_P (regno))
4512 	{
4513 	  df_set_regs_ever_live (regno, true);
4514 	  reg_rename_tick[regno] = ++reg_rename_this_tick;
4515 	}
4516     }
4517 
4518   return sel_gen_insn_from_expr_after (expr, vinsn, seqno,
4519                                        place_to_insert);
4520 }
4521 
4522 /* Return TRUE if BB can hold bookkeeping code.  */
4523 static bool
4524 block_valid_for_bookkeeping_p (basic_block bb)
4525 {
4526   insn_t bb_end = BB_END (bb);
4527 
4528   if (!in_current_region_p (bb) || EDGE_COUNT (bb->succs) > 1)
4529     return false;
4530 
4531   if (INSN_P (bb_end))
4532     {
4533       if (INSN_SCHED_TIMES (bb_end) > 0)
4534 	return false;
4535     }
4536   else
4537     gcc_assert (NOTE_INSN_BASIC_BLOCK_P (bb_end));
4538 
4539   return true;
4540 }
4541 
4542 /* Attempt to find a block that can hold bookkeeping code for path(s) incoming
4543    into E2->dest, except from E1->src (there may be a sequence of empty basic
4544    blocks between E1->src and E2->dest).  Return found block, or NULL if new
4545    one must be created.  If LAX holds, don't assume there is a simple path
4546    from E1->src to E2->dest.  */
4547 static basic_block
4548 find_block_for_bookkeeping (edge e1, edge e2, bool lax)
4549 {
4550   basic_block candidate_block = NULL;
4551   edge e;
4552 
4553   /* Loop over edges from E1 to E2, inclusive.  */
4554   for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR; e = EDGE_SUCC (e->dest, 0))
4555     {
4556       if (EDGE_COUNT (e->dest->preds) == 2)
4557 	{
4558 	  if (candidate_block == NULL)
4559 	    candidate_block = (EDGE_PRED (e->dest, 0) == e
4560 			       ? EDGE_PRED (e->dest, 1)->src
4561 			       : EDGE_PRED (e->dest, 0)->src);
4562 	  else
4563 	    /* Found an additional edge entering the path from e1 to e2
4564 	       from aside.  */
4565 	    return NULL;
4566 	}
4567       else if (EDGE_COUNT (e->dest->preds) > 2)
4568 	/* Several edges enter the path from e1 to e2 from aside.  */
4569 	return NULL;
4570 
4571       if (e == e2)
4572 	return ((!lax || candidate_block)
4573 		&& block_valid_for_bookkeeping_p (candidate_block)
4574 		? candidate_block
4575 		: NULL);
4576 
4577       if (lax && EDGE_COUNT (e->dest->succs) != 1)
4578 	return NULL;
4579     }
4580 
4581   if (lax)
4582     return NULL;
4583 
4584   gcc_unreachable ();
4585 }
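
/* Illustration (hypothetical CFG): with E1 = (A->D) and E2 = (D->E), where D
   has exactly one other predecessor B, the walk above returns B as the
   candidate bookkeeping block, provided B passes
   block_valid_for_bookkeeping_p:

       A   B
        \ /
         D
         |
         E

   Any further edge entering the path from E1->src to E2->dest from aside
   makes the function return NULL, so that a new block gets created.  */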
4586 
4587 /* Create new basic block for bookkeeping code for path(s) incoming into
4588    E2->dest, except from E1->src.  Return created block.  */
4589 static basic_block
4590 create_block_for_bookkeeping (edge e1, edge e2)
4591 {
4592   basic_block new_bb, bb = e2->dest;
4593 
4594   /* Check that we don't spoil the loop structure.  */
4595   if (current_loop_nest)
4596     {
4597       basic_block latch = current_loop_nest->latch;
4598 
4599       /* We do not split the header.  */
4600       gcc_assert (e2->dest != current_loop_nest->header);
4601 
4602       /* We do not redirect the only edge to the latch block.  */
4603       gcc_assert (e1->dest != latch
4604 		  || !single_pred_p (latch)
4605 		  || e1 != single_pred_edge (latch));
4606     }
4607 
4608   /* Split BB to insert BOOK_INSN there.  */
4609   new_bb = sched_split_block (bb, NULL);
4610 
4611   /* Move note_list from the upper bb.  */
4612   gcc_assert (BB_NOTE_LIST (new_bb) == NULL_RTX);
4613   BB_NOTE_LIST (new_bb) = BB_NOTE_LIST (bb);
4614   BB_NOTE_LIST (bb) = NULL_RTX;
4615 
4616   gcc_assert (e2->dest == bb);
4617 
4618   /* Skip block for bookkeeping copy when leaving E1->src.  */
4619   if (e1->flags & EDGE_FALLTHRU)
4620     sel_redirect_edge_and_branch_force (e1, new_bb);
4621   else
4622     sel_redirect_edge_and_branch (e1, new_bb);
4623 
4624   gcc_assert (e1->dest == new_bb);
4625   gcc_assert (sel_bb_empty_p (bb));
4626 
4627   /* To keep basic block numbers in sync between debug and non-debug
4628      compilations, we have to rotate blocks here.  Consider that we
4629      started from (a,b)->d, (c,d)->e, and d contained only debug
4630      insns.  It would have been removed before if the debug insns
4631      weren't there, so we'd have split e rather than d.  So what we do
4632      now is to swap the block numbers of new_bb and
4633      single_succ(new_bb) == e, so that the insns that were in e before
4634      get the new block number.  */
4635 
4636   if (MAY_HAVE_DEBUG_INSNS)
4637     {
4638       basic_block succ;
4639       insn_t insn = sel_bb_head (new_bb);
4640       insn_t last;
4641 
4642       if (DEBUG_INSN_P (insn)
4643 	  && single_succ_p (new_bb)
4644 	  && (succ = single_succ (new_bb))
4645 	  && succ != EXIT_BLOCK_PTR
4646 	  && DEBUG_INSN_P ((last = sel_bb_end (new_bb))))
4647 	{
4648 	  while (insn != last && (DEBUG_INSN_P (insn) || NOTE_P (insn)))
4649 	    insn = NEXT_INSN (insn);
4650 
4651 	  if (insn == last)
4652 	    {
4653 	      sel_global_bb_info_def gbi;
4654 	      sel_region_bb_info_def rbi;
4655 	      int i;
4656 
4657 	      if (sched_verbose >= 2)
4658 		sel_print ("Swapping block ids %i and %i\n",
4659 			   new_bb->index, succ->index);
4660 
4661 	      i = new_bb->index;
4662 	      new_bb->index = succ->index;
4663 	      succ->index = i;
4664 
4665 	      SET_BASIC_BLOCK (new_bb->index, new_bb);
4666 	      SET_BASIC_BLOCK (succ->index, succ);
4667 
4668 	      memcpy (&gbi, SEL_GLOBAL_BB_INFO (new_bb), sizeof (gbi));
4669 	      memcpy (SEL_GLOBAL_BB_INFO (new_bb), SEL_GLOBAL_BB_INFO (succ),
4670 		      sizeof (gbi));
4671 	      memcpy (SEL_GLOBAL_BB_INFO (succ), &gbi, sizeof (gbi));
4672 
4673 	      memcpy (&rbi, SEL_REGION_BB_INFO (new_bb), sizeof (rbi));
4674 	      memcpy (SEL_REGION_BB_INFO (new_bb), SEL_REGION_BB_INFO (succ),
4675 		      sizeof (rbi));
4676 	      memcpy (SEL_REGION_BB_INFO (succ), &rbi, sizeof (rbi));
4677 
4678 	      i = BLOCK_TO_BB (new_bb->index);
4679 	      BLOCK_TO_BB (new_bb->index) = BLOCK_TO_BB (succ->index);
4680 	      BLOCK_TO_BB (succ->index) = i;
4681 
4682 	      i = CONTAINING_RGN (new_bb->index);
4683 	      CONTAINING_RGN (new_bb->index) = CONTAINING_RGN (succ->index);
4684 	      CONTAINING_RGN (succ->index) = i;
4685 
4686 	      for (i = 0; i < current_nr_blocks; i++)
4687 		if (BB_TO_BLOCK (i) == succ->index)
4688 		  BB_TO_BLOCK (i) = new_bb->index;
4689 		else if (BB_TO_BLOCK (i) == new_bb->index)
4690 		  BB_TO_BLOCK (i) = succ->index;
4691 
4692 	      FOR_BB_INSNS (new_bb, insn)
4693 		if (INSN_P (insn))
4694 		  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
4695 
4696 	      FOR_BB_INSNS (succ, insn)
4697 		if (INSN_P (insn))
4698 		  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = succ->index;
4699 
4700 	      if (bitmap_clear_bit (code_motion_visited_blocks, new_bb->index))
4701 		bitmap_set_bit (code_motion_visited_blocks, succ->index);
4702 
4703 	      gcc_assert (LABEL_P (BB_HEAD (new_bb))
4704 			  && LABEL_P (BB_HEAD (succ)));
4705 
4706 	      if (sched_verbose >= 4)
4707 		sel_print ("Swapping code labels %i and %i\n",
4708 			   CODE_LABEL_NUMBER (BB_HEAD (new_bb)),
4709 			   CODE_LABEL_NUMBER (BB_HEAD (succ)));
4710 
4711 	      i = CODE_LABEL_NUMBER (BB_HEAD (new_bb));
4712 	      CODE_LABEL_NUMBER (BB_HEAD (new_bb))
4713 		= CODE_LABEL_NUMBER (BB_HEAD (succ));
4714 	      CODE_LABEL_NUMBER (BB_HEAD (succ)) = i;
4715 	    }
4716 	}
4717     }
4718 
4719   return bb;
4720 }
4721 
4722 /* Return insn after which we must insert bookkeeping code for path(s) incoming
4723    into E2->dest, except from E1->src.  If the returned insn immediately
4724    precedes a fence, assign that fence to *FENCE_TO_REWIND.  */
4725 static insn_t
4726 find_place_for_bookkeeping (edge e1, edge e2, fence_t *fence_to_rewind)
4727 {
4728   insn_t place_to_insert;
4729   /* Find a basic block that can hold bookkeeping.  If one is found, do not
4730      create a new basic block but insert the bookkeeping there.  */
4731   basic_block book_block = find_block_for_bookkeeping (e1, e2, FALSE);
4732 
4733   if (book_block)
4734     {
4735       place_to_insert = BB_END (book_block);
4736 
4737       /* Don't use a block containing only debug insns for
4738 	 bookkeeping; this would cause scheduling differences between debug
4739 	 and non-debug compilations, as the block would have been
4740 	 removed already.  */
4741       if (DEBUG_INSN_P (place_to_insert))
4742 	{
4743 	  rtx insn = sel_bb_head (book_block);
4744 
4745 	  while (insn != place_to_insert &&
4746 		 (DEBUG_INSN_P (insn) || NOTE_P (insn)))
4747 	    insn = NEXT_INSN (insn);
4748 
4749 	  if (insn == place_to_insert)
4750 	    book_block = NULL;
4751 	}
4752     }
4753 
4754   if (!book_block)
4755     {
4756       book_block = create_block_for_bookkeeping (e1, e2);
4757       place_to_insert = BB_END (book_block);
4758       if (sched_verbose >= 9)
4759 	sel_print ("New block is %i, split from bookkeeping block %i\n",
4760 		   EDGE_SUCC (book_block, 0)->dest->index, book_block->index);
4761     }
4762   else
4763     {
4764       if (sched_verbose >= 9)
4765 	sel_print ("Pre-existing bookkeeping block is %i\n", book_block->index);
4766     }
4767 
4768   *fence_to_rewind = NULL;
4769   /* If basic block ends with a jump, insert bookkeeping code right before it.
4770      Notice if we are crossing a fence when taking PREV_INSN.  */
4771   if (INSN_P (place_to_insert) && control_flow_insn_p (place_to_insert))
4772     {
4773       *fence_to_rewind = flist_lookup (fences, place_to_insert);
4774       place_to_insert = PREV_INSN (place_to_insert);
4775     }
4776 
4777   return place_to_insert;
4778 }
4779 
4780 /* Find a proper seqno for a bookkeeping insn inserted at PLACE_TO_INSERT
4781    for JOIN_POINT.  */
4782 static int
4783 find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point)
4784 {
4785   int seqno;
4786   rtx next;
4787 
4788   /* Check if we are about to insert bookkeeping copy before a jump, and use
4789      jump's seqno for the copy; otherwise, use JOIN_POINT's seqno.  */
4790   next = NEXT_INSN (place_to_insert);
4791   if (INSN_P (next)
4792       && JUMP_P (next)
4793       && BLOCK_FOR_INSN (next) == BLOCK_FOR_INSN (place_to_insert))
4794     {
4795       gcc_assert (INSN_SCHED_TIMES (next) == 0);
4796       seqno = INSN_SEQNO (next);
4797     }
4798   else if (INSN_SEQNO (join_point) > 0)
4799     seqno = INSN_SEQNO (join_point);
4800   else
4801     {
4802       seqno = get_seqno_by_preds (place_to_insert);
4803 
4804       /* Sometimes the fences can move in such a way that there will be
4805          no instructions with positive seqno around this bookkeeping.
4806          This means that there will be no way to get to it by a regular
4807          fence movement.  Never mind, because we pick up such pieces for
4808          rescheduling anyway, so any positive value will do for now.  */
4809       if (seqno < 0)
4810         {
4811           gcc_assert (pipelining_p);
4812           seqno = 1;
4813         }
4814     }
4815 
4816   gcc_assert (seqno > 0);
4817   return seqno;
4818 }
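
/* Note (illustrative summary): the three cases above, in order of
   preference: a not-yet-scheduled jump right after the insertion point
   donates its seqno to the bookkeeping copy; otherwise the join point's
   positive seqno is used; otherwise the seqno is derived from the
   predecessors, falling back to 1 when pipelining has left no positive
   seqno around the insertion point.  */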
4819 
4820 /* Insert a bookkeeping copy of C_EXPR's insn after PLACE_TO_INSERT, assigning
4821    NEW_SEQNO to it.  Return the created insn.  */
4822 static insn_t
4823 emit_bookkeeping_insn (insn_t place_to_insert, expr_t c_expr, int new_seqno)
4824 {
4825   rtx new_insn_rtx = create_copy_of_insn_rtx (EXPR_INSN_RTX (c_expr));
4826 
4827   vinsn_t new_vinsn
4828     = create_vinsn_from_insn_rtx (new_insn_rtx,
4829 				  VINSN_UNIQUE_P (EXPR_VINSN (c_expr)));
4830 
4831   insn_t new_insn = emit_insn_from_expr_after (c_expr, new_vinsn, new_seqno,
4832 					       place_to_insert);
4833 
4834   INSN_SCHED_TIMES (new_insn) = 0;
4835   bitmap_set_bit (current_copies, INSN_UID (new_insn));
4836 
4837   return new_insn;
4838 }
4839 
4840 /* Generate a bookkeeping copy of C_EXPR's insn for path(s) incoming into
4841    E2->dest, except from E1->src (there may be a sequence of empty blocks
4842    between E1->src and E2->dest).  Return the block containing the copy.
4843    All scheduler data is initialized for the newly created insn.  */
4844 static basic_block
4845 generate_bookkeeping_insn (expr_t c_expr, edge e1, edge e2)
4846 {
4847   insn_t join_point, place_to_insert, new_insn;
4848   int new_seqno;
4849   bool need_to_exchange_data_sets;
4850   fence_t fence_to_rewind;
4851 
4852   if (sched_verbose >= 4)
4853     sel_print ("Generating bookkeeping insn (%d->%d)\n", e1->src->index,
4854 	       e2->dest->index);
4855 
4856   join_point = sel_bb_head (e2->dest);
4857   place_to_insert = find_place_for_bookkeeping (e1, e2, &fence_to_rewind);
4858   new_seqno = find_seqno_for_bookkeeping (place_to_insert, join_point);
4859   need_to_exchange_data_sets
4860     = sel_bb_empty_p (BLOCK_FOR_INSN (place_to_insert));
4861 
4862   new_insn = emit_bookkeeping_insn (place_to_insert, c_expr, new_seqno);
4863 
4864   if (fence_to_rewind)
4865     FENCE_INSN (fence_to_rewind) = new_insn;
4866 
4867   /* When inserting a bookkeeping insn in a new block, the av sets should be
4868      as follows: the old basic block (that now holds the bookkeeping) keeps
4869      the data sets it had before the bookkeeping was generated, while the new
4870      basic block (that now holds all the other insns of the old basic block)
4871      has invalid data sets.  So exchange the data sets for these basic blocks,
4872      as sel_split_block mistakenly exchanges them in this case.  We cannot do
4873      it earlier because, when a single instruction is added to a new basic
4874      block, that block should hold a NULL lv_set.  */
4875   if (need_to_exchange_data_sets)
4876     exchange_data_sets (BLOCK_FOR_INSN (new_insn),
4877 			BLOCK_FOR_INSN (join_point));
4878 
4879   stat_bookkeeping_copies++;
4880   return BLOCK_FOR_INSN (new_insn);
4881 }
4882 
4883 /* Remove from AV_PTR all insns that may need bookkeeping when scheduling
4884    on FENCE, but we are unable to copy them.  */
4885 static void
4886 remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr)
4887 {
4888   expr_t expr;
4889   av_set_iterator i;
4890 
4891   /*  An expression does not need bookkeeping if it is available on all paths
4892       from the current block to the original block and the current block
4893       dominates the original block.  We check availability on all paths by
4894       examining EXPR_SPEC; this is not equivalent, because it may be positive
4895       even if the expr is available on all paths (but if the expr is
4896       unavailable on some path, EXPR_SPEC is guaranteed to be positive).  */
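
  /* For example (illustrative): an expr that was moved up through only one
     branch of a conditional join acquires a positive EXPR_SPEC, so it is
     conservatively treated here as needing bookkeeping even when the
     dominance condition above would otherwise let it through.  */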
4897 
4898   FOR_EACH_EXPR_1 (expr, i, av_ptr)
4899     {
4900       if (!control_flow_insn_p (EXPR_INSN_RTX (expr))
4901 	  && (!bookkeeping_p || VINSN_UNIQUE_P (EXPR_VINSN (expr)))
4902 	  && (EXPR_SPEC (expr)
4903 	      || !EXPR_ORIG_BB_INDEX (expr)
4904 	      || !dominated_by_p (CDI_DOMINATORS,
4905 				  BASIC_BLOCK (EXPR_ORIG_BB_INDEX (expr)),
4906 				  BLOCK_FOR_INSN (FENCE_INSN (fence)))))
4907 	{
4908           if (sched_verbose >= 4)
4909             sel_print ("Expr %d removed because it would need bookkeeping, which "
4910                        "cannot be created\n", INSN_UID (EXPR_INSN_RTX (expr)));
4911 	  av_set_iter_remove (&i);
4912 	}
4913     }
4914 }
4915 
4916 /* Moving a conditional jump through some instructions.
4917 
4918    Consider example:
4919 
4920        ...                     <- current scheduling point
4921        NOTE BASIC BLOCK:       <- bb header
4922        (p8)  add r14=r14+0x9;;
4923        (p8)  mov [r14]=r23
4924        (!p8) jump L1;;
4925        NOTE BASIC BLOCK:
4926        ...
4927 
4928    We can schedule the jump one cycle earlier than the mov, because they
4929    cannot be executed together, as their predicates are mutually exclusive.
4930 
4931    This is done as follows: first, a new fallthrough basic block is created
4932    after the jump (this can always be done, because there already has to be a
4933    fallthrough block, where control flow goes when the predicate is true, as
4934    in our example; otherwise there would be a dependence between those
4935    instructions and the jump, and we could not schedule the jump right now);
4936    next, all instructions between the jump and the current scheduling point
4937    are moved to this new block.  And the result is this:
4938 
4939       NOTE BASIC BLOCK:
4940       (!p8) jump L1           <- current scheduling point
4941       NOTE BASIC BLOCK:       <- bb header
4942       (p8)  add r14=r14+0x9;;
4943       (p8)  mov [r14]=r23
4944       NOTE BASIC BLOCK:
4945       ...
4946 */
4947 static void
4948 move_cond_jump (rtx insn, bnd_t bnd)
4949 {
4950   edge ft_edge;
4951   basic_block block_from, block_next, block_new, block_bnd, bb;
4952   rtx next, prev, link, head;
4953 
4954   block_from = BLOCK_FOR_INSN (insn);
4955   block_bnd = BLOCK_FOR_INSN (BND_TO (bnd));
4956   prev = BND_TO (bnd);
4957 
4958 #ifdef ENABLE_CHECKING
4959   /* Moving the jump should not cross any other jumps or beginnings of new
4960      basic blocks.  The only exception is when we move a jump through
4961      mutually exclusive insns along fallthru edges.  */
4962   if (block_from != block_bnd)
4963     {
4964       bb = block_from;
4965       for (link = PREV_INSN (insn); link != PREV_INSN (prev);
4966            link = PREV_INSN (link))
4967         {
4968           if (INSN_P (link))
4969             gcc_assert (sched_insns_conditions_mutex_p (insn, link));
4970           if (BLOCK_FOR_INSN (link) && BLOCK_FOR_INSN (link) != bb)
4971             {
4972               gcc_assert (single_pred (bb) == BLOCK_FOR_INSN (link));
4973               bb = BLOCK_FOR_INSN (link);
4974             }
4975         }
4976     }
4977 #endif
4978 
4979   /* The jump is moved to the boundary.  */
4980   next = PREV_INSN (insn);
4981   BND_TO (bnd) = insn;
4982 
4983   ft_edge = find_fallthru_edge_from (block_from);
4984   block_next = ft_edge->dest;
4985   /* There must be a fallthrough block (otherwise, where would control
4986      flow go if the jump predicate were false?).  */
4987   gcc_assert (block_next);
4988 
4989   /* Create new empty basic block after source block.  */
4990   block_new = sel_split_edge (ft_edge);
4991   gcc_assert (block_new->next_bb == block_next
4992               && block_from->next_bb == block_new);
4993 
4994   /* Move all instructions except INSN to BLOCK_NEW.  */
4995   bb = block_bnd;
4996   head = BB_HEAD (block_new);
4997   while (bb != block_from->next_bb)
4998     {
4999       rtx from, to;
5000       from = bb == block_bnd ? prev : sel_bb_head (bb);
5001       to = bb == block_from ? next : sel_bb_end (bb);
5002 
5003       /* The jump being moved can be the first insn in the block.
5004          In this case we don't have to move anything in this block.  */
5005       if (NEXT_INSN (to) != from)
5006         {
5007           reorder_insns (from, to, head);
5008 
5009           for (link = to; link != head; link = PREV_INSN (link))
5010             EXPR_ORIG_BB_INDEX (INSN_EXPR (link)) = block_new->index;
5011           head = to;
5012         }
5013 
5014       /* Clean up possibly empty blocks left behind.  */
5015       block_next = bb->next_bb;
5016       if (bb != block_from)
5017 	tidy_control_flow (bb, false);
5018       bb = block_next;
5019     }
5020 
5021   /* Assert there is no jump to BLOCK_NEW, only a fallthrough edge.  */
5022   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (block_new)));
5023 
5024   gcc_assert (!sel_bb_empty_p (block_from)
5025               && !sel_bb_empty_p (block_new));
5026 
5027   /* Update data sets for BLOCK_NEW to represent that INSN and
5028      the instructions from the other branch of INSN are no longer
5029      available at BLOCK_NEW.  */
5030   BB_AV_LEVEL (block_new) = global_level;
5031   gcc_assert (BB_LV_SET (block_new) == NULL);
5032   BB_LV_SET (block_new) = get_clear_regset_from_pool ();
5033   update_data_sets (sel_bb_head (block_new));
5034 
5035   /* INSN is a new basic block header, so prepare its data
5036      structures and update availability and liveness sets.  */
5037   update_data_sets (insn);
5038 
5039   if (sched_verbose >= 4)
5040     sel_print ("Moving jump %d\n", INSN_UID (insn));
5041 }
5042 
5043 /* Remove nops generated during move_op to prevent removal of empty
5044    basic blocks.  */
5045 static void
5046 remove_temp_moveop_nops (bool full_tidying)
5047 {
5048   int i;
5049   insn_t insn;
5050 
5051   FOR_EACH_VEC_ELT (vec_temp_moveop_nops, i, insn)
5052     {
5053       gcc_assert (INSN_NOP_P (insn));
5054       return_nop_to_pool (insn, full_tidying);
5055     }
5056 
5057   /* Empty the vector.  */
5058   if (vec_temp_moveop_nops.length () > 0)
5059     vec_temp_moveop_nops.block_remove (0, vec_temp_moveop_nops.length ());
5060 }
5061 
5062 /* Records the maximal UID before moving up an instruction.  Used for
5063    distinguishing between bookkeeping copies and original insns.  */
5064 static int max_uid_before_move_op = 0;
5065 
5066 /* Remove from AV_VLIW_P all instructions but the next one when the debug
5067    counter tells us so.  The next instruction is fetched from BNDS.  */
5068 static void
5069 remove_insns_for_debug (blist_t bnds, av_set_t *av_vliw_p)
5070 {
5071   if (! dbg_cnt (sel_sched_insn_cnt))
5072     /* Leave only the next insn in av_vliw.  */
5073     {
5074       av_set_iterator av_it;
5075       expr_t expr;
5076       bnd_t bnd = BLIST_BND (bnds);
5077       insn_t next = BND_TO (bnd);
5078 
5079       gcc_assert (BLIST_NEXT (bnds) == NULL);
5080 
5081       FOR_EACH_EXPR_1 (expr, av_it, av_vliw_p)
5082         if (EXPR_INSN_RTX (expr) != next)
5083           av_set_iter_remove (&av_it);
5084     }
5085 }
5086 
5087 /* Compute available instructions on BNDS.  FENCE is the current fence.  Write
5088    the computed set to *AV_VLIW_P.  */
5089 static void
5090 compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p)
5091 {
5092   if (sched_verbose >= 2)
5093     {
5094       sel_print ("Boundaries: ");
5095       dump_blist (bnds);
5096       sel_print ("\n");
5097     }
5098 
5099   for (; bnds; bnds = BLIST_NEXT (bnds))
5100     {
5101       bnd_t bnd = BLIST_BND (bnds);
5102       av_set_t av1_copy;
5103       insn_t bnd_to = BND_TO (bnd);
5104 
5105       /* Rewind BND->TO to the basic block header in case some bookkeeping
5106          instructions were inserted before BND->TO and it needs to be
5107          adjusted.  */
5108       if (sel_bb_head_p (bnd_to))
5109         gcc_assert (INSN_SCHED_TIMES (bnd_to) == 0);
5110       else
5111         while (INSN_SCHED_TIMES (PREV_INSN (bnd_to)) == 0)
5112           {
5113             bnd_to = PREV_INSN (bnd_to);
5114             if (sel_bb_head_p (bnd_to))
5115               break;
5116           }
5117 
5118       if (BND_TO (bnd) != bnd_to)
5119 	{
5120 	  gcc_assert (FENCE_INSN (fence) == BND_TO (bnd));
5121 	  FENCE_INSN (fence) = bnd_to;
5122 	  BND_TO (bnd) = bnd_to;
5123 	}
5124 
5125       av_set_clear (&BND_AV (bnd));
5126       BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true);
5127 
5128       av_set_clear (&BND_AV1 (bnd));
5129       BND_AV1 (bnd) = av_set_copy (BND_AV (bnd));
5130 
5131       moveup_set_inside_insn_group (&BND_AV1 (bnd), NULL);
5132 
5133       av1_copy = av_set_copy (BND_AV1 (bnd));
5134       av_set_union_and_clear (av_vliw_p, &av1_copy, NULL);
5135     }
5136 
5137   if (sched_verbose >= 2)
5138     {
5139       sel_print ("Available exprs (vliw form): ");
5140       dump_av_set (*av_vliw_p);
5141       sel_print ("\n");
5142     }
5143 }
5144 
5145 /* Calculate the sequential av set on BND corresponding to the EXPR_VLIW
5146    expression.  When FOR_MOVEOP is true, also replace the register of
5147    expressions found with the register from EXPR_VLIW.  */
5148 static av_set_t
5149 find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop)
5150 {
5151   av_set_t expr_seq = NULL;
5152   expr_t expr;
5153   av_set_iterator i;
5154 
5155   FOR_EACH_EXPR (expr, i, BND_AV (bnd))
5156     {
5157       if (equal_after_moveup_path_p (expr, NULL, expr_vliw))
5158         {
5159           if (for_moveop)
5160             {
5161               /* The sequential expression has the right form to pass
5162                  to move_op except when renaming happened.  Put the
5163                  correct register in EXPR then.  */
5164               if (EXPR_SEPARABLE_P (expr) && REG_P (EXPR_LHS (expr)))
5165 		{
5166                   if (expr_dest_regno (expr) != expr_dest_regno (expr_vliw))
5167 		    {
5168 		      replace_dest_with_reg_in_expr (expr, EXPR_LHS (expr_vliw));
5169 		      stat_renamed_scheduled++;
5170 		    }
5171 		  /* Also put the correct TARGET_AVAILABLE bit on the expr.
5172                      This is needed when renaming came up with original
5173                      register.  */
5174                   else if (EXPR_TARGET_AVAILABLE (expr)
5175                            != EXPR_TARGET_AVAILABLE (expr_vliw))
5176 		    {
5177 		      gcc_assert (EXPR_TARGET_AVAILABLE (expr_vliw) == 1);
5178 		      EXPR_TARGET_AVAILABLE (expr) = 1;
5179 		    }
5180 		}
5181               if (EXPR_WAS_SUBSTITUTED (expr))
5182                 stat_substitutions_total++;
5183             }
5184 
5185           av_set_add (&expr_seq, expr);
5186 
5187           /* With substitution inside an insn group, it is possible
5188              that more than one expression in expr_seq will correspond
5189              to expr_vliw.  In this case, choose just one, as attempting
5190              to move both leads to miscompilations.  */
5191           break;
5192         }
5193     }
5194 
5195   if (for_moveop && sched_verbose >= 2)
5196     {
5197       sel_print ("Best expression(s) (sequential form): ");
5198       dump_av_set (expr_seq);
5199       sel_print ("\n");
5200     }
5201 
5202   return expr_seq;
5203 }
5204 
5205 
5206 /* Move nop to previous block.  */
5207 static void ATTRIBUTE_UNUSED
5208 move_nop_to_previous_block (insn_t nop, basic_block prev_bb)
5209 {
5210   insn_t prev_insn, next_insn, note;
5211 
5212   gcc_assert (sel_bb_head_p (nop)
5213               && prev_bb == BLOCK_FOR_INSN (nop)->prev_bb);
5214   note = bb_note (BLOCK_FOR_INSN (nop));
5215   prev_insn = sel_bb_end (prev_bb);
5216   next_insn = NEXT_INSN (nop);
5217   gcc_assert (prev_insn != NULL_RTX
5218               && PREV_INSN (note) == prev_insn);
5219 
5220   NEXT_INSN (prev_insn) = nop;
5221   PREV_INSN (nop) = prev_insn;
5222 
5223   PREV_INSN (note) = nop;
5224   NEXT_INSN (note) = next_insn;
5225 
5226   NEXT_INSN (nop) = note;
5227   PREV_INSN (next_insn) = note;
5228 
5229   BB_END (prev_bb) = nop;
5230   BLOCK_FOR_INSN (nop) = prev_bb;
5231 }
5232 
5233 /* Prepare a place to insert the chosen expression on BND.  */
5234 static insn_t
5235 prepare_place_to_insert (bnd_t bnd)
5236 {
5237   insn_t place_to_insert;
5238 
5239   /* Init place_to_insert before calling move_op, as the latter
5240      can possibly remove BND_TO (bnd).  */
5241   if (/* If this is not the first insn scheduled.  */
5242       BND_PTR (bnd))
5243     {
5244       /* Add it after last scheduled.  */
5245       place_to_insert = ILIST_INSN (BND_PTR (bnd));
5246       if (DEBUG_INSN_P (place_to_insert))
5247 	{
5248 	  ilist_t l = BND_PTR (bnd);
5249 	  while ((l = ILIST_NEXT (l)) &&
5250 		 DEBUG_INSN_P (ILIST_INSN (l)))
5251 	    ;
5252 	  if (!l)
5253 	    place_to_insert = NULL;
5254 	}
5255     }
5256   else
5257     place_to_insert = NULL;
5258 
5259   if (!place_to_insert)
5260     {
5261       /* Add it before BND_TO.  The difference is in the
5262          basic block, where INSN will be added.  */
5263       place_to_insert = get_nop_from_pool (BND_TO (bnd));
5264       gcc_assert (BLOCK_FOR_INSN (place_to_insert)
5265                   == BLOCK_FOR_INSN (BND_TO (bnd)));
5266     }
5267 
5268   return place_to_insert;
5269 }
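
/* Note (illustrative): if insns were already scheduled on BND, the new insn
   goes right after the last non-debug scheduled one; otherwise a NOP taken
   from the pool marks the place just before BND_TO, which pins the insertion
   to the correct basic block even if move_op removes BND_TO itself.  */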
5270 
5271 /* Find the original instructions for EXPR_SEQ and move them to the BND
5272    boundary.  Return the expression to emit in C_EXPR.  */
5273 static bool
5274 move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
5275                         av_set_t expr_seq, expr_t c_expr)
5276 {
5277   bool b, should_move;
5278   unsigned book_uid;
5279   bitmap_iterator bi;
5280   int n_bookkeeping_copies_before_moveop;
5281 
5282   /* Make a move.  This call will remove the original operation,
5283      insert all necessary bookkeeping instructions and update the
5284      data sets.  After that all we have to do is add the operation
5285      before BND_TO (BND).  */
5286   n_bookkeeping_copies_before_moveop = stat_bookkeeping_copies;
5287   max_uid_before_move_op = get_max_uid ();
5288   bitmap_clear (current_copies);
5289   bitmap_clear (current_originators);
5290 
5291   b = move_op (BND_TO (bnd), expr_seq, expr_vliw,
5292                get_dest_from_orig_ops (expr_seq), c_expr, &should_move);
5293 
5294   /* We should be able to find the expression we've chosen for
5295      scheduling.  */
5296   gcc_assert (b);
5297 
5298   if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop)
5299     stat_insns_needed_bookkeeping++;
5300 
5301   EXECUTE_IF_SET_IN_BITMAP (current_copies, 0, book_uid, bi)
5302     {
5303       unsigned uid;
5304       bitmap_iterator bi;
5305 
5306       /* We allocate these bitmaps lazily.  */
5307       if (! INSN_ORIGINATORS_BY_UID (book_uid))
5308         INSN_ORIGINATORS_BY_UID (book_uid) = BITMAP_ALLOC (NULL);
5309 
5310       bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
5311                    current_originators);
5312 
5313       /* Transitively add all originators' originators.  */
5314       EXECUTE_IF_SET_IN_BITMAP (current_originators, 0, uid, bi)
5315        if (INSN_ORIGINATORS_BY_UID (uid))
5316 	 bitmap_ior_into (INSN_ORIGINATORS_BY_UID (book_uid),
5317 			  INSN_ORIGINATORS_BY_UID (uid));
5318     }
5319 
5320   return should_move;
5321 }
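
/* Illustration (hypothetical UIDs): if a bookkeeping copy B was created
   while moving insn X, and X in turn lists Y among its originators, the
   loop above leaves B with originators {X, Y}.  The transitive closure lets
   B be traced back to every insn it stands for.  */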
5322 
5323 
5324 /* Debug a DFA state as an array of bytes.  */
5325 static void
5326 debug_state (state_t state)
5327 {
5328   unsigned char *p;
5329   unsigned int i, size = dfa_state_size;
5330 
5331   sel_print ("state (%u):", size);
5332   for (i = 0, p = (unsigned char *) state; i < size; i++)
5333     sel_print (" %d", p[i]);
5334   sel_print ("\n");
5335 }
5336 
5337 /* Advance state on FENCE with INSN.  Return true if INSN is
5338    an ASM, and we should advance state once more.  */
5339 static bool
5340 advance_state_on_fence (fence_t fence, insn_t insn)
5341 {
5342   bool asm_p;
5343 
5344   if (recog_memoized (insn) >= 0)
5345     {
5346       int res;
5347       state_t temp_state = alloca (dfa_state_size);
5348 
5349       gcc_assert (!INSN_ASM_P (insn));
5350       asm_p = false;
5351 
5352       memcpy (temp_state, FENCE_STATE (fence), dfa_state_size);
5353       res = state_transition (FENCE_STATE (fence), insn);
5354       gcc_assert (res < 0);
5355 
5356       if (memcmp (temp_state, FENCE_STATE (fence), dfa_state_size))
5357         {
5358           FENCE_ISSUED_INSNS (fence)++;
5359 
5360           /* We should never issue more than issue_rate insns.  */
5361           if (FENCE_ISSUED_INSNS (fence) > issue_rate)
5362             gcc_unreachable ();
5363         }
5364     }
5365   else
5366     {
5367       /* This could be an ASM insn which we'd like to schedule
5368          on the next cycle.  */
5369       asm_p = INSN_ASM_P (insn);
5370       if (!FENCE_STARTS_CYCLE_P (fence) && asm_p)
5371         advance_one_cycle (fence);
5372     }
5373 
5374   if (sched_verbose >= 2)
5375     debug_state (FENCE_STATE (fence));
5376   if (!DEBUG_INSN_P (insn))
5377     FENCE_STARTS_CYCLE_P (fence) = 0;
5378   FENCE_ISSUE_MORE (fence) = can_issue_more;
5379   return asm_p;
5380 }
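
/* Note (illustrative): the byte-for-byte memcmp of the DFA state above is
   what decides whether INSN really consumed an issue slot; an insn whose
   transition leaves the automaton state unchanged does not bump
   FENCE_ISSUED_INSNS.  */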
5381 
5382 /* Update FENCE on which INSN was scheduled and this INSN, too.  NEED_STALL
5383    is nonzero if we need to stall after issuing INSN.  */
5384 static void
5385 update_fence_and_insn (fence_t fence, insn_t insn, int need_stall)
5386 {
5387   bool asm_p;
5388 
5389   /* First, reflect that something is scheduled on this fence.  */
5390   asm_p = advance_state_on_fence (fence, insn);
5391   FENCE_LAST_SCHEDULED_INSN (fence) = insn;
5392   vec_safe_push (FENCE_EXECUTING_INSNS (fence), insn);
5393   if (SCHED_GROUP_P (insn))
5394     {
5395       FENCE_SCHED_NEXT (fence) = INSN_SCHED_NEXT (insn);
5396       SCHED_GROUP_P (insn) = 0;
5397     }
5398   else
5399     FENCE_SCHED_NEXT (fence) = NULL_RTX;
5400   if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
5401     FENCE_READY_TICKS (fence) [INSN_UID (insn)] = 0;
5402 
5403   /* Set instruction scheduling info.  This will be used in bundling,
5404      pipelining, tick computations etc.  */
5405   ++INSN_SCHED_TIMES (insn);
5406   EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true;
5407   EXPR_ORIG_SCHED_CYCLE (INSN_EXPR (insn)) = FENCE_CYCLE (fence);
5408   INSN_AFTER_STALL_P (insn) = FENCE_AFTER_STALL_P (fence);
5409   INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence);
5410 
5411   /* This does not account for the adjust_cost hooks; we just add the
5412      biggest constant the hook may add to the latency.  TODO: make this
5413      a target dependent constant.  */
5414   INSN_READY_CYCLE (insn)
5415     = INSN_SCHED_CYCLE (insn) + (INSN_CODE (insn) < 0
5416                                  ? 1
5417                                  : maximal_insn_latency (insn) + 1);
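
  /* E.g. (illustrative numbers): an insn scheduled on cycle 10 with a
     maximal latency of 3 gets INSN_READY_CYCLE 14, while an unrecognized
     insn (INSN_CODE < 0) would get 11.  */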
5418 
5419   /* Change these fields last, as they're used above.  */
5420   FENCE_AFTER_STALL_P (fence) = 0;
5421   if (asm_p || need_stall)
5422     advance_one_cycle (fence);
5423 
5424   /* Indicate that we've scheduled something on this fence.  */
5425   FENCE_SCHEDULED_P (fence) = true;
5426   scheduled_something_on_previous_fence = true;
5427 
5428   /* Print debug information when insn's fields are updated.  */
5429   if (sched_verbose >= 2)
5430     {
5431       sel_print ("Scheduling insn: ");
5432       dump_insn_1 (insn, 1);
5433       sel_print ("\n");
5434     }
5435 }
5436 
5437 /* Update boundary BND (and, if needed, FENCE) with INSN, remove the
5438    old boundary from BNDSP, add new boundaries to BNDS_TAILP and
5439    return it.  */
5440 static blist_t *
5441 update_boundaries (fence_t fence, bnd_t bnd, insn_t insn, blist_t *bndsp,
5442                    blist_t *bnds_tailp)
5443 {
5444   succ_iterator si;
5445   insn_t succ;
5446 
5447   advance_deps_context (BND_DC (bnd), insn);
5448   FOR_EACH_SUCC_1 (succ, si, insn,
5449                    SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
5450     {
5451       ilist_t ptr = ilist_copy (BND_PTR (bnd));
5452 
5453       ilist_add (&ptr, insn);
5454 
5455       if (DEBUG_INSN_P (insn) && sel_bb_end_p (insn)
5456 	  && is_ineligible_successor (succ, ptr))
5457 	{
5458 	  ilist_clear (&ptr);
5459 	  continue;
5460 	}
5461 
5462       if (FENCE_INSN (fence) == insn && !sel_bb_end_p (insn))
5463 	{
5464 	  if (sched_verbose >= 9)
5465 	    sel_print ("Updating fence insn from %i to %i\n",
5466 		       INSN_UID (insn), INSN_UID (succ));
5467 	  FENCE_INSN (fence) = succ;
5468 	}
5469       blist_add (bnds_tailp, succ, ptr, BND_DC (bnd));
5470       bnds_tailp = &BLIST_NEXT (*bnds_tailp);
5471     }
5472 
5473   blist_remove (bndsp);
5474   return bnds_tailp;
5475 }
5476 
5477 /* Schedule EXPR_VLIW on BND.  Return the insn emitted.  */
5478 static insn_t
5479 schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
5480 {
5481   av_set_t expr_seq;
5482   expr_t c_expr = XALLOCA (expr_def);
5483   insn_t place_to_insert;
5484   insn_t insn;
5485   bool should_move;
5486 
5487   expr_seq = find_sequential_best_exprs (bnd, expr_vliw, true);
5488 
5489   /* When scheduling a jump that skips some other instructions, prepare
5490      the CFG first.  After this, the jump is at the boundary and can be
5491      scheduled as a usual insn by MOVE_OP.  */
5492   if (vinsn_cond_branch_p (EXPR_VINSN (expr_vliw)))
5493     {
5494       insn = EXPR_INSN_RTX (expr_vliw);
5495 
5496       /* Speculative jumps are not handled.  */
5497       if (insn != BND_TO (bnd)
5498           && !sel_insn_is_speculation_check (insn))
5499         move_cond_jump (insn, bnd);
5500     }
5501 
5502   /* Find a place for C_EXPR to schedule.  */
5503   place_to_insert = prepare_place_to_insert (bnd);
5504   should_move = move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr);
5505   clear_expr (c_expr);
5506 
5507   /* Add the instruction.  The corner case to care about is when
5508      the expr_seq set has more than one expr, and we chose the one that
5509      is not equal to expr_vliw.  Then expr_vliw's insn may still be in
5510      the stream, and we can't use it.  Generate a new vinsn then.  */
5511   if (INSN_IN_STREAM_P (EXPR_INSN_RTX (expr_vliw)))
5512     {
5513       vinsn_t vinsn_new;
5514 
5515       vinsn_new = vinsn_copy (EXPR_VINSN (expr_vliw), false);
5516       change_vinsn_in_expr (expr_vliw, vinsn_new);
5517       should_move = false;
5518     }
5519   if (should_move)
5520     insn = sel_move_insn (expr_vliw, seqno, place_to_insert);
5521   else
5522     insn = emit_insn_from_expr_after (expr_vliw, NULL, seqno,
5523                                       place_to_insert);
5524 
5525   /* Return the nops generated for preserving the data sets back
5526      into the pool.  */
5527   if (INSN_NOP_P (place_to_insert))
5528     return_nop_to_pool (place_to_insert, !DEBUG_INSN_P (insn));
5529   remove_temp_moveop_nops (!DEBUG_INSN_P (insn));
5530 
5531   av_set_clear (&expr_seq);
5532 
5533   /* Save the scheduled expression so as to reset its target availability
5534      if we meet it later on the same fence.  */
5535   if (EXPR_WAS_RENAMED (expr_vliw))
5536     vinsn_vec_add (&vec_target_unavailable_vinsns, INSN_EXPR (insn));
5537 
5538   /* Check that the recent movement didn't destroy the loop
5539      structure.  */
5540   gcc_assert (!pipelining_p
5541               || current_loop_nest == NULL
5542               || loop_latch_edge (current_loop_nest));
5543   return insn;
5544 }
5545 
5546 /* Stall for N cycles on FENCE.  */
5547 static void
5548 stall_for_cycles (fence_t fence, int n)
5549 {
5550   int could_more;
5551 
5552   could_more = n > 1 || FENCE_ISSUED_INSNS (fence) < issue_rate;
5553   while (n--)
5554     advance_one_cycle (fence);
5555   if (could_more)
5556     FENCE_AFTER_STALL_P (fence) = 1;
5557 }
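
/* Note (illustrative): the flag is set unless we stall exactly one cycle on
   a fence that has already issued a full issue_rate group.  E.g. stalling
   two cycles always sets FENCE_AFTER_STALL_P, while a one-cycle stall on an
   already full group does not.  */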
5558 
5559 /* Gather a parallel group of insns at FENCE and assign their seqno
5560    to SEQNO.  All scheduled insns are gathered in the SCHEDULED_INSNS_TAILPP
5561    list for later recalculation of seqnos.  */
5562 static void
5563 fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
5564 {
5565   blist_t bnds = NULL, *bnds_tailp;
5566   av_set_t av_vliw = NULL;
5567   insn_t insn = FENCE_INSN (fence);
5568 
5569   if (sched_verbose >= 2)
5570     sel_print ("Starting fill_insns for insn %d, cycle %d\n",
5571                INSN_UID (insn), FENCE_CYCLE (fence));
5572 
5573   blist_add (&bnds, insn, NULL, FENCE_DC (fence));
5574   bnds_tailp = &BLIST_NEXT (bnds);
5575   set_target_context (FENCE_TC (fence));
5576   can_issue_more = FENCE_ISSUE_MORE (fence);
5577   target_bb = INSN_BB (insn);
5578 
5579   /* Do while we can add any operation to the current group.  */
5580   do
5581     {
5582       blist_t *bnds_tailp1, *bndsp;
5583       expr_t expr_vliw;
5584       int need_stall = false;
5585       int was_stall = 0, scheduled_insns = 0;
5586       int max_insns = pipelining_p ? issue_rate : 2 * issue_rate;
5587       int max_stall = pipelining_p ? 1 : 3;
5588       bool last_insn_was_debug = false;
5589       bool was_debug_bb_end_p = false;
5590 
5591       compute_av_set_on_boundaries (fence, bnds, &av_vliw);
5592       remove_insns_that_need_bookkeeping (fence, &av_vliw);
5593       remove_insns_for_debug (bnds, &av_vliw);
5594 
5595       /* Return early if we have nothing to schedule.  */
5596       if (av_vliw == NULL)
5597         break;
5598 
5599       /* Choose the best expression and, if needed, destination register
5600 	 for it.  */
5601       do
5602         {
5603           expr_vliw = find_best_expr (&av_vliw, bnds, fence, &need_stall);
5604           if (! expr_vliw && need_stall)
5605             {
5606               /* All expressions required a stall.  Do not recompute av sets
5607                  as we'll get the same answer (modulo the insns between
5608                  the fence and its boundary, which will not be available for
5609                  pipelining).
5610 		 If we are going to stall for too long, break to recompute av
5611 		 sets and bring more insns for pipelining.  */
5612               was_stall++;
5613 	      if (need_stall <= 3)
5614 		stall_for_cycles (fence, need_stall);
5615 	      else
5616 		{
5617 		  stall_for_cycles (fence, 1);
5618 		  break;
5619 		}
5620             }
5621         }
5622       while (! expr_vliw && need_stall);
5623 
5624       /* Now either we've selected expr_vliw or we have nothing to schedule.  */
5625       if (!expr_vliw)
5626         {
5627 	  av_set_clear (&av_vliw);
5628           break;
5629         }
5630 
5631       bndsp = &bnds;
5632       bnds_tailp1 = bnds_tailp;
5633 
5634       do
5635 	/* This code will be executed only once, until we have several
5636            boundaries per fence.  */
5637         {
5638 	  bnd_t bnd = BLIST_BND (*bndsp);
5639 
5640 	  if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr_vliw)))
5641 	    {
5642 	      bndsp = &BLIST_NEXT (*bndsp);
5643 	      continue;
5644 	    }
5645 
5646           insn = schedule_expr_on_boundary (bnd, expr_vliw, seqno);
5647 	  last_insn_was_debug = DEBUG_INSN_P (insn);
5648 	  if (last_insn_was_debug)
5649 	    was_debug_bb_end_p = (insn == BND_TO (bnd) && sel_bb_end_p (insn));
5650           update_fence_and_insn (fence, insn, need_stall);
5651           bnds_tailp = update_boundaries (fence, bnd, insn, bndsp, bnds_tailp);
5652 
5653 	  /* Add insn to the list of scheduled on this cycle instructions.  */
5654 	  ilist_add (*scheduled_insns_tailpp, insn);
5655 	  *scheduled_insns_tailpp = &ILIST_NEXT (**scheduled_insns_tailpp);
5656         }
5657       while (*bndsp != *bnds_tailp1);
5658 
5659       av_set_clear (&av_vliw);
5660       if (!last_insn_was_debug)
5661 	scheduled_insns++;
5662 
5663       /* We currently support information about candidate blocks only for
5664 	 one 'target_bb' block.  Hence we can't schedule after a jump insn,
5665 	 as this would create two boundaries and, hence, the necessity to
5666 	 handle information for two or more blocks concurrently.  */
5667       if ((last_insn_was_debug ? was_debug_bb_end_p : sel_bb_end_p (insn))
5668           || (was_stall
5669               && (was_stall >= max_stall
5670                   || scheduled_insns >= max_insns)))
5671         break;
5672     }
5673   while (bnds);
5674 
5675   gcc_assert (!FENCE_BNDS (fence));
5676 
5677   /* Update boundaries of the FENCE.  */
5678   while (bnds)
5679     {
5680       ilist_t ptr = BND_PTR (BLIST_BND (bnds));
5681 
5682       if (ptr)
5683 	{
5684 	  insn = ILIST_INSN (ptr);
5685 
5686 	  if (!ilist_is_in_p (FENCE_BNDS (fence), insn))
5687 	    ilist_add (&FENCE_BNDS (fence), insn);
5688 	}
5689 
5690       blist_remove (&bnds);
5691     }
5692 
5693   /* Update target context on the fence.  */
5694   reset_target_context (FENCE_TC (fence), false);
5695 }
5696 
5697 /* All exprs in ORIG_OPS must have the same destination register or memory.
5698    Return that destination.  */
5699 static rtx
5700 get_dest_from_orig_ops (av_set_t orig_ops)
5701 {
5702   rtx dest = NULL_RTX;
5703   av_set_iterator av_it;
5704   expr_t expr;
5705   bool first_p = true;
5706 
5707   FOR_EACH_EXPR (expr, av_it, orig_ops)
5708     {
5709       rtx x = EXPR_LHS (expr);
5710 
5711       if (first_p)
5712 	{
5713 	  first_p = false;
5714 	  dest = x;
5715 	}
5716       else
5717 	gcc_assert (dest == x
5718 		    || (dest != NULL_RTX && x != NULL_RTX
5719 			&& rtx_equal_p (dest, x)));
5720     }
5721 
5722   return dest;
5723 }
5724 
5725 /* Update data sets for the bookkeeping block and record those expressions
5726    which are no longer available after inserting this bookkeeping.  */
5727 static void
5728 update_and_record_unavailable_insns (basic_block book_block)
5729 {
5730   av_set_iterator i;
5731   av_set_t old_av_set = NULL;
5732   expr_t cur_expr;
5733   rtx bb_end = sel_bb_end (book_block);
5734 
5735   /* First, get correct liveness in the bookkeeping block.  The problem is
5736      the range between the bookkeeping insn and the end of the block.  */
5737   update_liveness_on_insn (bb_end);
5738   if (control_flow_insn_p (bb_end))
5739     update_liveness_on_insn (PREV_INSN (bb_end));
5740 
5741   /* If there's a valid av_set on BOOK_BLOCK, then there might exist another
5742      fence above, where we may choose to schedule an insn which is
5743      actually blocked from moving up by the bookkeeping we create here.  */
5744   if (AV_SET_VALID_P (sel_bb_head (book_block)))
5745     {
5746       old_av_set = av_set_copy (BB_AV_SET (book_block));
5747       update_data_sets (sel_bb_head (book_block));
5748 
5749       /* Traverse all the expressions in the old av_set and check whether
5750 	 CUR_EXPR is in new AV_SET.  */
5751       FOR_EACH_EXPR (cur_expr, i, old_av_set)
5752         {
5753           expr_t new_expr = av_set_lookup (BB_AV_SET (book_block),
5754 					   EXPR_VINSN (cur_expr));
5755 
5756           if (! new_expr
5757               /* In this case, we can just turn off the E_T_A bit, but we can't
5758                  represent this information with the current vector.  */
5759               || EXPR_TARGET_AVAILABLE (new_expr)
5760 		 != EXPR_TARGET_AVAILABLE (cur_expr))
5761 	    /* Unfortunately, the below code could also fire on
5762 	       separable insns, e.g. when moving insns through the new
5763 	       speculation check as in PR 53701.  */
5764             vinsn_vec_add (&vec_bookkeeping_blocked_vinsns, cur_expr);
5765         }
5766 
5767       av_set_clear (&old_av_set);
5768     }
5769 }
5770 
5771 /* The main effect of this function is that sparams->c_expr is merged
5772    with (or copied to) lparams->c_expr_merged.  If there's only one successor,
5773    we avoid merging anything by copying sparams->c_expr to lparams->c_expr_merged.
5774    lparams->c_expr_merged is copied back to sparams->c_expr after all
5775    successors have been traversed.  lparams->c_expr_local is an expr allocated
5776    on the stack in the caller function, and is used if there is more than one
5777    successor.
5778 
5779    SUCC is one of the SUCCS_NORMAL successors of INSN,
5780    MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on SUCC,
5781    LPARAMS and STATIC_PARAMS contain the parameters described above.  */
5782 static void
5783 move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED,
5784                      insn_t succ ATTRIBUTE_UNUSED,
5785 		     int moveop_drv_call_res,
5786 		     cmpd_local_params_p lparams, void *static_params)
5787 {
5788   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
5789 
5790   /* Nothing to do if the original expr wasn't found below.  */
5791   if (moveop_drv_call_res != 1)
5792     return;
5793 
5794   /* If this is the first successor.  */
5795   if (!lparams->c_expr_merged)
5796     {
5797       lparams->c_expr_merged = sparams->c_expr;
5798       sparams->c_expr = lparams->c_expr_local;
5799     }
5800   else
5801     {
5802       /* We must merge all found expressions to get reasonable
5803 	 EXPR_SPEC_DONE_DS for the resulting insn.  If we don't
5804 	 do so then we can first find the expr with epsilon
5805 	 speculation success probability and only then with the
5806 	 good probability.  As a result the insn will get epsilon
5807 	 probability and will never be scheduled because of
5808 	 weakness_cutoff in find_best_expr.
5809 
5810 	 We call merge_expr_data here instead of merge_expr
5811 	 because due to speculation C_EXPR and X may have the
5812 	 same insns with different speculation types.  And as of
5813 	 now such insns are considered non-equal.
5814 
5815 	 However, EXPR_SCHED_TIMES is different -- we must get
5816 	 SCHED_TIMES from a real insn, not a bookkeeping copy.
5817 	 We force this here.  Instead, we may consider merging
5818 	 SCHED_TIMES to the maximum instead of minimum in the
5819 	 below function.  */
5820       int old_times = EXPR_SCHED_TIMES (lparams->c_expr_merged);
5821 
5822       merge_expr_data (lparams->c_expr_merged, sparams->c_expr, NULL);
5823       if (EXPR_SCHED_TIMES (sparams->c_expr) == 0)
5824 	EXPR_SCHED_TIMES (lparams->c_expr_merged) = old_times;
5825 
5826       clear_expr (sparams->c_expr);
5827     }
5828 }
5829 
5830 /*  Add used regs for the successor SUCC into SPARAMS->USED_REGS.
5831 
5832    SUCC is one of the SUCCS_NORMAL successors of INSN,
5833    MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on
5834      SUCC, or 0 if SUCC is one of SUCCS_BACK or SUCCS_OUT.
5835    STATIC_PARAMS contains the USED_REGS set.  */
5836 static void
5837 fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ,
5838 		 int moveop_drv_call_res,
5839 		 cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
5840 		 void *static_params)
5841 {
5842   regset succ_live;
5843   fur_static_params_p sparams = (fur_static_params_p) static_params;
5844 
5845   /* Here we compute live regsets only for branches that do not lie
5846      on the code motion paths.  These branches correspond to value
5847      MOVEOP_DRV_CALL_RES==0 and include SUCCS_BACK and SUCCS_OUT, though
5848      for such branches code_motion_path_driver is not called.  */
5849   if (moveop_drv_call_res != 0)
5850     return;
5851 
5852   /* Mark all registers that do not meet the following condition:
5853      (3) not live on the other path of any conditional branch
5854      that is passed by the operation, in case original
5855      operations are not present on both paths of the
5856      conditional branch.  */
5857   succ_live = compute_live (succ);
5858   IOR_REG_SET (sparams->used_regs, succ_live);
5859 }
5860 
5861 /* This function is called after the last successor.  Copies LP->C_EXPR_MERGED
5862    into SP->C_EXPR.  */
5863 static void
5864 move_op_after_merge_succs (cmpd_local_params_p lp, void *sparams)
5865 {
5866   moveop_static_params_p sp = (moveop_static_params_p) sparams;
5867 
5868   sp->c_expr = lp->c_expr_merged;
5869 }
5870 
5871 /* Track bookkeeping copies created, insns scheduled, and blocks for
5872    rescheduling when INSN is found by move_op.  */
5873 static void
5874 track_scheduled_insns_and_blocks (rtx insn)
5875 {
5876   /* Even if this insn may be a copy that will be removed during the current
5877      move_op, we still need to count it as an originator.  */
5878   bitmap_set_bit (current_originators, INSN_UID (insn));
5879 
5880   if (!bitmap_clear_bit (current_copies, INSN_UID (insn)))
5881     {
5882       /* Note that the original block needs to be rescheduled, as we pulled an
5883 	 instruction out of it.  */
5884       if (INSN_SCHED_TIMES (insn) > 0)
5885 	bitmap_set_bit (blocks_to_reschedule, BLOCK_FOR_INSN (insn)->index);
5886       else if (INSN_UID (insn) < first_emitted_uid && !DEBUG_INSN_P (insn))
5887 	num_insns_scheduled++;
5888     }
5889 
5890   /* For instructions we must immediately remove insn from the
5891      stream, so subsequent update_data_sets () won't include this
5892      insn into av_set.
5893      For expr we must make insn look like "INSN_REG (insn) := c_expr".  */
5894   if (INSN_UID (insn) > max_uid_before_move_op)
5895     stat_bookkeeping_copies--;
5896 }
5897 
5898 /* Emit a register-register copy for INSN if needed.  Return true if one
5899    was emitted.  PARAMS holds the move_op static parameters.  */
5900 static bool
5901 maybe_emit_renaming_copy (rtx insn,
5902                           moveop_static_params_p params)
5903 {
5904   bool insn_emitted  = false;
5905   rtx cur_reg;
5906 
5907   /* Bail out early when the expression cannot be renamed at all.  */
5908   if (!EXPR_SEPARABLE_P (params->c_expr))
5909     return false;
5910 
5911   cur_reg = expr_dest_reg (params->c_expr);
5912   gcc_assert (cur_reg && params->dest && REG_P (params->dest));
5913 
5914   /* If the original operation has an expr and the register chosen for
5915      that expr is not the original operation's dest reg, substitute the
5916      operation's right hand side with the register chosen.  */
5917   if (REGNO (params->dest) != REGNO (cur_reg))
5918     {
5919       insn_t reg_move_insn, reg_move_insn_rtx;
5920 
5921       reg_move_insn_rtx = create_insn_rtx_with_rhs (INSN_VINSN (insn),
5922                                                     params->dest);
5923       reg_move_insn = sel_gen_insn_from_rtx_after (reg_move_insn_rtx,
5924                                                    INSN_EXPR (insn),
5925                                                    INSN_SEQNO (insn),
5926                                                    insn);
5927       EXPR_SPEC_DONE_DS (INSN_EXPR (reg_move_insn)) = 0;
5928       replace_dest_with_reg_in_expr (params->c_expr, params->dest);
5929 
5930       insn_emitted = true;
5931       params->was_renamed = true;
5932     }
5933 
5934   return insn_emitted;
5935 }
5936 
5937 /* Emit a speculative check for INSN speculated as EXPR if needed.
5938    Return true if we've emitted one.  PARAMS holds the move_op static
5939    parameters.  */
5940 static bool
5941 maybe_emit_speculative_check (rtx insn, expr_t expr,
5942                               moveop_static_params_p params)
5943 {
5944   bool insn_emitted = false;
5945   insn_t x;
5946   ds_t check_ds;
5947 
5948   check_ds = get_spec_check_type_for_insn (insn, expr);
5949   if (check_ds != 0)
5950     {
5951       /* A speculation check should be inserted.  */
5952       x = create_speculation_check (params->c_expr, check_ds, insn);
5953       insn_emitted = true;
5954     }
5955   else
5956     {
5957       EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
5958       x = insn;
5959     }
5960 
5961   gcc_assert (EXPR_SPEC_DONE_DS (INSN_EXPR (x)) == 0
5962               && EXPR_SPEC_TO_CHECK_DS (INSN_EXPR (x)) == 0);
5963   return insn_emitted;
5964 }
5965 
5966 /* Handle transformations that leave an insn in place of the original
5967    insn, such as renaming or speculation.  Return true if one of such
5968    transformations actually happened and we have emitted this insn.  */
5969 static bool
5970 handle_emitting_transformations (rtx insn, expr_t expr,
5971                                  moveop_static_params_p params)
5972 {
5973   bool insn_emitted = false;
5974 
5975   insn_emitted = maybe_emit_renaming_copy (insn, params);
5976   insn_emitted |= maybe_emit_speculative_check (insn, expr, params);
5977 
5978   return insn_emitted;
5979 }
5980 
5981 /* If INSN is the only insn in the basic block (not counting JUMP,
5982    which may be a jump to the next insn, and DEBUG_INSNs), we want to
5983    leave a NOP there till the return to fill_insns.  */
5984 
5985 static bool
5986 need_nop_to_preserve_insn_bb (rtx insn)
5987 {
5988   insn_t bb_head, bb_end, bb_next, in_next;
5989   basic_block bb = BLOCK_FOR_INSN (insn);
5990 
5991   bb_head = sel_bb_head (bb);
5992   bb_end = sel_bb_end (bb);
5993 
5994   if (bb_head == bb_end)
5995     return true;
5996 
5997   while (bb_head != bb_end && DEBUG_INSN_P (bb_head))
5998     bb_head = NEXT_INSN (bb_head);
5999 
6000   if (bb_head == bb_end)
6001     return true;
6002 
6003   while (bb_head != bb_end && DEBUG_INSN_P (bb_end))
6004     bb_end = PREV_INSN (bb_end);
6005 
6006   if (bb_head == bb_end)
6007     return true;
6008 
6009   bb_next = NEXT_INSN (bb_head);
6010   while (bb_next != bb_end && DEBUG_INSN_P (bb_next))
6011     bb_next = NEXT_INSN (bb_next);
6012 
6013   if (bb_next == bb_end && JUMP_P (bb_end))
6014     return true;
6015 
6016   in_next = NEXT_INSN (insn);
6017   while (DEBUG_INSN_P (in_next))
6018     in_next = NEXT_INSN (in_next);
6019 
6020   if (IN_CURRENT_FENCE_P (in_next))
6021     return true;
6022 
6023   return false;
6024 }
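/* Sketch of the cases handled above, with DEBUG_INSNs omitted and INSN
   being the insn about to be removed (the block layouts are hypothetical):

       bb:  INSN               <- the only insn: need a NOP
       bb:  INSN; jump         <- only a trailing jump remains: need a NOP
       bb:  INSN; insn2        <- insn2 remains: no NOP needed, unless
                                  insn2 is already in the current fence  */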
6025 
6026 /* Remove INSN from stream.  When ONLY_DISCONNECT is true, its data
6027    is not removed but reused when INSN is re-emitted.  */
6028 static void
6029 remove_insn_from_stream (rtx insn, bool only_disconnect)
6030 {
6031   /* If there's only one insn in the BB, make sure that a nop is
6032      inserted into it, so the basic block won't disappear when we
6033      delete INSN below with sel_remove_insn.  It should also survive
6034      till the return to fill_insns.  */
6035   if (need_nop_to_preserve_insn_bb (insn))
6036     {
6037       insn_t nop = get_nop_from_pool (insn);
6038       gcc_assert (INSN_NOP_P (nop));
6039       vec_temp_moveop_nops.safe_push (nop);
6040     }
6041 
6042   sel_remove_insn (insn, only_disconnect, false);
6043 }
6044 
6045 /* This function is called when the original expr is found.
6046    INSN - current insn traversed, EXPR - the corresponding expr found.
6047    LPARAMS are the local parameters of the code motion driver,
6048    STATIC_PARAMS are the static parameters of move_op.  */
6049 static void
6050 move_op_orig_expr_found (insn_t insn, expr_t expr,
6051                          cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6052                          void *static_params)
6053 {
6054   bool only_disconnect, insn_emitted;
6055   moveop_static_params_p params = (moveop_static_params_p) static_params;
6056 
6057   copy_expr_onside (params->c_expr, INSN_EXPR (insn));
6058   track_scheduled_insns_and_blocks (insn);
6059   insn_emitted = handle_emitting_transformations (insn, expr, params);
6060   only_disconnect = (params->uid == INSN_UID (insn)
6061                      && ! insn_emitted  && ! EXPR_WAS_CHANGED (expr));
6062 
6063   /* Mark that we've disconnected an insn.  */
6064   if (only_disconnect)
6065     params->uid = -1;
6066   remove_insn_from_stream (insn, only_disconnect);
6067 }
6068 
6069 /* The function is called when the original expr is found.
6070    INSN - current insn traversed, EXPR - the corresponding expr found,
6071    crosses_call and original_insns in STATIC_PARAMS are updated.  */
6072 static void
6073 fur_orig_expr_found (insn_t insn, expr_t expr ATTRIBUTE_UNUSED,
6074                      cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6075                      void *static_params)
6076 {
6077   fur_static_params_p params = (fur_static_params_p) static_params;
6078   regset tmp;
6079 
6080   if (CALL_P (insn))
6081     params->crosses_call = true;
6082 
6083   def_list_add (params->original_insns, insn, params->crosses_call);
6084 
6085   /* Mark the registers that do not meet the following condition:
6086     (2) not among the live registers of the point
6087 	immediately following the first original operation on
6088 	a given downward path, except for the original target
6089 	register of the operation.  */
6090   tmp = get_clear_regset_from_pool ();
6091   compute_live_below_insn (insn, tmp);
6092   AND_COMPL_REG_SET (tmp, INSN_REG_SETS (insn));
6093   AND_COMPL_REG_SET (tmp, INSN_REG_CLOBBERS (insn));
6094   IOR_REG_SET (params->used_regs, tmp);
6095   return_regset_to_pool (tmp);
6096 
6097   /* (*1) We need to add to USED_REGS registers that are read by
6098      INSN's lhs.  This may lead to choosing a wrong src register.
6099      E.g. (scheduling const expr enabled):
6100 
6101 	429: ax=0x0	<- Can't use AX for this expr (0x0)
6102 	433: dx=[bp-0x18]
6103 	427: [ax+dx+0x1]=ax
6104 	  REG_DEAD: ax
6105 	168: di=dx
6106 	  REG_DEAD: dx
6107      */
6108   /* FIXME: see comment above and enable MEM_P
6109      in vinsn_separable_p.  */
6110   gcc_assert (!VINSN_SEPARABLE_P (INSN_VINSN (insn))
6111 	      || !MEM_P (INSN_LHS (insn)));
6112 }
6113 
6114 /* This function is called on the ascending pass, before returning from
6115    current basic block.  */
6116 static void
6117 move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
6118                        void *static_params)
6119 {
6120   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6121   basic_block book_block = NULL;
6122 
6123   /* When we have removed the boundary insn for scheduling, which also
6124      happened to be the end insn in its bb, we don't need to update sets.  */
6125   if (!lparams->removed_last_insn
6126       && lparams->e1
6127       && sel_bb_head_p (insn))
6128     {
6129       /* We should generate bookkeeping code only if we are not at the
6130          top level of the move_op.  */
6131       if (sel_num_cfg_preds_gt_1 (insn))
6132         book_block = generate_bookkeeping_insn (sparams->c_expr,
6133                                                 lparams->e1, lparams->e2);
6134       /* Update data sets for the current insn.  */
6135       update_data_sets (insn);
6136     }
6137 
6138   /* If bookkeeping code was inserted, we need to update the av sets of the
6139      basic block that received bookkeeping.  After generation of a bookkeeping
6140      insn, the bookkeeping block does not contain a valid av set because we
6141      are not following the original algorithm in every detail with regard
6142      to e.g. renaming simple reg-reg copies.  Consider an example:
6143 
6144      bookkeeping block           scheduling fence
6145      \            /
6146       \    join  /
6147        ----------
6148        |        |
6149        ----------
6150       /           \
6151      /             \
6152      r1 := r2          r1 := r3
6153 
6154      We try to schedule insn "r1 := r3" on the current
6155      scheduling fence.  Also, note that the av set of the bookkeeping block
6156      contains both insns "r1 := r2" and "r1 := r3".  When the insn has
6157      been scheduled, the CFG is as follows:
6158 
6159      r1 := r3               r1 := r3
6160      bookkeeping block           scheduling fence
6161      \            /
6162       \    join  /
6163        ----------
6164        |        |
6165        ----------
6166       /          \
6167      /            \
6168      r1 := r2
6169 
6170      Here, insn "r1 := r3" was scheduled at the current scheduling point
6171      and bookkeeping code was generated at the bookkeeping block.  This
6172      way insn "r1 := r2" is no longer available as a whole instruction
6173      (but only as expr) ahead of insn "r1 := r3" in bookkeeping block.
6174      This situation is handled by calling update_data_sets.
6175 
6176      Since update_data_sets is called only on the bookkeeping block, and
6177      it also may have predecessors with av_sets containing instructions that
6178      are no longer available, we save all such expressions that become
6179      unavailable during data sets update on the bookkeeping block in
6180      VEC_BOOKKEEPING_BLOCKED_VINSNS.  Later we avoid selecting such
6181      expressions for scheduling.  This allows us to avoid recomputation of
6182      av_sets outside the code motion path.  */
6183 
6184   if (book_block)
6185     update_and_record_unavailable_insns (book_block);
6186 
6187   /* If INSN was previously marked for deletion, it's time to do it.  */
6188   if (lparams->removed_last_insn)
6189     insn = PREV_INSN (insn);
6190 
6191   /* Do not tidy control flow at the topmost moveop, as we can erroneously
6192      kill a block with a single nop in which the insn should be emitted.  */
6193   if (lparams->e1)
6194     tidy_control_flow (BLOCK_FOR_INSN (insn), true);
6195 }
6196 
6197 /* This function is called on the ascending pass, before returning from the
6198    current basic block.  */
6199 static void
6200 fur_at_first_insn (insn_t insn,
6201                    cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6202                    void *static_params ATTRIBUTE_UNUSED)
6203 {
6204   gcc_assert (!sel_bb_head_p (insn) || AV_SET_VALID_P (insn)
6205 	      || AV_LEVEL (insn) == -1);
6206 }
6207 
6208 /* Called on the backward stage of recursion to call moveup_expr for insn
6209    and sparams->c_expr.  */
6210 static void
6211 move_op_ascend (insn_t insn, void *static_params)
6212 {
6213   enum MOVEUP_EXPR_CODE res;
6214   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6215 
6216   if (! INSN_NOP_P (insn))
6217     {
6218       res = moveup_expr_cached (sparams->c_expr, insn, false);
6219       gcc_assert (res != MOVEUP_EXPR_NULL);
6220     }
6221 
6222   /* Update liveness for this insn as it was invalidated.  */
6223   update_liveness_on_insn (insn);
6224 }
6225 
6226 /* This function is called on entry to the basic block.
6227    Returns TRUE if this block has already been visited and
6228    code_motion_path_driver should return 1, FALSE otherwise.  */
6229 static int
6230 fur_on_enter (insn_t insn ATTRIBUTE_UNUSED, cmpd_local_params_p local_params,
6231 	      void *static_params, bool visited_p)
6232 {
6233   fur_static_params_p sparams = (fur_static_params_p) static_params;
6234 
6235   if (visited_p)
6236     {
6237       /* If we have found something below this block, there should be at
6238 	 least one insn in ORIGINAL_INSNS.  */
6239       gcc_assert (*sparams->original_insns);
6240 
6241       /* Adjust CROSSES_CALL, since we may have come to this block along
6242 	 a different path.  */
6243       DEF_LIST_DEF (*sparams->original_insns)->crosses_call
6244 	  |= sparams->crosses_call;
6245     }
6246   else
6247     local_params->old_original_insns = *sparams->original_insns;
6248 
6249   return 1;
6250 }
6251 
6252 /* Same as above but for move_op.   */
6253 static int
6254 move_op_on_enter (insn_t insn ATTRIBUTE_UNUSED,
6255                   cmpd_local_params_p local_params ATTRIBUTE_UNUSED,
6256                   void *static_params ATTRIBUTE_UNUSED, bool visited_p)
6257 {
6258   if (visited_p)
6259     return -1;
6260   return 1;
6261 }
6262 
6263 /* This function is called while descending the current basic block if the
6264    current insn is not the original EXPR we're searching for.
6265 
6266    Return value: FALSE, if code_motion_path_driver should perform a local
6267 			cleanup and return 0 itself;
6268 		 TRUE, if code_motion_path_driver should continue.  */
6269 static bool
6270 move_op_orig_expr_not_found (insn_t insn, av_set_t orig_ops ATTRIBUTE_UNUSED,
6271 			    void *static_params)
6272 {
6273   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6274 
6275 #ifdef ENABLE_CHECKING
6276   sparams->failed_insn = insn;
6277 #endif
6278 
6279   /* If we're scheduling a separate expr, in order to generate correct code
6280      we need to stop the search at bookkeeping code generated with the
6281      same destination register or memory.  */
6282   if (lhs_of_insn_equals_to_dest_p (insn, sparams->dest))
6283     return false;
6284   return true;
6285 }
6286 
6287 /* This function is called while descending the current basic block if the
6288    current insn is not the original EXPR we're searching for.
6289 
6290    Return value: TRUE (code_motion_path_driver should continue).  */
6291 static bool
6292 fur_orig_expr_not_found (insn_t insn, av_set_t orig_ops, void *static_params)
6293 {
6294   bool mutexed;
6295   expr_t r;
6296   av_set_iterator avi;
6297   fur_static_params_p sparams = (fur_static_params_p) static_params;
6298 
6299   if (CALL_P (insn))
6300     sparams->crosses_call = true;
6301   else if (DEBUG_INSN_P (insn))
6302     return true;
6303 
6304   /* If the insn we are looking at cannot be executed together
6305      with the original insn, then we can skip it safely.
6306 
6307      Example: ORIG_OPS = { (p6) r14 = sign_extend (r15); }
6308 	      INSN = (!p6) r14 = r14 + 1;
6309 
6310      Here we can schedule ORIG_OP with lhs = r14, though only
6311      looking at the set of used and set registers of INSN we must
6312      forbid it.  So, add set/used in INSN registers to the
6313      untouchable set only if there is an insn in ORIG_OPS that can
6314      affect INSN.  */
6315   mutexed = true;
6316   FOR_EACH_EXPR (r, avi, orig_ops)
6317     if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (r)))
6318       {
6319 	mutexed = false;
6320 	break;
6321       }
6322 
6323   /* Mark all registers that do not meet the following condition:
6324      (1) Not set or read on any path from xi to an instance of the
6325 	 original operation.  */
6326   if (!mutexed)
6327     {
6328       IOR_REG_SET (sparams->used_regs, INSN_REG_SETS (insn));
6329       IOR_REG_SET (sparams->used_regs, INSN_REG_USES (insn));
6330       IOR_REG_SET (sparams->used_regs, INSN_REG_CLOBBERS (insn));
6331     }
6332 
6333   return true;
6334 }
6335 
6336 /* Hooks and data to perform move_op operations with code_motion_path_driver.  */
6337 struct code_motion_path_driver_info_def move_op_hooks = {
6338   move_op_on_enter,
6339   move_op_orig_expr_found,
6340   move_op_orig_expr_not_found,
6341   move_op_merge_succs,
6342   move_op_after_merge_succs,
6343   move_op_ascend,
6344   move_op_at_first_insn,
6345   SUCCS_NORMAL,
6346   "move_op"
6347 };
6348 
6349 /* Hooks and data to perform find_used_regs operations
6350    with code_motion_path_driver.  */
6351 struct code_motion_path_driver_info_def fur_hooks = {
6352   fur_on_enter,
6353   fur_orig_expr_found,
6354   fur_orig_expr_not_found,
6355   fur_merge_succs,
6356   NULL, /* fur_after_merge_succs */
6357   NULL, /* fur_ascend */
6358   fur_at_first_insn,
6359   SUCCS_ALL,
6360   "find_used_regs"
6361 };
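/* Both hook tables above parameterize one generic traversal: a caller
   installs the table it needs in code_motion_path_driver_info and then
   invokes the driver.  Schematically (this is what move_op does below
   with MOVE_OP_HOOKS; find_used_regs does the same with FUR_HOOKS):

       code_motion_path_driver_info = &move_op_hooks;
       res = code_motion_path_driver (insn, orig_ops, NULL,
                                      &lparams, &sparams);  */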
6362 
6363 /* Traverse all successors of INSN.  For each successor that is SUCCS_NORMAL,
6364    code_motion_path_driver is called recursively.  The original operation
6365    was found on at least one path starting with one of INSN's successors
6366    (this fact is asserted).  ORIG_OPS are the expressions we're looking for,
6367    PATH is the path we've traversed, STATIC_PARAMS are the parameters of
6368    either move_op or find_used_regs depending on the caller.
6369 
6370    Return 0 if we haven't found the expression, 1 if we found it, and -1 if
6371    we don't know for sure at this point.  */
6372 static int
6373 code_motion_process_successors (insn_t insn, av_set_t orig_ops,
6374                                 ilist_t path, void *static_params)
6375 {
6376   int res = 0;
6377   succ_iterator succ_i;
6378   rtx succ;
6379   basic_block bb;
6380   int old_index;
6381   unsigned old_succs;
6382 
6383   struct cmpd_local_params lparams;
6384   expr_def _x;
6385 
6386   lparams.c_expr_local = &_x;
6387   lparams.c_expr_merged = NULL;
6388 
6389   /* We need to process only NORMAL succs for move_op, and collect live
6390      registers from ALL branches (including those leading out of the
6391      region) for find_used_regs.
6392 
6393      In move_op, there can be a case when the insn's bb number has changed
6394      due to created bookkeeping.  This happens very rarely, as we need to
6395      move an expression from the beginning to the end of the same block.
6396      Rescan successors in this case.  */
6397 
6398  rescan:
6399   bb = BLOCK_FOR_INSN (insn);
6400   old_index = bb->index;
6401   old_succs = EDGE_COUNT (bb->succs);
6402 
6403   FOR_EACH_SUCC_1 (succ, succ_i, insn, code_motion_path_driver_info->succ_flags)
6404     {
6405       int b;
6406 
6407       lparams.e1 = succ_i.e1;
6408       lparams.e2 = succ_i.e2;
6409 
6410       /* Go deep into recursion only for NORMAL edges (non-backedges within the
6411 	 current region).  */
6412       if (succ_i.current_flags == SUCCS_NORMAL)
6413 	b = code_motion_path_driver (succ, orig_ops, path, &lparams,
6414 				     static_params);
6415       else
6416 	b = 0;
6417 
6418       /* Merge the c_exprs found or unify live register sets from different
6419 	 successors.  */
6420       code_motion_path_driver_info->merge_succs (insn, succ, b, &lparams,
6421 						 static_params);
6422       if (b == 1)
6423         res = b;
6424       else if (b == -1 && res != 1)
6425         res = b;
6426 
6427       /* We have simplified the control flow below this point.  In this case,
6428          the iterator becomes invalid.  We need to try again.  */
6429       if (BLOCK_FOR_INSN (insn)->index != old_index
6430           || EDGE_COUNT (bb->succs) != old_succs)
6431         {
6432           insn = sel_bb_end (BLOCK_FOR_INSN (insn));
6433           goto rescan;
6434         }
6435     }
6436 
6437 #ifdef ENABLE_CHECKING
6438   /* Here, RES==1 if original expr was found at least for one of the
6439      successors.  After the loop, RES may happen to have zero value
6440      only if at some point the expr searched is present in av_set, but is
6441      not found below.  In most cases, this situation is an error.
6442      The exception is when the original operation is blocked by
6443      bookkeeping generated for another fence or for another path in current
6444      move_op.  */
6445   gcc_assert (res == 1
6446 	      || (res == 0
6447 		  && av_set_could_be_blocked_by_bookkeeping_p (orig_ops,
6448 							       static_params))
6449 	      || res == -1);
6450 #endif
6451 
6452   /* Merge data, clean up, etc.  */
6453   if (res != -1 && code_motion_path_driver_info->after_merge_succs)
6454     code_motion_path_driver_info->after_merge_succs (&lparams, static_params);
6455 
6456   return res;
6457 }
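/* The merging of the per-successor results B into RES above follows the
   precedence 1 ("found") > -1 ("don't know") > 0 ("not found").  For
   example, successors returning 0, -1 and 1 in that order leave RES == 1,
   while successors returning only 0 and -1 leave RES == -1.  */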
6458 
6459 
6460 /* Perform a cleanup when the driver is about to terminate.  ORIG_OPS_P
6461    is the pointer to the av set with expressions we were looking for,
6462    PATH_P is the pointer to the traversed path.  */
6463 static inline void
6464 code_motion_path_driver_cleanup (av_set_t *orig_ops_p, ilist_t *path_p)
6465 {
6466   ilist_remove (path_p);
6467   av_set_clear (orig_ops_p);
6468 }
6469 
6470 /* The driver function that implements move_op or find_used_regs
6471    functionality depending on whether code_motion_path_driver_INFO is set to
6472    &MOVE_OP_HOOKS or &FUR_HOOKS.  This function implements the common parts
6473    of code (CFG traversal etc) that are shared among both functions.  INSN
6474    is the insn we're starting the search from, ORIG_OPS are the expressions
6475    we're searching for, PATH is traversed path, LOCAL_PARAMS_IN are local
6476    parameters of the driver, and STATIC_PARAMS are static parameters of
6477    the caller.
6478 
6479    Returns whether original instructions were found.  Note that top-level
6480    code_motion_path_driver always returns true.  */
6481 static int
6482 code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
6483 			 cmpd_local_params_p local_params_in,
6484 			 void *static_params)
6485 {
6486   expr_t expr = NULL;
6487   basic_block bb = BLOCK_FOR_INSN (insn);
6488   insn_t first_insn, bb_tail, before_first;
6489   bool removed_last_insn = false;
6490 
6491   if (sched_verbose >= 6)
6492     {
6493       sel_print ("%s (", code_motion_path_driver_info->routine_name);
6494       dump_insn (insn);
6495       sel_print (",");
6496       dump_av_set (orig_ops);
6497       sel_print (")\n");
6498     }
6499 
6500   gcc_assert (orig_ops);
6501 
6502   /* If no original operations exist below this insn, return immediately.  */
6503   if (is_ineligible_successor (insn, path))
6504     {
6505       if (sched_verbose >= 6)
6506         sel_print ("Insn %d is ineligible successor\n", INSN_UID (insn));
6507       return false;
6508     }
6509 
6510   /* The block can have an invalid av set, in which case it was created earlier
6511      during move_op.  Return immediately.  */
6512   if (sel_bb_head_p (insn))
6513     {
6514       if (! AV_SET_VALID_P (insn))
6515         {
6516           if (sched_verbose >= 6)
6517             sel_print ("Returned from block %d as it had invalid av set\n",
6518                        bb->index);
6519           return false;
6520         }
6521 
6522       if (bitmap_bit_p (code_motion_visited_blocks, bb->index))
6523         {
6524           /* We have already found an original operation on this branch, do not
6525              go any further and just return TRUE here.  If we don't stop here,
6526             the function can have exponential behaviour even on small code
6527              with many different paths (e.g. with data speculation and
6528              recovery blocks).  */
6529           if (sched_verbose >= 6)
6530             sel_print ("Block %d already visited in this traversal\n", bb->index);
6531           if (code_motion_path_driver_info->on_enter)
6532             return code_motion_path_driver_info->on_enter (insn,
6533                                                            local_params_in,
6534                                                            static_params,
6535                                                            true);
6536         }
6537     }
6538 
6539   if (code_motion_path_driver_info->on_enter)
6540     code_motion_path_driver_info->on_enter (insn, local_params_in,
6541                                             static_params, false);
6542   orig_ops = av_set_copy (orig_ops);
6543 
6544   /* Filter the orig_ops set.  */
6545   if (AV_SET_VALID_P (insn))
6546     av_set_code_motion_filter (&orig_ops, AV_SET (insn));
6547 
6548   /* If no more original ops, return immediately.  */
6549   if (!orig_ops)
6550     {
6551       if (sched_verbose >= 6)
6552         sel_print ("No intersection with av set of block %d\n", bb->index);
6553       return false;
6554     }
6555 
6556   /* For non-speculative insns we have to leave only one form of the
6557      original operation, because if we don't, we may end up with
6558      different C_EXPRs and, consequently, with bookkeeping copies for
6559      different expression forms along the same code motion path.  That
6560      may lead to generation of incorrect code.  So for each code motion
6561      we stick to the single form of the instruction, except for
6562      speculative insns which we need to keep in different forms with all
6563      speculation types.  */
6564   av_set_leave_one_nonspec (&orig_ops);
6565 
6566   /* It is not possible that all ORIG_OPS are filtered out.  */
6567   gcc_assert (orig_ops);
6568 
6569   /* It is enough to place only heads and tails of visited basic blocks into
6570      the PATH.  */
6571   ilist_add (&path, insn);
6572   first_insn = insn;
6573   bb_tail = sel_bb_end (bb);
6574 
6575   /* Descend the basic block in search of the original expr; this part
6576      corresponds to the part of the original move_op procedure executed
6577      before the recursive call.  */
6578   for (;;)
6579     {
6580       /* Look at the insn and decide if it could be an ancestor of currently
6581 	 scheduling operation.  If it is so, then the insn "dest = op" could
6582 	 either be replaced with "dest = reg", because REG now holds the result
6583 	 of OP, or just removed, if we've scheduled the insn as a whole.
6584 
6585 	 If this insn doesn't contain currently scheduling OP, then proceed
6586 	 with searching and look at its successors.  Operations we're searching
6587 	 for could have changed when moving up through this insn via
6588 	 substituting.  In this case, perform unsubstitution on them first.
6589 
6590 	 When traversing the DAG below this insn is finished, insert
6591 	 bookkeeping code, if the insn is a join point, and remove
6592 	 leftovers.  */
6593 
6594       expr = av_set_lookup (orig_ops, INSN_VINSN (insn));
6595       if (expr)
6596 	{
6597 	  insn_t last_insn = PREV_INSN (insn);
6598 
6599 	  /* We have found the original operation.   */
6600           if (sched_verbose >= 6)
6601             sel_print ("Found original operation at insn %d\n", INSN_UID (insn));
6602 
6603 	  code_motion_path_driver_info->orig_expr_found
6604             (insn, expr, local_params_in, static_params);
6605 
6606 	  /* Step back, so on the way back we'll start traversing from the
6607 	     previous insn (or we'll see that it's bb_note and skip that
6608 	     loop).  */
6609           if (insn == first_insn)
6610             {
6611               first_insn = NEXT_INSN (last_insn);
6612               removed_last_insn = sel_bb_end_p (last_insn);
6613             }
6614 	  insn = last_insn;
6615 	  break;
6616 	}
6617       else
6618 	{
6619 	  /* We haven't found the original expr, continue descending the basic
6620 	     block.  */
6621 	  if (code_motion_path_driver_info->orig_expr_not_found
6622               (insn, orig_ops, static_params))
6623 	    {
6624 	      /* Av set ops could have been changed when moving through this
6625 	         insn.  To find them below it, we have to un-substitute them.  */
6626 	      undo_transformations (&orig_ops, insn);
6627 	    }
6628 	  else
6629 	    {
6630 	      /* Clean up and return, if the hook tells us to do so.  It may
6631 		 happen if we've encountered the previously created
6632 		 bookkeeping.  */
6633 	      code_motion_path_driver_cleanup (&orig_ops, &path);
6634 	      return -1;
6635 	    }
6636 
6637 	  gcc_assert (orig_ops);
6638         }
6639 
6640       /* Stop at insn if we got to the end of BB.  */
6641       if (insn == bb_tail)
6642 	break;
6643 
6644       insn = NEXT_INSN (insn);
6645     }
6646 
6647   /* Here INSN either points to the insn before the original insn (may be
6648      bb_note, if original insn was a bb_head) or to the bb_end.  */
6649   if (!expr)
6650     {
6651       int res;
6652       rtx last_insn = PREV_INSN (insn);
6653       bool added_to_path;
6654 
6655       gcc_assert (insn == sel_bb_end (bb));
6656 
6657       /* Add bb tail to PATH (but it doesn't make any sense if it's a bb_head -
6658 	 it's already in PATH then).  */
6659       if (insn != first_insn)
6660 	{
6661 	  ilist_add (&path, insn);
6662 	  added_to_path = true;
6663 	}
6664       else
6665         added_to_path = false;
6666 
6667       /* Process_successors should be able to find at least one
6668 	 successor for which code_motion_path_driver returns TRUE.  */
6669       res = code_motion_process_successors (insn, orig_ops,
6670                                             path, static_params);
6671 
6672       /* The jump at the end of the basic block could have been removed or
6673          replaced during code_motion_process_successors, so recompute insn
6674          as the last insn in bb.  */
6675       if (NEXT_INSN (last_insn) != insn)
6676         {
6677           insn = sel_bb_end (bb);
6678           first_insn = sel_bb_head (bb);
6679         }
6680 
6681       /* Remove bb tail from path.  */
6682       if (added_to_path)
6683 	ilist_remove (&path);
6684 
6685       if (res != 1)
6686 	{
6687 	  /* This is the case when one of the original exprs is no longer available
6688 	     due to bookkeeping created on this branch with the same register.
6689 	     In the original algorithm, which doesn't have update_data_sets call
6690 	     on a bookkeeping block, it would simply result in returning
6691 	     FALSE when we've encountered a previously generated bookkeeping
6692 	     insn in moveop_orig_expr_not_found.  */
6693 	  code_motion_path_driver_cleanup (&orig_ops, &path);
6694 	  return res;
6695 	}
6696     }
6697 
6698   /* Don't need it any more.  */
6699   av_set_clear (&orig_ops);
6700 
6701   /* Backward pass: now, when we have C_EXPR computed, we'll drag it to
6702      the beginning of the basic block.  */
6703   before_first = PREV_INSN (first_insn);
6704   while (insn != before_first)
6705     {
6706       if (code_motion_path_driver_info->ascend)
6707 	code_motion_path_driver_info->ascend (insn, static_params);
6708 
6709       insn = PREV_INSN (insn);
6710     }
6711 
6712   /* Now we're at the bb head.  */
6713   insn = first_insn;
6714   ilist_remove (&path);
6715   local_params_in->removed_last_insn = removed_last_insn;
6716   code_motion_path_driver_info->at_first_insn (insn, local_params_in, static_params);
6717 
6718   /* This should be the very last operation as at bb head we could change
6719      the numbering by creating bookkeeping blocks.  */
6720   if (removed_last_insn)
6721     insn = PREV_INSN (insn);
6722   bitmap_set_bit (code_motion_visited_blocks, BLOCK_FOR_INSN (insn)->index);
6723   return true;
6724 }
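/* A hand-written pseudo-code summary of the driver above (a sketch only,
   omitting the corner cases of the real control flow):

       filter ORIG_OPS by the av set of the block;
       descend from INSN to the bb end, looking ORIG_OPS up;   -- 1st loop
       if not found in this block
         res = code_motion_process_successors (...);           -- recursion
       ascend back to the bb head, invoking the ascend hook
         (moveup_expr_cached for move_op) on each insn;        -- 2nd loop
       invoke the at_first_insn hook (bookkeeping, data sets).  */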
6725 
6726 /* Move up the operations from ORIG_OPS set traversing the dag starting
6727    from INSN.  PATH represents the edges traversed so far.
6728    DEST is the register chosen for scheduling the current expr.  Insert
6729    bookkeeping code in the join points.  EXPR_VLIW is the chosen expression,
6730    C_EXPR is what it looks like at the given cfg point.
6731    Set *SHOULD_MOVE to indicate whether we have only disconnected
6732    one of the insns found.
6733 
6734    Returns whether original instructions were found, which is asserted
6735    to be true in the caller.  */
6736 static bool
6737 move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
6738          rtx dest, expr_t c_expr, bool *should_move)
6739 {
6740   struct moveop_static_params sparams;
6741   struct cmpd_local_params lparams;
6742   int res;
6743 
6744   /* Init params for code_motion_path_driver.  */
6745   sparams.dest = dest;
6746   sparams.c_expr = c_expr;
6747   sparams.uid = INSN_UID (EXPR_INSN_RTX (expr_vliw));
6748 #ifdef ENABLE_CHECKING
6749   sparams.failed_insn = NULL;
6750 #endif
6751   sparams.was_renamed = false;
6752   lparams.e1 = NULL;
6753 
6754   /* We haven't visited any blocks yet.  */
6755   bitmap_clear (code_motion_visited_blocks);
6756 
6757   /* Set appropriate hooks and data.  */
6758   code_motion_path_driver_info = &move_op_hooks;
6759   res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
6760 
6761   gcc_assert (res != -1);
6762 
6763   if (sparams.was_renamed)
6764     EXPR_WAS_RENAMED (expr_vliw) = true;
6765 
6766   *should_move = (sparams.uid == -1);
6767 
6768   return res;
6769 }
6770 
6771 
6772 /* Functions that work with regions.  */
6773 
6774 /* The current seqno used in init_seqno and init_seqno_1.  */
6775 static int cur_seqno;
6776 
6777 /* A helper for init_seqno.  Traverse the region starting from BB and
6778    compute seqnos for visited insns, marking visited bbs in VISITED_BBS.
6779    Clear visited blocks from BLOCKS_TO_RESCHEDULE.  */
6780 static void
6781 init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
6782 {
6783   int bbi = BLOCK_TO_BB (bb->index);
6784   insn_t insn, note = bb_note (bb);
6785   insn_t succ_insn;
6786   succ_iterator si;
6787 
6788   bitmap_set_bit (visited_bbs, bbi);
6789   if (blocks_to_reschedule)
6790     bitmap_clear_bit (blocks_to_reschedule, bb->index);
6791 
6792   FOR_EACH_SUCC_1 (succ_insn, si, BB_END (bb),
6793 		   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
6794     {
6795       basic_block succ = BLOCK_FOR_INSN (succ_insn);
6796       int succ_bbi = BLOCK_TO_BB (succ->index);
6797 
6798       gcc_assert (in_current_region_p (succ));
6799 
6800       if (!bitmap_bit_p (visited_bbs, succ_bbi))
6801 	{
6802 	  gcc_assert (succ_bbi > bbi);
6803 
6804 	  init_seqno_1 (succ, visited_bbs, blocks_to_reschedule);
6805 	}
6806       else if (blocks_to_reschedule)
6807         bitmap_set_bit (forced_ebb_heads, succ->index);
6808     }
6809 
6810   for (insn = BB_END (bb); insn != note; insn = PREV_INSN (insn))
6811     INSN_SEQNO (insn) = cur_seqno--;
6812 }
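/* A worked example for the numbering above: take a region with blocks
   A -> B -> C, two insns in each, and cur_seqno starting at 6.  The
   recursion reaches C first, so C is numbered first (its BB_END gets 6,
   the previous insn 5), then B (4, 3), then A (2, 1).  Seqnos therefore
   grow from the region head towards its tail:  A: 1, 2;  B: 3, 4;
   C: 5, 6.  */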
6813 
6814 /* Initialize seqnos for the current region.  BLOCKS_TO_RESCHEDULE contains
6815    blocks on which we're rescheduling when pipelining, FROM is the block where
6816    traversing the region begins (it may not be the head of the region when
6817    pipelining, but the head of the loop instead).
6818 
6819    Returns the maximal seqno found.  */
6820 static int
6821 init_seqno (bitmap blocks_to_reschedule, basic_block from)
6822 {
6823   sbitmap visited_bbs;
6824   bitmap_iterator bi;
6825   unsigned bbi;
6826 
6827   visited_bbs = sbitmap_alloc (current_nr_blocks);
6828 
6829   if (blocks_to_reschedule)
6830     {
6831       bitmap_ones (visited_bbs);
6832       EXECUTE_IF_SET_IN_BITMAP (blocks_to_reschedule, 0, bbi, bi)
6833         {
6834 	  gcc_assert (BLOCK_TO_BB (bbi) < current_nr_blocks);
6835           bitmap_clear_bit (visited_bbs, BLOCK_TO_BB (bbi));
6836 	}
6837     }
6838   else
6839     {
6840       bitmap_clear (visited_bbs);
6841       from = EBB_FIRST_BB (0);
6842     }
6843 
6844   cur_seqno = sched_max_luid - 1;
6845   init_seqno_1 (from, visited_bbs, blocks_to_reschedule);
6846 
6847   /* cur_seqno may be positive if the number of instructions is less than
6848      sched_max_luid - 1 (when rescheduling or if some instructions have been
6849      removed by the call to purge_empty_blocks in sel_sched_region_1).  */
6850   gcc_assert (cur_seqno >= 0);
6851 
6852   sbitmap_free (visited_bbs);
6853   return sched_max_luid - 1;
6854 }
6855 
6856 /* Initialize scheduling parameters for current region.  */
6857 static void
6858 sel_setup_region_sched_flags (void)
6859 {
6860   enable_schedule_as_rhs_p = 1;
6861   bookkeeping_p = 1;
6862   pipelining_p = (bookkeeping_p
6863                   && (flag_sel_sched_pipelining != 0)
6864 		  && current_loop_nest != NULL
6865 		  && loop_has_exit_edges (current_loop_nest));
6866   max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME);
6867   max_ws = MAX_WS;
6868 }
6869 
6870 /* Return true if all basic blocks of current region are empty.  */
6871 static bool
6872 current_region_empty_p (void)
6873 {
6874   int i;
6875   for (i = 0; i < current_nr_blocks; i++)
6876     if (! sel_bb_empty_p (BASIC_BLOCK (BB_TO_BLOCK (i))))
6877       return false;
6878 
6879   return true;
6880 }
6881 
6882 /* Prepare and verify loop nest for pipelining.  */
6883 static void
6884 setup_current_loop_nest (int rgn, bb_vec_t *bbs)
6885 {
6886   current_loop_nest = get_loop_nest_for_rgn (rgn);
6887 
6888   if (!current_loop_nest)
6889     return;
6890 
6891   /* If this loop has any saved loop preheaders from nested loops,
6892      add these basic blocks to the current region.  */
6893   sel_add_loop_preheaders (bbs);
6894 
6895   /* Check that we're starting with valid information.  */
6896   gcc_assert (loop_latch_edge (current_loop_nest));
6897   gcc_assert (LOOP_MARKED_FOR_PIPELINING_P (current_loop_nest));
6898 }
6899 
6900 /* Compute instruction priorities for current region.  */
6901 static void
6902 sel_compute_priorities (int rgn)
6903 {
6904   sched_rgn_compute_dependencies (rgn);
6905 
6906   /* Compute insn priorities in haifa style.  Then free haifa style
6907      dependencies that we've calculated for this.  */
6908   compute_priorities ();
6909 
6910   if (sched_verbose >= 5)
6911     debug_rgn_dependencies (0);
6912 
6913   free_rgn_deps ();
6914 }
6915 
6916 /* Init scheduling data for RGN.  Returns true when this region should not
6917    be scheduled.  */
6918 static bool
6919 sel_region_init (int rgn)
6920 {
6921   int i;
6922   bb_vec_t bbs;
6923 
6924   rgn_setup_region (rgn);
6925 
6926   /* Even if sched_is_disabled_for_current_region_p() is true, we still
6927      do region initialization here so the region can be bundled correctly,
6928      but we'll skip the scheduling in sel_sched_region ().  */
6929   if (current_region_empty_p ())
6930     return true;
6931 
6932   bbs.create (current_nr_blocks);
6933 
6934   for (i = 0; i < current_nr_blocks; i++)
6935     bbs.quick_push (BASIC_BLOCK (BB_TO_BLOCK (i)));
6936 
6937   sel_init_bbs (bbs);
6938 
6939   if (flag_sel_sched_pipelining)
6940     setup_current_loop_nest (rgn, &bbs);
6941 
6942   sel_setup_region_sched_flags ();
6943 
6944   /* Initialize luids and dependence analysis which both sel-sched and haifa
6945      need.  */
6946   sched_init_luids (bbs);
6947   sched_deps_init (false);
6948 
6949   /* Initialize haifa data.  */
6950   rgn_setup_sched_infos ();
6951   sel_set_sched_flags ();
6952   haifa_init_h_i_d (bbs);
6953 
6954   sel_compute_priorities (rgn);
6955   init_deps_global ();
6956 
6957   /* Main initialization.  */
6958   sel_setup_sched_infos ();
6959   sel_init_global_and_expr (bbs);
6960 
6961   bbs.release ();
6962 
6963   blocks_to_reschedule = BITMAP_ALLOC (NULL);
6964 
6965   /* Init correct liveness sets on each instruction of a single-block loop.
6966      This is the only situation when we can't update liveness when calling
6967      compute_live for the first insn of the loop.  */
6968   if (current_loop_nest)
6969     {
6970       int header = (sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0)))
6971                     ? 1
6972                     : 0);
6973 
6974       if (current_nr_blocks == header + 1)
6975         update_liveness_on_insn
6976           (sel_bb_head (BASIC_BLOCK (BB_TO_BLOCK (header))));
6977     }
6978 
6979   /* Set hooks so that no newly generated insn will go out unnoticed.  */
6980   sel_register_cfg_hooks ();
6981 
6982   /* !!! We call target.sched.init () for the whole region, but we invoke
6983      targetm.sched.finish () for every ebb.  */
6984   if (targetm.sched.init)
6985     /* None of the arguments are actually used in any target.  */
6986     targetm.sched.init (sched_dump, sched_verbose, -1);
6987 
6988   first_emitted_uid = get_max_uid () + 1;
6989   preheader_removed = false;
6990 
6991   /* Reset register allocation ticks array.  */
6992   memset (reg_rename_tick, 0, sizeof reg_rename_tick);
6993   reg_rename_this_tick = 0;
6994 
6995   bitmap_initialize (forced_ebb_heads, 0);
6996   bitmap_clear (forced_ebb_heads);
6997 
6998   setup_nop_vinsn ();
6999   current_copies = BITMAP_ALLOC (NULL);
7000   current_originators = BITMAP_ALLOC (NULL);
7001   code_motion_visited_blocks = BITMAP_ALLOC (NULL);
7002 
7003   return false;
7004 }
7005 
7006 /* Simplify insns after the scheduling.  */
7007 static void
7008 simplify_changed_insns (void)
7009 {
7010   int i;
7011 
7012   for (i = 0; i < current_nr_blocks; i++)
7013     {
7014       basic_block bb = BASIC_BLOCK (BB_TO_BLOCK (i));
7015       rtx insn;
7016 
7017       FOR_BB_INSNS (bb, insn)
7018 	if (INSN_P (insn))
7019 	  {
7020 	    expr_t expr = INSN_EXPR (insn);
7021 
7022 	    if (EXPR_WAS_SUBSTITUTED (expr))
7023 	      validate_simplify_insn (insn);
7024 	  }
7025     }
7026 }
7027 
7028 /* Find boundaries of the EBB starting from basic block BB, marking blocks of
7029    this EBB in SCHEDULED_BLOCKS and appropriately filling in HEAD, TAIL,
7030    PREV_HEAD, and NEXT_TAIL fields of CURRENT_SCHED_INFO structure.  */
7031 static void
7032 find_ebb_boundaries (basic_block bb, bitmap scheduled_blocks)
7033 {
7034   insn_t head, tail;
7035   basic_block bb1 = bb;
7036   if (sched_verbose >= 2)
7037     sel_print ("Finishing schedule in bbs: ");
7038 
7039   do
7040     {
7041       bitmap_set_bit (scheduled_blocks, BLOCK_TO_BB (bb1->index));
7042 
7043       if (sched_verbose >= 2)
7044 	sel_print ("%d; ", bb1->index);
7045     }
7046   while (!bb_ends_ebb_p (bb1) && (bb1 = bb_next_bb (bb1)));
7047 
7048   if (sched_verbose >= 2)
7049     sel_print ("\n");
7050 
7051   get_ebb_head_tail (bb, bb1, &head, &tail);
7052 
7053   current_sched_info->head = head;
7054   current_sched_info->tail = tail;
7055   current_sched_info->prev_head = PREV_INSN (head);
7056   current_sched_info->next_tail = NEXT_INSN (tail);
7057 }
7058 
7059 /* Regenerate INSN_SCHED_CYCLEs for insns of current EBB.  */
7060 static void
7061 reset_sched_cycles_in_current_ebb (void)
7062 {
7063   int last_clock = 0;
7064   int haifa_last_clock = -1;
7065   int haifa_clock = 0;
7066   int issued_insns = 0;
7067   insn_t insn;
7068 
7069   if (targetm.sched.init)
7070     {
7071       /* None of the arguments are actually used in any target.
7072 	 NB: We should have an md_reset () hook for cases like this.  */
7073       targetm.sched.init (sched_dump, sched_verbose, -1);
7074     }
7075 
7076   state_reset (curr_state);
7077   advance_state (curr_state);
7078 
7079   for (insn = current_sched_info->head;
7080        insn != current_sched_info->next_tail;
7081        insn = NEXT_INSN (insn))
7082     {
7083       int cost, haifa_cost;
7084       int sort_p;
7085       bool asm_p, real_insn, after_stall, all_issued;
7086       int clock;
7087 
7088       if (!INSN_P (insn))
7089 	continue;
7090 
7091       asm_p = false;
7092       real_insn = recog_memoized (insn) >= 0;
7093       clock = INSN_SCHED_CYCLE (insn);
7094 
7095       cost = clock - last_clock;
7096 
7097       /* Initialize HAIFA_COST.  */
7098       if (! real_insn)
7099 	{
7100 	  asm_p = INSN_ASM_P (insn);
7101 
7102 	  if (asm_p)
7103 	    /* This is an asm insn which *had* to be scheduled first
7104 	       on the cycle.  */
7105 	    haifa_cost = 1;
7106 	  else
7107 	    /* This is a use/clobber insn.  It should not change
7108 	       cost.  */
7109 	    haifa_cost = 0;
7110 	}
7111       else
7112         haifa_cost = estimate_insn_cost (insn, curr_state);
7113 
7114       /* Stall for whatever cycles we've stalled before.  */
7115       after_stall = 0;
7116       if (INSN_AFTER_STALL_P (insn) && cost > haifa_cost)
7117         {
7118           haifa_cost = cost;
7119           after_stall = 1;
7120         }
7121       all_issued = issued_insns == issue_rate;
7122       if (haifa_cost == 0 && all_issued)
7123 	haifa_cost = 1;
7124       if (haifa_cost > 0)
7125 	{
7126 	  int i = 0;
7127 
7128 	  while (haifa_cost--)
7129 	    {
7130 	      advance_state (curr_state);
7131 	      issued_insns = 0;
7132               i++;
7133 
7134 	      if (sched_verbose >= 2)
7135                 {
7136                   sel_print ("advance_state (state_transition)\n");
7137                   debug_state (curr_state);
7138                 }
7139 
7140               /* The DFA may report that e.g. an insn requires 2 cycles to be
7141                  issued, but on the next cycle it says that the insn is ready
7142                  to go.  Check this here.  */
7143               if (!after_stall
7144                   && real_insn
7145                   && haifa_cost > 0
7146                   && estimate_insn_cost (insn, curr_state) == 0)
7147                 break;
7148 
7149               /* When the data dependency stall is longer than the DFA stall,
7150                  and when we have issued exactly issue_rate insns and stalled,
7151                  it could be that after this longer stall the insn will again
7152                  become unavailable due to the DFA restrictions.  This looks
7153                  strange, but it happens e.g. on x86-64.  So recheck the DFA
7154                  on the last iteration.  */
7155               if ((after_stall || all_issued)
7156                   && real_insn
7157                   && haifa_cost == 0)
7158                 haifa_cost = estimate_insn_cost (insn, curr_state);
7159             }
7160 
7161 	  haifa_clock += i;
7162           if (sched_verbose >= 2)
7163             sel_print ("haifa clock: %d\n", haifa_clock);
7164 	}
7165       else
7166 	gcc_assert (haifa_cost == 0);
7167 
7168       if (sched_verbose >= 2)
7169 	sel_print ("Haifa cost for insn %d: %d\n", INSN_UID (insn), haifa_cost);
7170 
7171       if (targetm.sched.dfa_new_cycle)
7172 	while (targetm.sched.dfa_new_cycle (sched_dump, sched_verbose, insn,
7173 					    haifa_last_clock, haifa_clock,
7174 					    &sort_p))
7175 	  {
7176 	    advance_state (curr_state);
7177 	    issued_insns = 0;
7178 	    haifa_clock++;
7179 	    if (sched_verbose >= 2)
7180               {
7181                 sel_print ("advance_state (dfa_new_cycle)\n");
7182                 debug_state (curr_state);
7183 		sel_print ("haifa clock: %d\n", haifa_clock + 1);
7184               }
7185           }
7186 
7187       if (real_insn)
7188 	{
7189 	  static state_t temp = NULL;
7190 
7191 	  if (!temp)
7192 	    temp = xmalloc (dfa_state_size);
7193 	  memcpy (temp, curr_state, dfa_state_size);
7194 
7195 	  cost = state_transition (curr_state, insn);
7196 	  if (memcmp (temp, curr_state, dfa_state_size))
7197 	    issued_insns++;
7198 
7199           if (sched_verbose >= 2)
7200 	    {
7201 	      sel_print ("scheduled insn %d, clock %d\n", INSN_UID (insn),
7202 			 haifa_clock + 1);
7203               debug_state (curr_state);
7204 	    }
7205 	  gcc_assert (cost < 0);
7206 	}
7207 
7208       if (targetm.sched.variable_issue)
7209 	targetm.sched.variable_issue (sched_dump, sched_verbose, insn, 0);
7210 
7211       INSN_SCHED_CYCLE (insn) = haifa_clock;
7212 
7213       last_clock = clock;
7214       haifa_last_clock = haifa_clock;
7215     }
7216 }
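/* Note that the pass above replays the schedule through the target DFA,
   so the final INSN_SCHED_CYCLE values account for structural hazards the
   selective scheduler may have ignored.  E.g. (a hypothetical two-insn
   case) if both insns were given the same cycle but the DFA can issue
   only one of them per cycle, estimate_insn_cost forces an advance_state
   for the second insn, which then ends up one cycle later.  */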
7217 
7218 /* Put TImode markers on insns starting a new issue group.  */
7219 static void
7220 put_TImodes (void)
7221 {
7222   int last_clock = -1;
7223   insn_t insn;
7224 
7225   for (insn = current_sched_info->head; insn != current_sched_info->next_tail;
7226        insn = NEXT_INSN (insn))
7227     {
7228       int cost, clock;
7229 
7230       if (!INSN_P (insn))
7231 	continue;
7232 
7233       clock = INSN_SCHED_CYCLE (insn);
7234       cost = (last_clock == -1) ? 1 : clock - last_clock;
7235 
7236       gcc_assert (cost >= 0);
7237 
7238       if (issue_rate > 1
7239 	  && GET_CODE (PATTERN (insn)) != USE
7240 	  && GET_CODE (PATTERN (insn)) != CLOBBER)
7241 	{
7242 	  if (reload_completed && cost > 0)
7243 	    PUT_MODE (insn, TImode);
7244 
7245 	  last_clock = clock;
7246 	}
7247 
7248       if (sched_verbose >= 2)
7249 	sel_print ("Cost for insn %d is %d\n", INSN_UID (insn), cost);
7250     }
7251 }
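/* A worked example for the marking above (issue_rate == 2, after reload):
   for three insns with INSN_SCHED_CYCLE values 1, 1 and 2 the computed
   costs are 1, 0 and 1, so the first and the third insn start new issue
   groups and get TImode, while the second does not.  */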
7252 
7253 /* Perform MD_FINISH on EBBs comprising current region.  When
7254    RESET_SCHED_CYCLES_P is true, run a pass emulating the scheduler
7255    to produce correct sched cycles on insns.  */
7256 static void
7257 sel_region_target_finish (bool reset_sched_cycles_p)
7258 {
7259   int i;
7260   bitmap scheduled_blocks = BITMAP_ALLOC (NULL);
7261 
7262   for (i = 0; i < current_nr_blocks; i++)
7263     {
7264       if (bitmap_bit_p (scheduled_blocks, i))
7265 	continue;
7266 
7267       /* While pipelining outer loops, skip bundling for loop
7268 	 preheaders.  Those will be rescheduled in the outer loop.  */
7269       if (sel_is_loop_preheader_p (EBB_FIRST_BB (i)))
7270 	continue;
7271 
7272       find_ebb_boundaries (EBB_FIRST_BB (i), scheduled_blocks);
7273 
7274       if (no_real_insns_p (current_sched_info->head, current_sched_info->tail))
7275 	continue;
7276 
7277       if (reset_sched_cycles_p)
7278 	reset_sched_cycles_in_current_ebb ();
7279 
7280       if (targetm.sched.init)
7281 	targetm.sched.init (sched_dump, sched_verbose, -1);
7282 
7283       put_TImodes ();
7284 
7285       if (targetm.sched.finish)
7286 	{
7287 	  targetm.sched.finish (sched_dump, sched_verbose);
7288 
7289 	  /* Extend luids so that insns generated by the target will
7290 	     get zero luid.  */
7291 	  sched_extend_luids ();
7292 	}
7293     }
7294 
7295   BITMAP_FREE (scheduled_blocks);
7296 }
7297 
7298 /* Free the scheduling data for the current region.  When RESET_SCHED_CYCLES_P
7299    is true, make an additional pass emulating the scheduler to get correct insn
7300    cycles for md_finish calls.  */
7301 static void
7302 sel_region_finish (bool reset_sched_cycles_p)
7303 {
7304   simplify_changed_insns ();
7305   sched_finish_ready_list ();
7306   free_nop_pool ();
7307 
7308   /* Free the vectors.  */
7309   vec_av_set.release ();
7310   BITMAP_FREE (current_copies);
7311   BITMAP_FREE (current_originators);
7312   BITMAP_FREE (code_motion_visited_blocks);
7313   vinsn_vec_free (vec_bookkeeping_blocked_vinsns);
7314   vinsn_vec_free (vec_target_unavailable_vinsns);
7315 
7316   /* If LV_SET of the region head should be updated, do it now because
7317      there will be no other chance.  */
7318   {
7319     succ_iterator si;
7320     insn_t insn;
7321 
7322     FOR_EACH_SUCC_1 (insn, si, bb_note (EBB_FIRST_BB (0)),
7323                      SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
7324       {
7325 	basic_block bb = BLOCK_FOR_INSN (insn);
7326 
7327 	if (!BB_LV_SET_VALID_P (bb))
7328 	  compute_live (insn);
7329       }
7330   }
7331 
7332   /* Emulate the Haifa scheduler for bundling.  */
7333   if (reload_completed)
7334     sel_region_target_finish (reset_sched_cycles_p);
7335 
7336   sel_finish_global_and_expr ();
7337 
7338   bitmap_clear (forced_ebb_heads);
7339 
7340   free_nop_vinsn ();
7341 
7342   finish_deps_global ();
7343   sched_finish_luids ();
7344   h_d_i_d.release ();
7345 
7346   sel_finish_bbs ();
7347   BITMAP_FREE (blocks_to_reschedule);
7348 
7349   sel_unregister_cfg_hooks ();
7350 
7351   max_issue_size = 0;
7352 }
7353 
7354 
7355 /* Functions that implement the scheduler driver.  */
7356 
7357 /* Schedule a parallel instruction group on each of FENCES.  MAX_SEQNO
7358    is the current maximum seqno.  SCHEDULED_INSNS_TAILPP points to the tail
7359    of the list of scheduled insns -- these will be postprocessed later.  */
7360 static void
7361 schedule_on_fences (flist_t fences, int max_seqno,
7362                     ilist_t **scheduled_insns_tailpp)
7363 {
7364   flist_t old_fences = fences;
7365 
7366   if (sched_verbose >= 1)
7367     {
7368       sel_print ("\nScheduling on fences: ");
7369       dump_flist (fences);
7370       sel_print ("\n");
7371     }
7372 
7373   scheduled_something_on_previous_fence = false;
7374   for (; fences; fences = FLIST_NEXT (fences))
7375     {
7376       fence_t fence = NULL;
7377       int seqno = 0;
7378       flist_t fences2;
7379       bool first_p = true;
7380 
7381       /* Choose the next fence group to schedule.
7382          The fact that an insn can be scheduled only once
7383          on a cycle is guaranteed by two properties:
7384          1. seqnos of parallel groups decrease with each iteration.
7385          2. If is_ineligible_successor () sees the larger seqno, it
7386          checks if candidate insn is_in_current_fence_p ().  */
7387       for (fences2 = old_fences; fences2; fences2 = FLIST_NEXT (fences2))
7388         {
7389           fence_t f = FLIST_FENCE (fences2);
7390 
7391           if (!FENCE_PROCESSED_P (f))
7392             {
7393               int i = INSN_SEQNO (FENCE_INSN (f));
7394 
7395               if (first_p || i > seqno)
7396                 {
7397                   seqno = i;
7398                   fence = f;
7399                   first_p = false;
7400                 }
7401               else
7402                 /* ??? Seqnos of different groups should be different.  */
7403                 gcc_assert (1 || i != seqno);
7404             }
7405         }
7406 
7407       gcc_assert (fence);
7408 
7409       /* As FENCE is nonnull, SEQNO is initialized.  */
7410       seqno -= max_seqno + 1;
7411       fill_insns (fence, seqno, scheduled_insns_tailpp);
7412       FENCE_PROCESSED_P (fence) = true;
7413     }
7414 
7415   /* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we
7416      don't need to keep bookkeeping-invalidated and target-unavailable
7417      vinsns any more.  */
7418   vinsn_vec_clear (&vec_bookkeeping_blocked_vinsns);
7419   vinsn_vec_clear (&vec_target_unavailable_vinsns);
7420 }
7421 
7422 /* Calculate MIN_SEQNO and MAX_SEQNO.  */
7423 static void
7424 find_min_max_seqno (flist_t fences, int *min_seqno, int *max_seqno)
7425 {
7426   *min_seqno = *max_seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
7427 
7428   /* The first element is already processed.  */
7429   while ((fences = FLIST_NEXT (fences)))
7430     {
7431       int seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
7432 
7433       if (*min_seqno > seqno)
7434         *min_seqno = seqno;
7435       else if (*max_seqno < seqno)
7436         *max_seqno = seqno;
7437     }
7438 }
7439 
7440 /* Calculate new fences from FENCES.  */
7441 static flist_t
7442 calculate_new_fences (flist_t fences, int orig_max_seqno)
7443 {
7444   flist_t old_fences = fences;
7445   struct flist_tail_def _new_fences, *new_fences = &_new_fences;
7446 
7447   flist_tail_init (new_fences);
7448   for (; fences; fences = FLIST_NEXT (fences))
7449     {
7450       fence_t fence = FLIST_FENCE (fences);
7451       insn_t insn;
7452 
7453       if (!FENCE_BNDS (fence))
7454         {
7455           /* This fence doesn't have any successors.  */
7456           if (!FENCE_SCHEDULED_P (fence))
7457             {
7458               /* Nothing was scheduled on this fence.  */
7459               int seqno;
7460 
7461               insn = FENCE_INSN (fence);
7462               seqno = INSN_SEQNO (insn);
7463               gcc_assert (seqno > 0 && seqno <= orig_max_seqno);
7464 
7465               if (sched_verbose >= 1)
7466                 sel_print ("Fence %d[%d] has not changed\n",
7467                            INSN_UID (insn),
7468                            BLOCK_NUM (insn));
7469               move_fence_to_fences (fences, new_fences);
7470             }
7471         }
7472       else
7473         extract_new_fences_from (fences, new_fences, orig_max_seqno);
7474     }
7475 
7476   flist_clear (&old_fences);
7477   return FLIST_TAIL_HEAD (new_fences);
7478 }
7479 
7480 /* Update seqnos of insns given by PSCHEDULED_INSNS.  MIN_SEQNO and MAX_SEQNO
7481    are the minimum and maximum seqnos of the group, HIGHEST_SEQNO_IN_USE is
7482    the highest seqno used in a region.  Return the updated highest seqno.  */
7483 static int
7484 update_seqnos_and_stage (int min_seqno, int max_seqno,
7485                          int highest_seqno_in_use,
7486                          ilist_t *pscheduled_insns)
7487 {
7488   int new_hs;
7489   ilist_iterator ii;
7490   insn_t insn;
7491 
7492   /* Actually, new_hs is the seqno of the instruction, that was
7493      scheduled first (i.e. it is the first one in SCHEDULED_INSNS).  */
7494   if (*pscheduled_insns)
7495     {
7496       new_hs = (INSN_SEQNO (ILIST_INSN (*pscheduled_insns))
7497                 + highest_seqno_in_use + max_seqno - min_seqno + 2);
7498       gcc_assert (new_hs > highest_seqno_in_use);
7499     }
7500   else
7501     new_hs = highest_seqno_in_use;
7502 
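  /* The offset below remaps the negated seqnos assigned in
     schedule_on_fences into the range above HIGHEST_SEQNO_IN_USE.
     A worked example with hypothetical numbers: for MIN_SEQNO == 3,
     MAX_SEQNO == 7 and HIGHEST_SEQNO_IN_USE == 10, an insn scheduled
     at seqno 7 was negated to 7 - (7 + 1) == -1 and now becomes
     -1 + 10 + 7 - 3 + 2 == 15, while one scheduled at seqno 3 becomes
     3 - 8 + 16 == 11; both end up above 10.  */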
  FOR_EACH_INSN (insn, ii, *pscheduled_insns)
    {
      gcc_assert (INSN_SEQNO (insn) < 0);
      INSN_SEQNO (insn) += highest_seqno_in_use + max_seqno - min_seqno + 2;
      gcc_assert (INSN_SEQNO (insn) <= new_hs);

      /* When not pipelining, purge unneeded insn info on the scheduled insns.
         For example, having reg_last array of INSN_DEPS_CONTEXT in memory may
         require > 1GB of memory e.g. on limit-fnargs.c.  */
      if (! pipelining_p)
        free_data_for_scheduled_insn (insn);
    }

  ilist_clear (pscheduled_insns);
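  /* Bumping GLOBAL_LEVEL invalidates all av_sets computed on the
     previous level (see schedule_on_fences).  */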
  global_level++;

  return new_hs;
}

/* The main driver for scheduling a region.  This function is responsible
   for correct propagation of fences (i.e. scheduling points) and creating
   a group of parallel insns at each of them.  It also supports
   pipelining.  ORIG_MAX_SEQNO is the maximal seqno before this pass
   of scheduling.  */
static void
sel_sched_region_2 (int orig_max_seqno)
{
  int highest_seqno_in_use = orig_max_seqno;

  stat_bookkeeping_copies = 0;
  stat_insns_needed_bookkeeping = 0;
  stat_renamed_scheduled = 0;
  stat_substitutions_total = 0;
  num_insns_scheduled = 0;

  while (fences)
    {
      int min_seqno, max_seqno;
      ilist_t scheduled_insns = NULL;
      ilist_t *scheduled_insns_tailp = &scheduled_insns;

      find_min_max_seqno (fences, &min_seqno, &max_seqno);
      schedule_on_fences (fences, max_seqno, &scheduled_insns_tailp);
      fences = calculate_new_fences (fences, orig_max_seqno);
      highest_seqno_in_use = update_seqnos_and_stage (min_seqno, max_seqno,
                                                      highest_seqno_in_use,
                                                      &scheduled_insns);
    }

  if (sched_verbose >= 1)
    sel_print ("Scheduled %d bookkeeping copies, %d insns needed "
               "bookkeeping, %d insns renamed, %d insns substituted\n",
               stat_bookkeeping_copies,
               stat_insns_needed_bookkeeping,
               stat_renamed_scheduled,
               stat_substitutions_total);
}

/* Schedule a region.  When pipelining, search for possibly never scheduled
   bookkeeping code and schedule it.  Afterwards, reschedule the pipelined
   code without pipelining.  */
static void
sel_sched_region_1 (void)
{
  int orig_max_seqno;

  /* Remove empty blocks that might be in the region from the beginning.  */
  purge_empty_blocks ();

  orig_max_seqno = init_seqno (NULL, NULL);
  gcc_assert (orig_max_seqno >= 1);

  /* When pipelining outer loops, create fences on the loop header,
     not preheader.  */
  fences = NULL;
  if (current_loop_nest)
    init_fences (BB_END (EBB_FIRST_BB (0)));
  else
    init_fences (bb_note (EBB_FIRST_BB (0)));
  global_level = 1;

  sel_sched_region_2 (orig_max_seqno);

  gcc_assert (fences == NULL);

  if (pipelining_p)
    {
      int i;
      basic_block bb;
      struct flist_tail_def _new_fences;
      flist_tail_t new_fences = &_new_fences;
      bool do_p = true;

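      /* The rescheduling below only needs to pick up code that was
         never scheduled (such as bookkeeping copies created on later
         pipelining stages), so disable pipelining, bookkeeping and
         scheduling of insns as RHSes, and tighten the maximal moveup
         window.  */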
      pipelining_p = false;
      max_ws = MIN (max_ws, issue_rate * 3 / 2);
      bookkeeping_p = false;
      enable_schedule_as_rhs_p = false;

      /* Schedule newly created code that has not been scheduled yet.  */
      do_p = true;

      while (do_p)
        {
          do_p = false;

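          /* First walk over the blocks: propagate the set of blocks to
             reschedule along EBBs and across speculation-check jumps,
             drop blocks that became empty, and mark blocks whose heads
             were never scheduled.  */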
          for (i = 0; i < current_nr_blocks; i++)
            {
              basic_block bb = EBB_FIRST_BB (i);

              if (bitmap_bit_p (blocks_to_reschedule, bb->index))
                {
                  if (! bb_ends_ebb_p (bb))
                    bitmap_set_bit (blocks_to_reschedule, bb_next_bb (bb)->index);
                  if (sel_bb_empty_p (bb))
                    {
                      bitmap_clear_bit (blocks_to_reschedule, bb->index);
                      continue;
                    }
                  clear_outdated_rtx_info (bb);
                  if (sel_insn_is_speculation_check (BB_END (bb))
                      && JUMP_P (BB_END (bb)))
                    bitmap_set_bit (blocks_to_reschedule,
                                    BRANCH_EDGE (bb)->dest->index);
                }
              else if (! sel_bb_empty_p (bb)
                       && INSN_SCHED_TIMES (sel_bb_head (bb)) <= 0)
                bitmap_set_bit (blocks_to_reschedule, bb->index);
            }

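          /* Second walk: schedule from the first block that still
             needs rescheduling, forcing it to start a new EBB, then
             restart the outer loop.  */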
          for (i = 0; i < current_nr_blocks; i++)
            {
              bb = EBB_FIRST_BB (i);

              /* While pipelining outer loops, skip bundling for loop
                 preheaders.  Those will be rescheduled in the outer
                 loop.  */
              if (sel_is_loop_preheader_p (bb))
                {
                  clear_outdated_rtx_info (bb);
                  continue;
                }

              if (bitmap_bit_p (blocks_to_reschedule, bb->index))
                {
                  flist_tail_init (new_fences);

                  orig_max_seqno = init_seqno (blocks_to_reschedule, bb);

                  /* Mark BB as head of the new ebb.  */
                  bitmap_set_bit (forced_ebb_heads, bb->index);

                  gcc_assert (fences == NULL);

                  init_fences (bb_note (bb));

                  sel_sched_region_2 (orig_max_seqno);

                  do_p = true;
                  break;
                }
            }
        }
    }
}

/* Schedule the RGN region.  */
void
sel_sched_region (int rgn)
{
  bool schedule_p;
  bool reset_sched_cycles_p;

  if (sel_region_init (rgn))
    return;

  if (sched_verbose >= 1)
    sel_print ("Scheduling region %d\n", rgn);

  schedule_p = (!sched_is_disabled_for_current_region_p ()
                && dbg_cnt (sel_sched_region_cnt));
  reset_sched_cycles_p = pipelining_p;
  if (schedule_p)
    sel_sched_region_1 ();
  else
    /* Force initialization of INSN_SCHED_CYCLEs for correct bundling.  */
    reset_sched_cycles_p = true;

  sel_region_finish (reset_sched_cycles_p);
}

/* Perform global init for the scheduler.  */
static void
sel_global_init (void)
{
  calculate_dominance_info (CDI_DOMINATORS);
  alloc_sched_pools ();

  /* Set up the infos for sched_init.  */
  sel_setup_sched_infos ();
  setup_sched_dump ();

  sched_rgn_init (false);
  sched_init ();

  sched_init_bbs ();
  /* Reset AFTER_RECOVERY if it has been set by the 1st scheduler pass.  */
  after_recovery = 0;
  can_issue_more = issue_rate;

  sched_extend_target ();
  sched_deps_init (true);
  setup_nop_and_exit_insns ();
  sel_extend_global_bb_info ();
  init_lv_sets ();
  init_hard_regs_data ();
}

/* Free the global data of the scheduler.  */
static void
sel_global_finish (void)
{
  free_bb_note_pool ();
  free_lv_sets ();
  sel_finish_global_bb_info ();

  free_regset_pool ();
  free_nop_and_exit_insns ();

  sched_rgn_finish ();
  sched_deps_finish ();
  sched_finish ();

  if (current_loops)
    sel_finish_pipelining ();

  free_sched_pools ();
  free_dominance_info (CDI_DOMINATORS);
}

/* Return true when we need to skip selective scheduling.  Used for debugging.  */
bool
maybe_skip_selective_scheduling (void)
{
  return ! dbg_cnt (sel_sched_cnt);
}

/* The entry point.  */
void
run_selective_scheduling (void)
{
  int rgn;

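  /* Nothing to schedule when only the fixed entry and exit blocks
     are present.  */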
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  sel_global_init ();

  for (rgn = 0; rgn < nr_regions; rgn++)
    sel_sched_region (rgn);

  sel_global_finish ();
}

#endif