/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "rtl.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "regs.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "params.h"
#include "target.h"
#include "sched-int.h"
#include "rtlhooks-def.h"
#include "ira.h"
#include "ira-int.h"
#include "rtl-iter.h"

#ifdef INSN_SCHEDULING
#include "regset.h"
#include "cfgloop.h"
#include "sel-sched-ir.h"
#include "sel-sched-dump.h"
#include "sel-sched.h"
#include "dbgcnt.h"

/* Implementation of selective scheduling approach.
   The below implementation follows the original approach with the following
   changes:

   o the scheduler works after register allocation (but can be also tuned
   to work before RA);
   o some instructions are not copied or register renamed;
   o conditional jumps are not moved with code duplication;
   o several jumps in one parallel group are not supported;
   o when pipelining outer loops, code motion through inner loops
   is not supported;
   o control and data speculation are supported;
   o some improvements for better compile time/performance were made.

   Terminology
   ===========

   A vinsn, or virtual insn, is an insn with additional data characterizing
   insn pattern, such as LHS, RHS, register sets used/set/clobbered, etc.
   Vinsns also act as smart pointers to save memory by reusing them in
   different expressions.  A vinsn is described by vinsn_t type.

   An expression is a vinsn with additional data characterizing its properties
   at some point in the control flow graph.  The data may be its usefulness,
   priority, speculative status, whether it was renamed/substituted, etc.
   An expression is described by expr_t type.

   Availability set (av_set) is a set of expressions at a given control flow
   point.  It is represented as av_set_t.  The expressions in av sets are kept
   sorted in the terms of expr_greater_p function.  This allows truncating
   the set while leaving only the best expressions.

   A fence is a point through which code motion is prohibited.  On each step,
   we gather a parallel group of insns at a fence.  It is possible to have
   multiple fences.  A fence is represented via fence_t.

   A boundary is the border between the fence group and the rest of the code.
   Currently, we never have more than one boundary per fence, as we finalize
   the fence group when a jump is scheduled.  A boundary is represented
   via bnd_t.

   High-level overview
   ===================

   The scheduler finds regions to schedule, schedules each one, and finalizes.
   The regions are formed starting from innermost loops, so that when the inner
   loop is pipelined, its prologue can be scheduled together with the yet
   unprocessed outer loop.  The remaining acyclic regions are found using
   extend_rgns: the blocks that are not yet allocated to any region are
   traversed in top-down order, and a block is added to the region to which
   all its predecessors belong; otherwise, the block starts a region of
   its own.

   The main scheduling loop (sel_sched_region_2) consists of just
   scheduling on each fence and updating fences.  For each fence,
   we fill a parallel group of insns (fill_insns) for as long as insns can
   be added to it.  First, we compute available exprs (av-set) at the
   boundary of the current group.  Second, we choose the best expression
   from it.  If a stall is required to schedule any of the expressions,
   we advance the current cycle appropriately.  So, the final group does
   not exactly correspond to a VLIW word.  Third, we move the chosen
   expression to the boundary (move_op) and update the intermediate av
   sets and liveness sets.  We quit fill_insns when either no insns are
   left for scheduling or we have scheduled enough insns to justify
   advancing the scheduling point.
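
   As an illustration only (a simplified sketch, not the exact control
   flow of the implementation), one iteration of the main loop is:

     while (there are fences)
       for each fence F:
         compute the av set at the boundary of F;
         choose the best expression from it, stalling if necessary;
         move the expression to the boundary (move_op), inserting
           bookkeeping copies at join points as needed;
       advance all fences; the av sets will be recomputed.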

   Computing available expressions
   ===============================

   The computation (compute_av_set) is a bottom-up traversal.  At each insn,
   we're moving the union of its successors' sets through it via
   moveup_expr_set.  The dependent expressions are removed.  Local
   transformations (substitution, speculation) are applied to move more
   exprs.  Then the expr corresponding to the current insn is added.
   The result is saved on each basic block header.

   When traversing the CFG, we're moving down for no more than max_ws insns.
   Also, we do not move down to ineligible successors (is_ineligible_successor),
   which include moving along a back-edge, moving to already scheduled code,
   and moving to another fence.  The first two restrictions are lifted during
   pipelining, which allows us to move insns along a back-edge.  We always have
   an acyclic region for scheduling because we forbid motion through fences.
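
   Schematically (an illustration, not the exact implementation):

     av_set (insn) = moveup_expr_set (union of av_set (succ) over all
                                      eligible successors of insn)
                     + the expr corresponding to insn itself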

   Choosing the best expression
   ============================

   We sort the final availability set via sel_rank_for_schedule, then we remove
   expressions which are not yet ready (tick_check_p) or whose destination
   registers cannot be used.  For some of them, we choose another register via
   find_best_reg.  To do this, we run find_used_regs to calculate the set of
   registers which cannot be used.  The find_used_regs function performs
   a traversal of code motion paths for an expr.  We consider for renaming
   only registers which are from the same regclass as the original one and
   whose use does not interfere with any live ranges.  Finally, we convert
   the resulting set to the ready list format and use max_issue and reorder*
   hooks similarly to the Haifa scheduler.

   Scheduling the best expression
   ==============================

   We run the move_op routine to perform the same type of code motion paths
   traversal as in find_used_regs.  (These are working via the same driver,
   code_motion_path_driver.)  When moving down the CFG, we look for the
   original instruction that gave birth to the chosen expression.  We undo
   the transformations performed on an expression via the history saved in it.
   When found, we remove the instruction or leave a reg-reg copy/speculation
   check if needed.  On the way up, we insert bookkeeping copies at each join
   point.  If a copy is not needed, it will be removed later during this
   traversal.  We update the saved av sets and liveness sets on the way up, too.
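
   For example (purely illustrative): if "r1 = x + y" from a join block
   bb3 is moved up only through its predecessor bb1, a bookkeeping copy
   of "r1 = x + y" is inserted on the edge from the other predecessor
   bb2, so the path going through bb2 still computes the value:

       bb1        bb2             bb1: r1 = x + y   bb2: r1 = x + y
         \        /        =>            \          /   (bookkeeping)
       bb3: r1 = x + y                 bb3: (original removed)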

   Finalizing the schedule
   =======================

   When pipelining, we reschedule the blocks from which insns were pipelined
   to get a tighter schedule.  On Itanium, we also perform bundling via
   the same routine from ia64.c.

   Dependence analysis changes
   ===========================

   We augmented sched-deps.c with hooks that get called when a particular
   dependence is found in a particular part of an insn.  Using these hooks, we
   can do several actions such as: determine whether an insn can be moved through
   another (has_dependence_p, moveup_expr); find out whether an insn can be
   scheduled on the current cycle (tick_check_p); find out registers that
   are set/used/clobbered by an insn and find out all the strange stuff that
   restricts its movement, like SCHED_GROUP_P or CANT_MOVE (done in
   init_global_and_expr_for_insn).

   Initialization changes
   ======================

   There are parts of haifa-sched.c, sched-deps.c, and sched-rgn.c that are
   reused in all of the schedulers.  We have split up the initialization of data
   of such parts into different functions prefixed with scheduler type and
   postfixed with the type of data initialized: {,sel_,haifa_}sched_{init,finish},
   sched_rgn_init/finish, sched_deps_init/finish, sched_init_{luids/bbs}, etc.
   The same splitting is done with the current_sched_info structure:
   dependence-related parts are in sched_deps_info, the common part is in
   common_sched_info, and the haifa/sel/etc. part is in current_sched_info.

   Target contexts
   ===============

   As we now have multiple-point scheduling, backends that save some of the
   scheduler state to use it in the target hooks would no longer work.
   For this purpose, we introduce the concept of target contexts, which
   encapsulate such information.  The backend should implement simple routines
   for allocating/freeing/setting such a context.  The scheduler calls these
   as target hooks and handles the target context as an opaque pointer (similar
   to the DFA state type, state_t).

   Various speedups
   ================

   As the correct data dependence graph is not supported during scheduling (this
   is to be changed in the mid-term), we cache as much of the dependence
   analysis results as possible to avoid reanalyzing.  This includes: bitmap
   caches on each insn in the stream of the region saying yes/no for a query
   with a pair of UIDs; hashtables with the previously done transformations
   on each insn in the stream; a vector keeping a history of transformations
   on each expr.

   Also, we try to minimize the dependence context used on each fence to check
   whether the given expression is ready for scheduling by removing from it
   insns that have definitely completed execution.  The results of
   tick_check_p checks are also cached in a vector on each fence.

   We keep a valid liveness set on each insn in a region to avoid the high
   cost of recomputation on large basic blocks.

   Finally, we try to minimize the number of needed updates to the availability
   sets.  The updates happen in two cases: when fill_insns terminates,
   we advance all fences and increase the stage number to show that the region
   has changed and the sets are to be recomputed; and when the next iteration
   of a loop in fill_insns happens (but this one reuses the saved av sets
   on bb headers).  Thus, we try to break the fill_insns loop only when
   a "significant" number of insns from the current scheduling window has
   been scheduled.  This should be made a target param.


   TODO: correctly support the data dependence graph at all stages and get rid
   of all caches.  This should speed up the scheduler.
   TODO: implement moving cond jumps with bookkeeping copies on both targets.
   TODO: tune the scheduler before RA so it does not create too many pseudos.


   References:
   S.-M. Moon and K. Ebcioglu.  Parallelizing nonnumerical code with
   selective scheduling and software pipelining.
   ACM TOPLAS, Vol. 19, No. 6, pages 853--898, Nov. 1997.

   Andrey Belevantsev, Maxim Kuvyrkov, Vladimir Makarov, Dmitry Melnik,
   and Dmitry Zhurikhin.  An interblock VLIW-targeted instruction scheduler
   for GCC.  In Proceedings of GCC Developers' Summit 2006.

   Arutyun Avetisyan, Andrey Belevantsev, and Dmitry Melnik.  GCC Instruction
   Scheduler and Software Pipeliner on the Itanium Platform.  EPIC-7 Workshop.
   http://rogue.colorado.edu/EPIC7/.

*/

/* True when pipelining is enabled.  */
bool pipelining_p;

/* True if bookkeeping is enabled.  */
bool bookkeeping_p;

/* Maximum number of insns that are eligible for renaming.  */
int max_insns_to_rename;


/* Definitions of local types and macros.  */

/* Represents possible outcomes of moving an expression through an insn.  */
enum MOVEUP_EXPR_CODE
  {
    /* The expression is not changed.  */
    MOVEUP_EXPR_SAME,

    /* Not changed, but requires a new destination register.  */
    MOVEUP_EXPR_AS_RHS,

    /* Cannot be moved.  */
    MOVEUP_EXPR_NULL,

    /* Changed (substituted or speculated).  */
    MOVEUP_EXPR_CHANGED
  };
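
/* For illustration (these examples are not from the sources): moving the
   expression "r1 = r2 + r3" up through
     "r4 = r5"  gives MOVEUP_EXPR_SAME (no dependence);
     "r1 = r5"  gives MOVEUP_EXPR_AS_RHS (only the rhs "r2 + r3" survives
                and needs a new destination register);
     "r2 = r5"  gives MOVEUP_EXPR_CHANGED (substitution yields
                "r1 = r5 + r3");
     "r2 = mem" gives MOVEUP_EXPR_NULL (a true dependence that cannot be
                substituted away).  */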

/* The container to be passed into rtx search & replace functions.  */
struct rtx_search_arg
{
  /* What we are searching for.  */
  rtx x;

  /* The occurrence counter.  */
  int n;
};

typedef struct rtx_search_arg *rtx_search_arg_p;

/* This struct contains precomputed hard reg sets that are needed when
   computing registers available for renaming.  */
struct hard_regs_data
{
  /* For every mode, this stores registers available for use with
     that mode.  */
  HARD_REG_SET regs_for_mode[NUM_MACHINE_MODES];

  /* True when regs_for_mode[mode] is initialized.  */
  bool regs_for_mode_ok[NUM_MACHINE_MODES];

  /* For every register, it has regs that are ok to rename into it.
     The register in question is always set.  If not, this means
     that the whole set is not computed yet.  */
  HARD_REG_SET regs_for_rename[FIRST_PSEUDO_REGISTER];

  /* For every mode, this stores registers not available due to
     call clobbering.  */
  HARD_REG_SET regs_for_call_clobbered[NUM_MACHINE_MODES];

  /* All registers that are used or call used.  */
  HARD_REG_SET regs_ever_used;

#ifdef STACK_REGS
  /* Stack registers.  */
  HARD_REG_SET stack_regs;
#endif
};

/* Holds the results of the computation of hard registers available
   and unavailable for renaming.  */
struct reg_rename
{
  /* These are unavailable due to calls crossing, globalness, etc.  */
  HARD_REG_SET unavailable_hard_regs;

  /* These are *available* for renaming.  */
  HARD_REG_SET available_for_renaming;

  /* Whether this code motion path crosses a call.  */
  bool crosses_call;
};

/* A global structure that contains the needed information about hard
   regs.  */
static struct hard_regs_data sel_hrd;


/* This structure holds local data used in code_motion_path_driver hooks on
   the same or adjacent levels of recursion.  Here we keep those parameters
   that are not used in the code_motion_path_driver routine itself, but only
   in its hooks.  Moreover, all parameters that can be modified in hooks are
   in this structure, so all other parameters passed explicitly to hooks are
   read-only.  */
struct cmpd_local_params
{
  /* Local params used in move_op_* functions.  */

  /* Edges for bookkeeping generation.  */
  edge e1, e2;

  /* C_EXPR merged from all successors and locally allocated temporary C_EXPR.  */
  expr_t c_expr_merged, c_expr_local;

  /* Local params used in fur_* functions.  */
  /* Copy of the ORIGINAL_INSN list, stores the original insns already
     found before entering the current level of code_motion_path_driver.  */
  def_list_t old_original_insns;

  /* Local params used in move_op_* functions.  */
  /* True when we have removed the last insn in the block which was
     also a boundary.  Do not update anything or create bookkeeping copies.  */
  BOOL_BITFIELD removed_last_insn : 1;
};

/* Stores the static parameters for move_op_* calls.  */
struct moveop_static_params
{
  /* Destination register.  */
  rtx dest;

  /* Current C_EXPR.  */
  expr_t c_expr;

  /* A UID of expr_vliw which is to be moved up.  If we find other exprs,
     they are to be removed.  */
  int uid;

  /* This is initialized to the insn on which the driver stopped its traversal.  */
  insn_t failed_insn;

  /* True if we scheduled an insn with a different register.  */
  bool was_renamed;
};

/* Stores the static parameters for fur_* calls.  */
struct fur_static_params
{
  /* Set of registers unavailable on the code motion path.  */
  regset used_regs;

  /* Pointer to the list of original insns definitions.  */
  def_list_t *original_insns;

  /* True if a code motion path contains a CALL insn.  */
  bool crosses_call;
};

typedef struct fur_static_params *fur_static_params_p;
typedef struct cmpd_local_params *cmpd_local_params_p;
typedef struct moveop_static_params *moveop_static_params_p;

/* Set of hooks and parameters that determine behavior specific to
   move_op or find_used_regs functions.  */
struct code_motion_path_driver_info_def
{
  /* Called on entry to the basic block.  */
  int (*on_enter) (insn_t, cmpd_local_params_p, void *, bool);

  /* Called when original expr is found.  */
  void (*orig_expr_found) (insn_t, expr_t, cmpd_local_params_p, void *);

  /* Called while descending current basic block if current insn is not
     the original EXPR we're searching for.  */
  bool (*orig_expr_not_found) (insn_t, av_set_t, void *);

  /* Function to merge C_EXPRes from different successors.  */
  void (*merge_succs) (insn_t, insn_t, int, cmpd_local_params_p, void *);

  /* Function to finalize merge from different successors and possibly
     deallocate temporary data structures used for merging.  */
  void (*after_merge_succs) (cmpd_local_params_p, void *);

  /* Called on the backward stage of recursion to do moveup_expr.
     Used only with move_op_*.  */
  void (*ascend) (insn_t, void *);

  /* Called on the ascending pass, before returning from the current basic
     block or from the whole traversal.  */
  void (*at_first_insn) (insn_t, cmpd_local_params_p, void *);

  /* When processing successors in move_op we need only descend into
     SUCCS_NORMAL successors, while in find_used_regs we need SUCCS_ALL.  */
  int succ_flags;

  /* The routine name to print in dumps ("move_op" or "find_used_regs").  */
  const char *routine_name;
};

/* Global pointer to current hooks, either points to MOVE_OP_HOOKS or
   FUR_HOOKS.  */
struct code_motion_path_driver_info_def *code_motion_path_driver_info;

/* Set of hooks for performing move_op and find_used_regs routines with
   code_motion_path_driver.  */
extern struct code_motion_path_driver_info_def move_op_hooks, fur_hooks;

/* True if/when we want to emulate the Haifa scheduler in the common code.
   This is used in sched_rgn_local_init and in various places in
   sched-deps.c.  */
int sched_emulate_haifa_p;

/* GLOBAL_LEVEL is used to discard information stored in basic block headers'
   av_sets.  The av_set of a bb header is valid if the header's level is equal
   to GLOBAL_LEVEL, and invalid if it is less.  This is primarily used to
   advance the scheduling window.  */
int global_level;

/* Current fences.  */
flist_t fences;

/* True when separable insns should be scheduled as RHSes.  */
static bool enable_schedule_as_rhs_p;

/* Used in verify_target_availability to assert that a target reg is reported
   unavailable by both TARGET_UNAVAILABLE and find_used_regs only if
   we haven't scheduled anything on the previous fence.
   If scheduled_something_on_previous_fence is true, TARGET_UNAVAILABLE can
   have a more conservative value than the one returned by
   find_used_regs, thus we shouldn't assert that these values are equal.  */
static bool scheduled_something_on_previous_fence;

/* All newly emitted insns will have their uids greater than this value.  */
static int first_emitted_uid;

/* Set of basic blocks that are forced to start new ebbs.  This is a subset
   of all the ebb heads.  */
bitmap forced_ebb_heads;

/* Blocks that need to be rescheduled after pipelining.  */
bitmap blocks_to_reschedule = NULL;

/* True when the first lv set should be ignored when updating liveness.  */
static bool ignore_first = false;

/* Number of insns max_issue has initialized data structures for.  */
static int max_issue_size = 0;

/* Whether we can issue more instructions.  */
static int can_issue_more;

/* Maximum software lookahead window size, reduced when rescheduling after
   pipelining.  */
static int max_ws;

/* Number of insns scheduled in current region.  */
static int num_insns_scheduled;

/* A vector of expressions, used so that they can be sorted.  */
static vec<expr_t> vec_av_set;

/* A vector of vinsns is used to hold temporary lists of vinsns.  */
typedef vec<vinsn_t> vinsn_vec_t;

/* This vector has the exprs which may still be present in av_sets, but actually
   can't be moved up due to bookkeeping created during code motion to another
   fence.  See the comment near the call to update_and_record_unavailable_insns
   for the detailed explanations.  */
static vinsn_vec_t vec_bookkeeping_blocked_vinsns = vinsn_vec_t ();

/* This vector has vinsns which are scheduled with renaming on the first fence
   and then seen on the second.  For expressions with such vinsns, target
   availability information may be wrong.  */
static vinsn_vec_t vec_target_unavailable_vinsns = vinsn_vec_t ();

/* Vector to store temporary nops inserted in move_op to prevent removal
   of empty bbs.  */
static vec<insn_t> vec_temp_moveop_nops;

/* These bitmaps record original instructions scheduled on the current
   iteration and bookkeeping copies created by them.  */
static bitmap current_originators = NULL;
static bitmap current_copies = NULL;

/* This bitmap marks the blocks visited by code_motion_path_driver so we don't
   visit them afterwards.  */
static bitmap code_motion_visited_blocks = NULL;

/* Variables to accumulate different statistics.  */

/* The number of bookkeeping copies created.  */
static int stat_bookkeeping_copies;

/* The number of insns that required bookkeeping for their scheduling.  */
static int stat_insns_needed_bookkeeping;

/* The number of insns that got renamed.  */
static int stat_renamed_scheduled;

/* The number of substitutions made during scheduling.  */
static int stat_substitutions_total;


/* Forward declarations of static functions.  */
static bool rtx_ok_for_substitution_p (rtx, rtx);
static int sel_rank_for_schedule (const void *, const void *);
static av_set_t find_sequential_best_exprs (bnd_t, expr_t, bool);
static basic_block find_block_for_bookkeeping (edge e1, edge e2, bool lax);

static rtx get_dest_from_orig_ops (av_set_t);
static basic_block generate_bookkeeping_insn (expr_t, edge, edge);
static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *,
                            def_list_t *);
static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t, bool*);
static int code_motion_path_driver (insn_t, av_set_t, ilist_t,
                                    cmpd_local_params_p, void *);
static void sel_sched_region_1 (void);
static void sel_sched_region_2 (int);
static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool);

static void debug_state (state_t);


/* Functions that work with fences.  */

/* Advance one cycle on FENCE.  */
static void
advance_one_cycle (fence_t fence)
{
  unsigned i;
  int cycle;
  rtx_insn *insn;

  advance_state (FENCE_STATE (fence));
  cycle = ++FENCE_CYCLE (fence);
  FENCE_ISSUED_INSNS (fence) = 0;
  FENCE_STARTS_CYCLE_P (fence) = 1;
  can_issue_more = issue_rate;
  FENCE_ISSUE_MORE (fence) = can_issue_more;

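  /* Remove insns that have already completed execution from the fence's
     dependence context and from its list of executing insns, so that
     later readiness checks need not account for them.  */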
  for (i = 0; vec_safe_iterate (FENCE_EXECUTING_INSNS (fence), i, &insn); )
    {
      if (INSN_READY_CYCLE (insn) < cycle)
        {
          remove_from_deps (FENCE_DC (fence), insn);
          FENCE_EXECUTING_INSNS (fence)->unordered_remove (i);
          continue;
        }
      i++;
    }
  if (sched_verbose >= 2)
    {
      sel_print ("Finished a cycle.  Current cycle = %d\n", FENCE_CYCLE (fence));
      debug_state (FENCE_STATE (fence));
    }
}

/* Returns true when SUCC is in a fallthru bb of INSN, possibly
   skipping empty basic blocks.  */
static bool
in_fallthru_bb_p (rtx_insn *insn, rtx succ)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  edge e;

  if (bb == BLOCK_FOR_INSN (succ))
    return true;

  e = find_fallthru_edge_from (bb);
  if (e)
    bb = e->dest;
  else
    return false;

  while (sel_bb_empty_p (bb))
    bb = bb->next_bb;

  return bb == BLOCK_FOR_INSN (succ);
}

/* Construct successor fences from OLD_FENCEs and put them in NEW_FENCES.
   When a successor will continue an ebb, transfer all parameters of a fence
   to the new fence.  ORIG_MAX_SEQNO is the maximal seqno before this round
   of scheduling, helping to distinguish between the old and the new code.  */
static void
extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
			 int orig_max_seqno)
{
  bool was_here_p = false;
  insn_t insn = NULL;
  insn_t succ;
  succ_iterator si;
  ilist_iterator ii;
  fence_t fence = FLIST_FENCE (old_fences);
  basic_block bb;

  /* Get the only element of FENCE_BNDS (fence).  */
  FOR_EACH_INSN (insn, ii, FENCE_BNDS (fence))
    {
      gcc_assert (!was_here_p);
      was_here_p = true;
    }
  gcc_assert (was_here_p && insn != NULL_RTX);

  /* When in the "middle" of the block, just move this fence
     to the new list.  */
  bb = BLOCK_FOR_INSN (insn);
  if (! sel_bb_end_p (insn)
      || (single_succ_p (bb)
          && single_pred_p (single_succ (bb))))
    {
      insn_t succ;

      succ = (sel_bb_end_p (insn)
              ? sel_bb_head (single_succ (bb))
              : NEXT_INSN (insn));

      if (INSN_SEQNO (succ) > 0
          && INSN_SEQNO (succ) <= orig_max_seqno
          && INSN_SCHED_TIMES (succ) <= 0)
        {
          FENCE_INSN (fence) = succ;
          move_fence_to_fences (old_fences, new_fences);

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state continue)\n",
                       INSN_UID (insn), INSN_UID (succ), BLOCK_NUM (succ));
        }
      return;
    }

  /* Otherwise copy fence's structures to (possibly) multiple successors.  */
  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      int seqno = INSN_SEQNO (succ);

      if (seqno > 0 && seqno <= orig_max_seqno
          && (pipelining_p || INSN_SCHED_TIMES (succ) <= 0))
        {
          bool b = (in_same_ebb_p (insn, succ)
                    || in_fallthru_bb_p (insn, succ));

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state %s)\n",
                       INSN_UID (insn), INSN_UID (succ),
                       BLOCK_NUM (succ), b ? "continue" : "reset");

          if (b)
            add_dirty_fence_to_fences (new_fences, succ, fence);
          else
            {
              /* Mark block of the SUCC as head of the new ebb.  */
              bitmap_set_bit (forced_ebb_heads, BLOCK_NUM (succ));
              add_clean_fence_to_fences (new_fences, succ, fence);
            }
        }
    }
}


/* Functions to support substitution.  */

/* Returns whether INSN with dependence status DS is eligible for
   substitution, i.e. it's a copy operation x := y, and RHS that is
   moved up through this insn should be substituted.  */
static bool
can_substitute_through_p (insn_t insn, ds_t ds)
{
  /* We can substitute only true dependencies.  */
  if ((ds & DEP_OUTPUT)
      || (ds & DEP_ANTI)
      || ! INSN_RHS (insn)
      || ! INSN_LHS (insn))
    return false;

  /* Now we just need to make sure the INSN_RHS consists of only one
     simple REG rtx.  */
  if (REG_P (INSN_LHS (insn))
      && REG_P (INSN_RHS (insn)))
    return true;
  return false;
}

/* Substitute all occurrences of INSN's destination in EXPR's vinsn with INSN's
   source (if INSN is eligible for substitution).  Returns TRUE if
   substitution was actually performed, FALSE otherwise.  Substitution might
   not be performed because either EXPR's vinsn doesn't contain INSN's
   destination or the resulting insn is invalid for the target machine.
   When UNDO is true, perform unsubstitution instead (the difference is in
   the part of rtx on which validate_replace_rtx is called).  */
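/* E.g. (an illustrative case): moving "r5 = r2 + 1" up through the copy
   "r2 = r3" substitutes the rhs into "r5 = r3 + 1"; with UNDO set, the
   replacement goes in the opposite direction.  */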
static bool
substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo)
{
  rtx *where;
  bool new_insn_valid;
  vinsn_t *vi = &EXPR_VINSN (expr);
  bool has_rhs = VINSN_RHS (*vi) != NULL;
  rtx old, new_rtx;

  /* Do not try to replace in SET_DEST.  Although we'll choose new
     register for the RHS, we don't want to change RHS' original reg.
     If the insn is not SET, we may still be able to substitute something
     in it, and if we're here (don't have deps), it doesn't write INSN's
     dest.  */
  where = (has_rhs
	   ? &VINSN_RHS (*vi)
	   : &PATTERN (VINSN_INSN_RTX (*vi)));
  old = undo ? INSN_RHS (insn) : INSN_LHS (insn);

  /* Substitute if INSN has a form of x:=y and LHS(INSN) occurs in *VI.  */
  if (rtx_ok_for_substitution_p (old, *where))
    {
      rtx_insn *new_insn;
      rtx *where_replace;

      /* We should copy these rtxes before substitution.  */
      new_rtx = copy_rtx (undo ? INSN_LHS (insn) : INSN_RHS (insn));
      new_insn = create_copy_of_insn_rtx (VINSN_INSN_RTX (*vi));

      /* Where we'll replace.
         WHERE_REPLACE should point inside NEW_INSN, so INSN_RHS couldn't be
	 used instead of SET_SRC.  */
      where_replace = (has_rhs
		       ? &SET_SRC (PATTERN (new_insn))
		       : &PATTERN (new_insn));

      new_insn_valid
        = validate_replace_rtx_part_nosimplify (old, new_rtx, where_replace,
                                                new_insn);

      /* ??? Actually, constrain_operands result depends upon choice of
         destination register.  E.g. if we allow single register to be an rhs,
	 and if we try to move dx=ax(as rhs) through ax=dx, we'll result
	 in invalid insn dx=dx, so we'll lose this rhs here.
	 Just can't come up with a significant testcase for this, so just
	 leaving it for now.  */
      if (new_insn_valid)
	{
	  change_vinsn_in_expr (expr,
				create_vinsn_from_insn_rtx (new_insn, false));

	  /* Do not allow clobbering the address register of speculative
             insns.  */
	  if ((EXPR_SPEC_DONE_DS (expr) & SPECULATIVE)
              && register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
					 expr_dest_reg (expr)))
	    EXPR_TARGET_AVAILABLE (expr) = false;

	  return true;
	}
      else
        return false;
    }
  else
    return false;
}

/* Return the number of places WHAT appears within WHERE.
   Bail out when we find a reference occupying several hard registers.  */
static int
count_occurrences_equiv (const_rtx what, const_rtx where)
{
  int count = 0;
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, where, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) == REGNO (what))
	{
	  /* Bail out if mode is different or more than one register is
	     used.  */
	  if (GET_MODE (x) != GET_MODE (what) || REG_NREGS (x) > 1)
	    return 0;
	  count += 1;
	}
      else if (GET_CODE (x) == SUBREG
	       && (!REG_P (SUBREG_REG (x))
		   || REGNO (SUBREG_REG (x)) == REGNO (what)))
	/* ??? Do not support substituting regs inside subregs.  In that case,
	   simplify_subreg will be called by validate_replace_rtx, and
	   unsubstitution will fail later.  */
	return 0;
    }
  return count;
}
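
/* E.g. (illustrative): with WHAT = (reg:SI ax), the pattern
   (set (reg:SI bx) (plus:SI (reg:SI ax) (reg:SI ax))) yields 2, while an
   occurrence of ax in a different mode makes the function return 0.  */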

/* Returns TRUE if WHAT is found in WHERE rtx tree.  */
static bool
rtx_ok_for_substitution_p (rtx what, rtx where)
{
  return (count_occurrences_equiv (what, where) > 0);
}



/* Functions to support register renaming.  */

/* Substitute VI's set source with RHS_RTX.  Returns the newly created
   insn rtx that has RHS_RTX as its source.  */
static rtx_insn *
create_insn_rtx_with_rhs (vinsn_t vi, rtx rhs_rtx)
{
  rtx lhs_rtx;
  rtx pattern;
  rtx_insn *insn_rtx;

  lhs_rtx = copy_rtx (VINSN_LHS (vi));

  pattern = gen_rtx_SET (lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Returns whether INSN's src can be replaced with register NEW_SRC_REG.
   E.g. the following insn is valid for i386:

    (insn:HI 2205 6585 2207 727 ../../gcc/libiberty/regex.c:3337
      (set (mem/s:QI (plus:SI (plus:SI (reg/f:SI 7 sp)
			(reg:SI 0 ax [orig:770 c1 ] [770]))
		    (const_int 288 [0x120])) [0 str S1 A8])
	    (const_int 0 [0x0])) 43 {*movqi_1} (nil)
	(nil))

  But if we change (const_int 0 [0x0]) to (reg:QI 4 si), it will be invalid
  because of operand constraints:

    (define_insn "*movqi_1"
      [(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m")
	    (match_operand:QI 1 "general_operand"      " q,qn,qm,q,rn,qm,qn")
	    )]

  So do constrain_operands here, before choosing NEW_SRC_REG as the best
  reg for the rhs.  */

static bool
replace_src_with_reg_ok_p (insn_t insn, rtx new_src_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  machine_mode mode;
  rtx dst_loc;
  bool res;

  gcc_assert (VINSN_SEPARABLE_P (vi));

  get_dest_and_mode (insn, &dst_loc, &mode);
  gcc_assert (mode == GET_MODE (new_src_reg));

  if (REG_P (dst_loc) && REGNO (new_src_reg) == REGNO (dst_loc))
    return true;

  /* See whether SET_SRC can be replaced with this register.  */
  validate_change (insn, &SET_SRC (PATTERN (insn)), new_src_reg, 1);
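  /* The change above was only queued as part of a change group; verify
     it and roll it back, as we need just the yes/no answer here.  */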
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}

/* Returns whether INSN will still be valid after replacing its DEST with
   register NEW_REG.  */
static bool
replace_dest_with_reg_ok_p (insn_t insn, rtx new_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  bool res;

  /* We should deal here only with separable insns.  */
  gcc_assert (VINSN_SEPARABLE_P (vi));
  gcc_assert (GET_MODE (VINSN_LHS (vi)) == GET_MODE (new_reg));

  /* See whether SET_DEST can be replaced with this register.  */
  validate_change (insn, &SET_DEST (PATTERN (insn)), new_reg, 1);
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}

/* Create a pattern with rhs of VI and lhs of LHS_RTX.  */
static rtx_insn *
create_insn_rtx_with_lhs (vinsn_t vi, rtx lhs_rtx)
{
  rtx rhs_rtx;
  rtx pattern;
  rtx_insn *insn_rtx;

  rhs_rtx = copy_rtx (VINSN_RHS (vi));

  pattern = gen_rtx_SET (lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Substitute lhs in the given expression EXPR for the register NEW_REG.
   SET_DEST may be arbitrary rtx, not only a register.  */
static void
replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg)
{
  rtx_insn *insn_rtx;
  vinsn_t vinsn;

  insn_rtx = create_insn_rtx_with_lhs (EXPR_VINSN (expr), new_reg);
  vinsn = create_vinsn_from_insn_rtx (insn_rtx, false);

  change_vinsn_in_expr (expr, vinsn);
  EXPR_WAS_RENAMED (expr) = 1;
  EXPR_TARGET_AVAILABLE (expr) = 1;
}

/* Returns whether VI writes either one of the USED_REGS registers or,
   if a register is a hard one, one of the UNAVAILABLE_HARD_REGS registers.  */
static bool
vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs,
                            HARD_REG_SET unavailable_hard_regs)
{
  unsigned regno;
  reg_set_iterator rsi;

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
	return true;
    }

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
	return true;
    }

  return false;
}

/* Returns register class of the output register in INSN.
   Returns NO_REGS for call insns because some targets have constraints on
   destination register of a call insn.

   Code adopted from regrename.c::build_def_use.  */
static enum reg_class
get_reg_class (rtx_insn *insn)
{
  int i, n_ops;

  extract_constrain_insn (insn);
  preprocess_constraints (insn);
  n_ops = recog_data.n_operands;

  const operand_alternative *op_alt = which_op_alt ();
  if (asm_noperands (PATTERN (insn)) > 0)
    {
      for (i = 0; i < n_ops; i++)
	if (recog_data.operand_type[i] == OP_OUT)
	  {
	    rtx *loc = recog_data.operand_loc[i];
	    rtx op = *loc;
	    enum reg_class cl = alternative_class (op_alt, i);

	    if (REG_P (op)
		&& REGNO (op) == ORIGINAL_REGNO (op))
	      continue;

	    return cl;
	  }
    }
  else if (!CALL_P (insn))
    {
      for (i = 0; i < n_ops + recog_data.n_dups; i++)
       {
	 int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops];
	 enum reg_class cl = alternative_class (op_alt, opn);

	 if (recog_data.operand_type[opn] == OP_OUT
	     || recog_data.operand_type[opn] == OP_INOUT)
	   return cl;
       }
    }

  /* Insns like
     (insn (set (reg:CCZ 17 flags) (compare:CCZ ...)))
     may result in returning NO_REGS, because flags is written implicitly
     through the CMP insn, which has no OP_OUT | OP_INOUT operands.  */
  return NO_REGS;
}

/* Calculate HARD_REGNO_RENAME_OK data for REGNO.  */
static void
init_hard_regno_rename (int regno)
{
  int cur_reg;

  SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], regno);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      /* We are not interested in renaming into regs that are never used.  */
      if (!TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg))
        continue;

      if (HARD_REGNO_RENAME_OK (regno, cur_reg))
        SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], cur_reg);
    }
}

/* A wrapper around HARD_REGNO_RENAME_OK that will look into the hard regs
   data first.  */
static inline bool
sel_hard_regno_rename_ok (int from ATTRIBUTE_UNUSED, int to ATTRIBUTE_UNUSED)
{
  /* Check whether this is all calculated.  */
  if (TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], from))
    return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);

  init_hard_regno_rename (from);

  return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);
}

/* Calculate set of registers that are capable of holding MODE.  */
static void
init_regs_for_mode (machine_mode mode)
{
  int cur_reg;

  CLEAR_HARD_REG_SET (sel_hrd.regs_for_mode[mode]);
  CLEAR_HARD_REG_SET (sel_hrd.regs_for_call_clobbered[mode]);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      int nregs;
      int i;

      /* See whether it accepts all modes that occur in
         original insns.  */
      if (!targetm.hard_regno_mode_ok (cur_reg, mode))
        continue;

      nregs = hard_regno_nregs (cur_reg, mode);

      for (i = nregs - 1; i >= 0; --i)
        if (fixed_regs[cur_reg + i]
            || global_regs[cur_reg + i]
            /* Can't use regs which aren't saved by
               the prologue.  */
            || !TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg + i)
	    /* Can't use regs with non-null REG_BASE_VALUE, because adjusting
	       it affects aliasing globally and invalidates all AV sets.  */
	    || get_reg_base_value (cur_reg + i)
#ifdef LEAF_REGISTERS
            /* We can't use a non-leaf register if we're in a
               leaf function.  */
            || (crtl->is_leaf
                && !LEAF_REGISTERS[cur_reg + i])
#endif
            )
          break;

      if (i >= 0)
        continue;

      if (targetm.hard_regno_call_part_clobbered (NULL, cur_reg, mode))
        SET_HARD_REG_BIT (sel_hrd.regs_for_call_clobbered[mode],
                          cur_reg);

      /* If the CUR_REG passed all the checks above,
         then it's ok.  */
      SET_HARD_REG_BIT (sel_hrd.regs_for_mode[mode], cur_reg);
    }

  sel_hrd.regs_for_mode_ok[mode] = true;
}

/* Init all register sets gathered in HRD.  */
static void
init_hard_regs_data (void)
{
  int cur_reg = 0;
  int cur_mode = 0;

  CLEAR_HARD_REG_SET (sel_hrd.regs_ever_used);
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    if (df_regs_ever_live_p (cur_reg) || call_used_regs[cur_reg])
      SET_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg);

  /* Initialize registers that are valid based on mode when this is
     really needed.  */
  for (cur_mode = 0; cur_mode < NUM_MACHINE_MODES; cur_mode++)
    sel_hrd.regs_for_mode_ok[cur_mode] = false;

  /* Mark that the HARD_REGNO_RENAME_OK data is not calculated yet.  */
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    CLEAR_HARD_REG_SET (sel_hrd.regs_for_rename[cur_reg]);

#ifdef STACK_REGS
  CLEAR_HARD_REG_SET (sel_hrd.stack_regs);

  for (cur_reg = FIRST_STACK_REG; cur_reg <= LAST_STACK_REG; cur_reg++)
    SET_HARD_REG_BIT (sel_hrd.stack_regs, cur_reg);
#endif
}

/* Mark hardware regs in REG_RENAME_P that are not suitable
   for renaming rhs in INSN due to hardware restrictions (register class,
   modes compatibility etc).  This doesn't affect original insn's dest reg,
   if it isn't in USED_REGS.  DEF is a definition insn of rhs for which the
   destination register is sought.  LHS (DEF->ORIG_INSN) may be REG or MEM.
   Registers that are in used_regs are always marked in
   unavailable_hard_regs as well.  */

static void
mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
                            regset used_regs ATTRIBUTE_UNUSED)
{
  machine_mode mode;
  enum reg_class cl = NO_REGS;
  rtx orig_dest;
  unsigned cur_reg, regno;
  hard_reg_set_iterator hrsi;

  gcc_assert (GET_CODE (PATTERN (def->orig_insn)) == SET);
  gcc_assert (reg_rename_p);

  orig_dest = SET_DEST (PATTERN (def->orig_insn));

  /* We have decided not to rename 'mem = something;' insns, as 'something'
     is usually a register.  */
  if (!REG_P (orig_dest))
    return;

  regno = REGNO (orig_dest);

  /* If before reload, don't try to work with pseudos.  */
  if (!reload_completed && !HARD_REGISTER_NUM_P (regno))
    return;

  if (reload_completed)
    cl = get_reg_class (def->orig_insn);

  /* Stop if the original register is one of the fixed_regs, global_regs or
     frame pointer, or we could not discover its class.  */
  if (fixed_regs[regno]
      || global_regs[regno]
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER && frame_pointer_needed
	  && regno == HARD_FRAME_POINTER_REGNUM)
      || (HARD_FRAME_POINTER_IS_FRAME_POINTER && frame_pointer_needed
	  && regno == FRAME_POINTER_REGNUM)
      || (reload_completed && cl == NO_REGS))
    {
      SET_HARD_REG_SET (reg_rename_p->unavailable_hard_regs);

      /* Give a chance for the original register, if it isn't in used_regs.  */
      if (!def->crosses_call)
        CLEAR_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno);

      return;
    }

  /* If something allocated on stack in this function, mark frame pointer
     register unavailable, considering also modes.
     FIXME: it is enough to do this once per all original defs.  */
  if (frame_pointer_needed)
    {
      add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
			   Pmode, FRAME_POINTER_REGNUM);

      if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
        add_to_hard_reg_set (&reg_rename_p->unavailable_hard_regs,
			     Pmode, HARD_FRAME_POINTER_REGNUM);
    }

#ifdef STACK_REGS
  /* For the stack registers, the presence of FIRST_STACK_REG in USED_REGS
     is equivalent to all stack regs being in this set.
     I.e. no stack register can be renamed, and even if it's an original
     register here we make sure it won't be lifted over its previous def
     (its previous def will appear as if it's a FIRST_STACK_REG def).
     The HARD_REGNO_RENAME_OK covers other cases in the condition below.  */
  if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG)
      && REGNO_REG_SET_P (used_regs, FIRST_STACK_REG))
    IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
                      sel_hrd.stack_regs);
#endif

  /* If there's a call on this path, make regs from call_used_reg_set
     unavailable.  */
  if (def->crosses_call)
    IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
                      call_used_reg_set);

  /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and crosses_call,
     but not register classes.  */
  if (!reload_completed)
    return;

  /* Leave regs as 'available' only from the current
     register class.  */
  COPY_HARD_REG_SET (reg_rename_p->available_for_renaming,
                     reg_class_contents[cl]);

  mode = GET_MODE (orig_dest);

  /* Leave only registers available for this mode.  */
  if (!sel_hrd.regs_for_mode_ok[mode])
    init_regs_for_mode (mode);
  AND_HARD_REG_SET (reg_rename_p->available_for_renaming,
                    sel_hrd.regs_for_mode[mode]);

  /* Exclude registers that are partially call clobbered.  */
  if (def->crosses_call
      && !targetm.hard_regno_call_part_clobbered (NULL, regno, mode))
    AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
                            sel_hrd.regs_for_call_clobbered[mode]);

  /* Leave only those that are ok to rename.  */
  EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
                                  0, cur_reg, hrsi)
    {
      int nregs;
      int i;

      nregs = hard_regno_nregs (cur_reg, mode);
      gcc_assert (nregs > 0);

      for (i = nregs - 1; i >= 0; --i)
        if (! sel_hard_regno_rename_ok (regno + i, cur_reg + i))
          break;

      if (i >= 0)
        CLEAR_HARD_REG_BIT (reg_rename_p->available_for_renaming,
                            cur_reg);
    }

  AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
                          reg_rename_p->unavailable_hard_regs);

  /* Regno is always ok from the renaming point of view, but it really
     could be in *unavailable_hard_regs already, so set it here instead
     of there.  */
  SET_HARD_REG_BIT (reg_rename_p->available_for_renaming, regno);
}

/* reg_rename_tick[REG1] > reg_rename_tick[REG2] if REG1 was chosen as the
   best register more recently than REG2.  */
static int reg_rename_tick[FIRST_PSEUDO_REGISTER];

/* Indicates the number of times renaming happened before the current one.  */
static int reg_rename_this_tick;

/* Choose the register among the free ones that is suitable for storing
   the rhs value.

   ORIGINAL_INSNS is the list of insns where the operation (rhs)
   originally appears.  There could be multiple original operations
   for a single rhs since we are moving it up and merging along different
   paths.

   Some code is adapted from regrename.c (regrename_optimize).
   If the original register is available, the function returns it.
   Otherwise it performs the checks, so the new register should
   comply with the following:
    - it should not violate any live ranges (such registers are in
      REG_RENAME_P->available_for_renaming set);
    - it should not be in the HARD_REGS_USED regset;
    - it should be in the class compatible with original uses;
    - it should not be clobbered through reference with different mode;
    - if we're in the leaf function, then the new register should
      not be in the LEAF_REGISTERS;
    - etc.

   If several registers meet the conditions, the register with the smallest
   tick is returned to achieve more even register allocation.

   If the original register seems to be ok, we set *IS_ORIG_REG_P_PTR to true.

   If no register satisfies the above conditions, NULL_RTX is returned.  */
static rtx
choose_best_reg_1 (HARD_REG_SET hard_regs_used,
                   struct reg_rename *reg_rename_p,
                   def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
  int best_new_reg;
  unsigned cur_reg;
  machine_mode mode = VOIDmode;
  unsigned regno, i, n;
  hard_reg_set_iterator hrsi;
  def_list_iterator di;
  def_t def;

  /* If original register is available, return it.  */
  *is_orig_reg_p_ptr = true;

  FOR_EACH_DEF (def, di, original_insns)
    {
      rtx orig_dest = SET_DEST (PATTERN (def->orig_insn));

      gcc_assert (REG_P (orig_dest));

      /* Check that all original operations have the same mode.
         This is done for the next loop; if we'd return from this
         loop, we'd check only part of them, but in this case
         it doesn't matter.  */
      if (mode == VOIDmode)
        mode = GET_MODE (orig_dest);
      gcc_assert (mode == GET_MODE (orig_dest));

      regno = REGNO (orig_dest);
      for (i = 0, n = REG_NREGS (orig_dest); i < n; i++)
        if (TEST_HARD_REG_BIT (hard_regs_used, regno + i))
          break;

      /* All hard registers are available.  */
      if (i == n)
        {
          gcc_assert (mode != VOIDmode);

          /* Hard registers should not be shared.  */
          return gen_rtx_REG (mode, regno);
        }
    }

  *is_orig_reg_p_ptr = false;
  best_new_reg = -1;

  /* Among all available regs choose the register that was
     allocated earliest.  */
  EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
                                  0, cur_reg, hrsi)
    if (! TEST_HARD_REG_BIT (hard_regs_used, cur_reg))
      {
	/* Check that all hard regs for mode are available.  */
	for (i = 1, n = hard_regno_nregs (cur_reg, mode); i < n; i++)
	  if (TEST_HARD_REG_BIT (hard_regs_used, cur_reg + i)
	      || !TEST_HARD_REG_BIT (reg_rename_p->available_for_renaming,
				     cur_reg + i))
	    break;

	if (i < n)
	  continue;

        /* All hard registers are available.  */
        if (best_new_reg < 0
            || reg_rename_tick[cur_reg] < reg_rename_tick[best_new_reg])
          {
            best_new_reg = cur_reg;

            /* Return immediately when we know there's no better reg.  */
            if (! reg_rename_tick[best_new_reg])
              break;
          }
      }

  if (best_new_reg >= 0)
    {
      /* Use the check from the above loop.  */
      gcc_assert (mode != VOIDmode);
      return gen_rtx_REG (mode, best_new_reg);
    }

  return NULL_RTX;
}
1405 
1406 /* A wrapper around choose_best_reg_1 () to verify that we make correct
1407    assumptions about available registers in the function.  */
1408 static rtx
choose_best_reg(HARD_REG_SET hard_regs_used,struct reg_rename * reg_rename_p,def_list_t original_insns,bool * is_orig_reg_p_ptr)1409 choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p,
1410                  def_list_t original_insns, bool *is_orig_reg_p_ptr)
1411 {
1412   rtx best_reg = choose_best_reg_1 (hard_regs_used, reg_rename_p,
1413                                     original_insns, is_orig_reg_p_ptr);
1414 
1415   /* FIXME loop over hard_regno_nregs here.  */
1416   gcc_assert (best_reg == NULL_RTX
1417 	      || TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, REGNO (best_reg)));
1418 
1419   return best_reg;
1420 }
1421 
1422 /* Choose the pseudo register for storing the rhs value.  As this is
1423    supposed to work before reload, we return either the original register
1424    or create a new one.  The parameters are the same as in the
1425    choose_best_reg_1 function, except that USED_REGS may contain pseudos.
1426    If we work with hard regs, check also REG_RENAME_P->UNAVAILABLE_HARD_REGS.
1427 
1428    TODO: take into account register pressure while doing this.  Up to this
1429    moment, this function would never return NULL for pseudos, but we should
1430    not rely on this.  */
1431 static rtx
1432 choose_best_pseudo_reg (regset used_regs,
1433                         struct reg_rename *reg_rename_p,
1434                         def_list_t original_insns, bool *is_orig_reg_p_ptr)
1435 {
1436   def_list_iterator i;
1437   def_t def;
1438   machine_mode mode = VOIDmode;
1439   bool bad_hard_regs = false;
1440 
1441   /* We should not use this after reload.  */
1442   gcc_assert (!reload_completed);
1443 
1444   /* If original register is available, return it.  */
1445   *is_orig_reg_p_ptr = true;
1446 
1447   FOR_EACH_DEF (def, i, original_insns)
1448     {
1449       rtx dest = SET_DEST (PATTERN (def->orig_insn));
1450       int orig_regno;
1451 
1452       gcc_assert (REG_P (dest));
1453 
1454       /* Check that all original operations have the same mode.  */
1455       if (mode == VOIDmode)
1456         mode = GET_MODE (dest);
1457       else
1458         gcc_assert (mode == GET_MODE (dest));
1459       orig_regno = REGNO (dest);
1460 
1461       /* Check that nothing in used_regs intersects with orig_regno.  When
1462 	 we have a hard reg here, still loop over hard_regno_nregs.  */
1463       if (HARD_REGISTER_NUM_P (orig_regno))
1464 	{
1465 	  int j, n;
1466 	  for (j = 0, n = REG_NREGS (dest); j < n; j++)
1467 	    if (REGNO_REG_SET_P (used_regs, orig_regno + j))
1468 	      break;
1469 	  if (j < n)
1470 	    continue;
1471 	}
1472       else
1473 	{
1474 	  if (REGNO_REG_SET_P (used_regs, orig_regno))
1475 	    continue;
1476 	}
1477       if (HARD_REGISTER_NUM_P (orig_regno))
1478 	{
1479 	  gcc_assert (df_regs_ever_live_p (orig_regno));
1480 
1481 	  /* For hard registers, we have to check hardware imposed
1482 	     limitations (frame/stack registers, calls crossed).  */
1483 	  if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
1484 				  orig_regno))
1485 	    {
1486 	      /* Don't let the register cross a call if it doesn't already
1487 		 cross one.  This condition matches the one in
1488 		 sched_analyze_reg () in sched-deps.c.  */
1489 	      if (!reg_rename_p->crosses_call
1490 		  || REG_N_CALLS_CROSSED (orig_regno) > 0)
1491 		return gen_rtx_REG (mode, orig_regno);
1492 	    }
1493 
1494 	  bad_hard_regs = true;
1495 	}
1496       else
1497 	return dest;
1498     }
1499 
1500   *is_orig_reg_p_ptr = false;
1501 
1502   /* We had some original hard registers that couldn't be used.
1503      Those were likely special.  Don't try to create a pseudo.  */
1504   if (bad_hard_regs)
1505     return NULL_RTX;
1506 
1507   /* We haven't found a register from original operations.  Get a new one.
1508      FIXME: control register pressure somehow.  */
1509   {
1510     rtx new_reg = gen_reg_rtx (mode);
1511 
1512     gcc_assert (mode != VOIDmode);
1513 
1514     max_regno = max_reg_num ();
1515     maybe_extend_reg_info_p ();
1516     REG_N_CALLS_CROSSED (REGNO (new_reg)) = reg_rename_p->crosses_call ? 1 : 0;
1517 
1518     return new_reg;
1519   }
1520 }
1521 
1522 /* Verify that EXPR_TARGET_AVAILABLE of EXPR is consistent with
1523    USED_REGS and REG_RENAME_P->UNAVAILABLE_HARD_REGS.  */
1524 static void
1525 verify_target_availability (expr_t expr, regset used_regs,
1526 			    struct reg_rename *reg_rename_p)
1527 {
1528   unsigned n, i, regno;
1529   machine_mode mode;
1530   bool target_available, live_available, hard_available;
1531 
1532   if (!REG_P (EXPR_LHS (expr)) || EXPR_TARGET_AVAILABLE (expr) < 0)
1533     return;
1534 
1535   regno = expr_dest_regno (expr);
1536   mode = GET_MODE (EXPR_LHS (expr));
1537   target_available = EXPR_TARGET_AVAILABLE (expr) == 1;
1538   n = HARD_REGISTER_NUM_P (regno) ? hard_regno_nregs (regno, mode) : 1;
1539 
1540   live_available = hard_available = true;
1541   for (i = 0; i < n; i++)
1542     {
1543       if (bitmap_bit_p (used_regs, regno + i))
1544         live_available = false;
1545       if (TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno + i))
1546         hard_available = false;
1547     }
1548 
1549   /* When target is not available, it may be due to hard register
1550      restrictions, e.g. crosses calls, so we check hard_available too.  */
1551   if (target_available)
1552     gcc_assert (live_available);
1553   else
1554     /* Check only if we haven't scheduled something on the previous fence,
1555        because due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues
1556        and having more than one fence, we may end up with the target marked
1557        unavailable in a block whose successor actually has it available.
1558 
1559        The last condition handles the case when a dependence from a call insn
1560        was created in sched-deps.c for insns with destination registers that
1561        never crossed a call before, but do cross one after our code motion.
1562 
1563        FIXME: in the latter case, we just uselessly called find_used_regs,
1564        because we can't move this expression with any other register
1565        as well.  */
1566     gcc_assert (scheduled_something_on_previous_fence || !live_available
1567 		|| !hard_available
1568 		|| (!reload_completed && reg_rename_p->crosses_call
1569 		    && REG_N_CALLS_CROSSED (regno) == 0));
1570 }
1571 
1572 /* Collect unavailable registers due to liveness for EXPR from BNDS
1573    into USED_REGS.  Save additional information about available
1574    registers and unavailable due to hardware restriction registers
1575    into REG_RENAME_P structure.  Save original insns into ORIGINAL_INSNS
1576    list.  */
1577 static void
1578 collect_unavailable_regs_from_bnds (expr_t expr, blist_t bnds, regset used_regs,
1579 				    struct reg_rename *reg_rename_p,
1580 				    def_list_t *original_insns)
1581 {
1582   for (; bnds; bnds = BLIST_NEXT (bnds))
1583     {
1584       bool res;
1585       av_set_t orig_ops = NULL;
1586       bnd_t bnd = BLIST_BND (bnds);
1587 
1588       /* If the chosen best expr doesn't belong to current boundary,
1589 	 skip it.  */
1590       if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr)))
1591 	continue;
1592 
1593       /* Put in ORIG_OPS all exprs from this boundary that become
1594 	 EXPR when moved to its top.  */
1595       orig_ops = find_sequential_best_exprs (bnd, expr, false);
1596 
1597       /* Compute used regs and OR it into the USED_REGS.  */
1598       res = find_used_regs (BND_TO (bnd), orig_ops, used_regs,
1599 			    reg_rename_p, original_insns);
1600 
1601       /* FIXME: the assert holds until we have several boundaries.  */
1602       gcc_assert (res);
1603       av_set_clear (&orig_ops);
1604     }
1605 }
1606 
1607 /* Return TRUE if it is possible to replace LHSes of ORIG_INSNS with BEST_REG.
1608    If BEST_REG is valid, replace LHS of EXPR with it.  */
1609 static bool
1610 try_replace_dest_reg (ilist_t orig_insns, rtx best_reg, expr_t expr)
1611 {
1612   /* Check whether we'll be able to generate the insn
1613      'dest := best_reg' at the place of the original operation.  */
1614   for (; orig_insns; orig_insns = ILIST_NEXT (orig_insns))
1615     {
1616       insn_t orig_insn = DEF_LIST_DEF (orig_insns)->orig_insn;
1617 
1618       gcc_assert (EXPR_SEPARABLE_P (INSN_EXPR (orig_insn)));
1619 
1620       if (REGNO (best_reg) != REGNO (INSN_LHS (orig_insn))
1621 	  && (! replace_src_with_reg_ok_p (orig_insn, best_reg)
1622 	      || ! replace_dest_with_reg_ok_p (orig_insn, best_reg)))
1623 	return false;
1624     }
1625 
1626   /* Make sure that EXPR has the right destination
1627      register.  */
1628   if (expr_dest_regno (expr) != REGNO (best_reg))
1629     replace_dest_with_reg_in_expr (expr, best_reg);
1630   else
1631     EXPR_TARGET_AVAILABLE (expr) = 1;
1632 
1633   return true;
1634 }
1635 
1636 /* Select and assign best register to EXPR searching from BNDS.
1637    Set *IS_ORIG_REG_P to TRUE if original register was selected.
1638    Return FALSE if no register can be chosen, which could happen when:
1639    * EXPR_SEPARABLE_P is true but we were unable to find suitable register;
1640    * EXPR_SEPARABLE_P is false but the insn sets/clobbers one of the registers
1641      that are used on the moving path.  */
1642 static bool
1643 find_best_reg_for_expr (expr_t expr, blist_t bnds, bool *is_orig_reg_p)
1644 {
1645   static struct reg_rename reg_rename_data;
1646 
1647   regset used_regs;
1648   def_list_t original_insns = NULL;
1649   bool reg_ok;
1650 
1651   *is_orig_reg_p = false;
1652 
1653   /* Don't bother to do anything if this insn doesn't set any registers.  */
1654   if (bitmap_empty_p (VINSN_REG_SETS (EXPR_VINSN (expr)))
1655       && bitmap_empty_p (VINSN_REG_CLOBBERS (EXPR_VINSN (expr))))
1656     return true;
1657 
1658   used_regs = get_clear_regset_from_pool ();
1659   CLEAR_HARD_REG_SET (reg_rename_data.unavailable_hard_regs);
1660 
1661   collect_unavailable_regs_from_bnds (expr, bnds, used_regs, &reg_rename_data,
1662 				      &original_insns);
1663 
1664   /* If after reload, make sure we're working with hard regs here.  */
1665   if (flag_checking && reload_completed)
1666     {
1667       reg_set_iterator rsi;
1668       unsigned i;
1669 
1670       EXECUTE_IF_SET_IN_REG_SET (used_regs, FIRST_PSEUDO_REGISTER, i, rsi)
1671         gcc_unreachable ();
1672     }
1673 
1674   if (EXPR_SEPARABLE_P (expr))
1675     {
1676       rtx best_reg = NULL_RTX;
1677       /* Check that we have computed availability of a target register
1678 	 correctly.  */
1679       verify_target_availability (expr, used_regs, &reg_rename_data);
1680 
1681       /* Turn everything into hard regs after reload.  */
1682       if (reload_completed)
1683 	{
1684 	  HARD_REG_SET hard_regs_used;
1685 	  REG_SET_TO_HARD_REG_SET (hard_regs_used, used_regs);
1686 
1687 	  /* Join hard registers unavailable due to register class
1688 	     restrictions and live range intersection.  */
1689 	  IOR_HARD_REG_SET (hard_regs_used,
1690 			    reg_rename_data.unavailable_hard_regs);
1691 
1692 	  best_reg = choose_best_reg (hard_regs_used, &reg_rename_data,
1693 				      original_insns, is_orig_reg_p);
1694 	}
1695       else
1696 	best_reg = choose_best_pseudo_reg (used_regs, &reg_rename_data,
1697 					   original_insns, is_orig_reg_p);
1698 
1699       if (!best_reg)
1700 	reg_ok = false;
1701       else if (*is_orig_reg_p)
1702 	{
1703 	  /* In case of unification BEST_REG may be different from EXPR's LHS
1704 	     when EXPR's LHS is unavailable, and there is another LHS among
1705 	     ORIGINAL_INSNS.  */
1706 	  reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
1707 	}
1708       else
1709 	{
1710 	  /* Forbid renaming of low-cost insns.  */
1711 	  if (sel_vinsn_cost (EXPR_VINSN (expr)) < 2)
1712 	    reg_ok = false;
1713 	  else
1714 	    reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
1715 	}
1716     }
1717   else
1718     {
1719       /* If !EXPR_SCHEDULE_AS_RHS (EXPR), just make sure INSN doesn't set
1720 	 any of the HARD_REGS_USED set.  */
1721       if (vinsn_writes_one_of_regs_p (EXPR_VINSN (expr), used_regs,
1722 				      reg_rename_data.unavailable_hard_regs))
1723 	{
1724 	  reg_ok = false;
1725 	  gcc_assert (EXPR_TARGET_AVAILABLE (expr) <= 0);
1726 	}
1727       else
1728 	{
1729 	  reg_ok = true;
1730 	  gcc_assert (EXPR_TARGET_AVAILABLE (expr) != 0);
1731 	}
1732     }
1733 
1734   ilist_clear (&original_insns);
1735   return_regset_to_pool (used_regs);
1736 
1737   return reg_ok;
1738 }
1739 
1740 
1741 /* Return true if the dependence described by DS can be overcome.  */
1742 static bool
1743 can_speculate_dep_p (ds_t ds)
1744 {
1745   if (spec_info == NULL)
1746     return false;
1747 
1748   /* Leave only speculative data.  */
1749   ds &= SPECULATIVE;
1750 
1751   if (ds == 0)
1752     return false;
1753 
1754   {
1755     /* FIXME: make sched-deps.c produce only those non-hard dependencies
1756        that we can overcome.  */
1757     ds_t spec_mask = spec_info->mask;
1758 
1759     if ((ds & spec_mask) != ds)
1760       return false;
1761   }
1762 
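  /* A dependence is worth speculating on only when it is sufficiently
     weak, i.e. (as ds_weak estimates it) sufficiently unlikely to
     actually occur at run time; anything below the cutoff is rejected.  */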
1763   if (ds_weak (ds) < spec_info->data_weakness_cutoff)
1764     return false;
1765 
1766   return true;
1767 }
1768 
1769 /* Get a speculation check instruction.
1770    C_EXPR is a speculative expression,
1771    CHECK_DS describes speculations that should be checked,
1772    ORIG_INSN is the original non-speculative insn in the stream.  */
1773 static insn_t
1774 create_speculation_check (expr_t c_expr, ds_t check_ds, insn_t orig_insn)
1775 {
1776   rtx check_pattern;
1777   rtx_insn *insn_rtx;
1778   insn_t insn;
1779   basic_block recovery_block;
1780   rtx_insn *label;
1781 
1782   /* Create a recovery block if the target is going to emit a branchy
1783      check, or if ORIG_INSN was already speculative.  */
1784   if (targetm.sched.needs_block_p (check_ds)
1785       || EXPR_SPEC_DONE_DS (INSN_EXPR (orig_insn)) != 0)
1786     {
1787       recovery_block = sel_create_recovery_block (orig_insn);
1788       label = BB_HEAD (recovery_block);
1789     }
1790   else
1791     {
1792       recovery_block = NULL;
1793       label = NULL;
1794     }
1795 
1796   /* Get pattern of the check.  */
1797   check_pattern = targetm.sched.gen_spec_check (EXPR_INSN_RTX (c_expr), label,
1798 						check_ds);
1799 
1800   gcc_assert (check_pattern != NULL);
1801 
1802   /* Emit check.  */
1803   insn_rtx = create_insn_rtx_from_pattern (check_pattern, label);
1804 
1805   insn = sel_gen_insn_from_rtx_after (insn_rtx, INSN_EXPR (orig_insn),
1806 				      INSN_SEQNO (orig_insn), orig_insn);
1807 
1808   /* Make check to be non-speculative.  */
1809   EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
1810   INSN_SPEC_CHECKED_DS (insn) = check_ds;
1811 
1812   /* Decrease priority of check by difference of load/check instruction
1813      latencies.  */
1814   EXPR_PRIORITY (INSN_EXPR (insn)) -= (sel_vinsn_cost (INSN_VINSN (orig_insn))
1815 				       - sel_vinsn_cost (INSN_VINSN (insn)));
1816 
1817   /* Emit copy of original insn (though with replaced target register,
1818      if needed) to the recovery block.  */
1819   if (recovery_block != NULL)
1820     {
1821       rtx twin_rtx;
1822 
1823       twin_rtx = copy_rtx (PATTERN (EXPR_INSN_RTX (c_expr)));
1824       twin_rtx = create_insn_rtx_from_pattern (twin_rtx, NULL_RTX);
1825       sel_gen_recovery_insn_from_rtx_after (twin_rtx,
1826 					    INSN_EXPR (orig_insn),
1827 					    INSN_SEQNO (insn),
1828 					    bb_note (recovery_block));
1829     }
1830 
1831   /* If we've generated a data speculation check, make sure
1832      that all the bookkeeping instructions we'll create during
1833      this move_op () will allocate an ALAT entry so that the
1834      check won't fail.
1835      In case of control speculation we must convert C_EXPR to control
1836      speculative mode, because failing to do so will bring us an exception
1837      thrown by the non-control-speculative load.  */
1838   check_ds = ds_get_max_dep_weak (check_ds);
1839   speculate_expr (c_expr, check_ds);
1840 
1841   return insn;
1842 }
1843 
1844 /* True when INSN is a "regN = regN" copy.  */
1845 static bool
1846 identical_copy_p (rtx_insn *insn)
1847 {
1848   rtx lhs, rhs, pat;
1849 
1850   pat = PATTERN (insn);
1851 
1852   if (GET_CODE (pat) != SET)
1853     return false;
1854 
1855   lhs = SET_DEST (pat);
1856   if (!REG_P (lhs))
1857     return false;
1858 
1859   rhs = SET_SRC (pat);
1860   if (!REG_P (rhs))
1861     return false;
1862 
1863   return REGNO (lhs) == REGNO (rhs);
1864 }
1865 
1866 /* Undo all transformations on *AV_PTR that were done when
1867    moving through INSN.  */
1868 static void
1869 undo_transformations (av_set_t *av_ptr, rtx_insn *insn)
1870 {
1871   av_set_iterator av_iter;
1872   expr_t expr;
1873   av_set_t new_set = NULL;
1874 
1875   /* First, kill any EXPR that uses registers set by the insn.  This is
1876      required for correctness.  */
1877   FOR_EACH_EXPR_1 (expr, av_iter, av_ptr)
1878     if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (expr))
1879         && bitmap_intersect_p (INSN_REG_SETS (insn),
1880                                VINSN_REG_USES (EXPR_VINSN (expr)))
1881         /* When an insn looks like 'r1 = r1', we could substitute through
1882            it, but the above condition will still hold.  This happened with
1883            gcc.c-torture/execute/961125-1.c.  */
1884         && !identical_copy_p (insn))
1885       {
1886         if (sched_verbose >= 6)
1887           sel_print ("Expr %d removed due to use/set conflict\n",
1888                      INSN_UID (EXPR_INSN_RTX (expr)));
1889         av_set_iter_remove (&av_iter);
1890       }
1891 
1892   /* Undo transformations looking at the history vector.  */
1893   FOR_EACH_EXPR (expr, av_iter, *av_ptr)
1894     {
1895       int index = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr),
1896                                         insn, EXPR_VINSN (expr), true);
1897 
1898       if (index >= 0)
1899         {
1900           expr_history_def *phist;
1901 
1902           phist = &EXPR_HISTORY_OF_CHANGES (expr)[index];
1903 
1904           switch (phist->type)
1905             {
1906             case TRANS_SPECULATION:
1907               {
1908                 ds_t old_ds, new_ds;
1909 
1910                 /* Compute the difference between old and new speculative
1911                    statuses: that's what we need to check.
1912                    Earlier we used to assert that the status will really
1913                    change.  This no longer works because only the probability
1914                    bits in the status may have changed during compute_av_set,
1915                    and in the case of merging different probabilities of the
1916                    same speculative status along different paths we do not
1917                    record this in the history vector.  */
1918                 old_ds = phist->spec_ds;
1919                 new_ds = EXPR_SPEC_DONE_DS (expr);
1920 
1921                 old_ds &= SPECULATIVE;
1922                 new_ds &= SPECULATIVE;
1923                 new_ds &= ~old_ds;
1924 
1925                 EXPR_SPEC_TO_CHECK_DS (expr) |= new_ds;
1926                 break;
1927               }
1928             case TRANS_SUBSTITUTION:
1929               {
1930                 expr_def _tmp_expr, *tmp_expr = &_tmp_expr;
1931                 vinsn_t new_vi;
1932                 bool add = true;
1933 
1934                 new_vi = phist->old_expr_vinsn;
1935 
1936                 gcc_assert (VINSN_SEPARABLE_P (new_vi)
1937                             == EXPR_SEPARABLE_P (expr));
1938                 copy_expr (tmp_expr, expr);
1939 
1940                 if (vinsn_equal_p (phist->new_expr_vinsn,
1941                                    EXPR_VINSN (tmp_expr)))
1942                   change_vinsn_in_expr (tmp_expr, new_vi);
1943                 else
1944                   /* This happens when we're unsubstituting on a bookkeeping
1945                      copy, which was in turn substituted.  The history is wrong
1946                      in this case.  Do it the hard way.  */
1947                   add = substitute_reg_in_expr (tmp_expr, insn, true);
1948                 if (add)
1949                   av_set_add (&new_set, tmp_expr);
1950                 clear_expr (tmp_expr);
1951                 break;
1952               }
1953             default:
1954               gcc_unreachable ();
1955             }
1956         }
1957 
1958     }
1959 
1960   av_set_union_and_clear (av_ptr, &new_set, NULL);
1961 }
1962 
1963 
1964 /* Moveup_* helpers for code motion and computing av sets.  */
1965 
1966 /* Propagates EXPR inside an insn group through THROUGH_INSN.
1967    The difference from the below function is that only substitution is
1968    performed.  */
1969 static enum MOVEUP_EXPR_CODE
1970 moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn)
1971 {
1972   vinsn_t vi = EXPR_VINSN (expr);
1973   ds_t *has_dep_p;
1974   ds_t full_ds;
1975 
1976   /* Do this only inside insn group.  */
1977   gcc_assert (INSN_SCHED_CYCLE (through_insn) > 0);
1978 
1979   full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
1980   if (full_ds == 0)
1981     return MOVEUP_EXPR_SAME;
1982 
1983   /* Substitution is the only possible choice in this case.  */
1984   if (has_dep_p[DEPS_IN_RHS])
1985     {
1986       /* Can't substitute UNIQUE VINSNs.  */
1987       gcc_assert (!VINSN_UNIQUE_P (vi));
1988 
1989       if (can_substitute_through_p (through_insn,
1990                                     has_dep_p[DEPS_IN_RHS])
1991           && substitute_reg_in_expr (expr, through_insn, false))
1992         {
1993           EXPR_WAS_SUBSTITUTED (expr) = true;
1994           return MOVEUP_EXPR_CHANGED;
1995         }
1996 
1997       /* Don't care about this, as even true dependencies may be allowed
1998          in an insn group.  */
1999       return MOVEUP_EXPR_SAME;
2000     }
2001 
2002   /* This can catch output dependencies in COND_EXECs.  */
2003   if (has_dep_p[DEPS_IN_INSN])
2004     return MOVEUP_EXPR_NULL;
2005 
2006   /* This is either an output or an anti dependence, which usually has
2007      zero latency.  Allow this here; if we're wrong, tick_check_p
2008      will fix it.  */
2009   gcc_assert (has_dep_p[DEPS_IN_LHS]);
2010   return MOVEUP_EXPR_AS_RHS;
2011 }
2012 
2013 /* True when a trapping EXPR cannot be moved through THROUGH_INSN.  */
2014 #define CANT_MOVE_TRAPPING(expr, through_insn)                \
2015   (VINSN_MAY_TRAP_P (EXPR_VINSN (expr))                       \
2016    && !sel_insn_has_single_succ_p ((through_insn), SUCCS_ALL) \
2017    && !sel_insn_is_speculation_check (through_insn))
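
/* Rationale for the macro above: moving a trapping insn up through a jump
   with more than one successor would make it execute on paths where it
   originally did not, possibly introducing a spurious trap; speculation
   checks are the mechanism that makes such motion safe, hence the
   exception for them.  */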
2018 
2019 /* True when a conflict on a target register was found during moveup_expr.  */
2020 static bool was_target_conflict = false;
2021 
2022 /* Return true when moving a debug INSN across THROUGH_INSN will
2023    create a bookkeeping block.  We don't want to create such blocks,
2024    for they would cause codegen differences between compilations with
2025    and without debug info.  */
2026 
2027 static bool
2028 moving_insn_creates_bookkeeping_block_p (insn_t insn,
2029 					 insn_t through_insn)
2030 {
2031   basic_block bbi, bbt;
2032   edge e1, e2;
2033   edge_iterator ei1, ei2;
2034 
2035   if (!bookkeeping_can_be_created_if_moved_through_p (through_insn))
2036     {
2037       if (sched_verbose >= 9)
2038 	sel_print ("no bookkeeping required: ");
2039       return FALSE;
2040     }
2041 
2042   bbi = BLOCK_FOR_INSN (insn);
2043 
2044   if (EDGE_COUNT (bbi->preds) == 1)
2045     {
2046       if (sched_verbose >= 9)
2047 	sel_print ("only one pred edge: ");
2048       return TRUE;
2049     }
2050 
2051   bbt = BLOCK_FOR_INSN (through_insn);
2052 
2053   FOR_EACH_EDGE (e1, ei1, bbt->succs)
2054     {
2055       FOR_EACH_EDGE (e2, ei2, bbi->preds)
2056 	{
2057 	  if (find_block_for_bookkeeping (e1, e2, TRUE))
2058 	    {
2059 	      if (sched_verbose >= 9)
2060 		sel_print ("found existing block: ");
2061 	      return FALSE;
2062 	    }
2063 	}
2064     }
2065 
2066   if (sched_verbose >= 9)
2067     sel_print ("would create bookkeeping block: ");
2068 
2069   return TRUE;
2070 }
2071 
2072 /* Return true when a conflict between EXPR and THROUGH_INSN is found
2073    because of implicit clobbers newly created by renaming.  */
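/* For instance (a hypothetical case): renaming a hard-register destination
   to a pseudo may make the insn match an alternative that implicitly
   clobbers a scratch or flags register; if THROUGH_INSN uses or sets that
   register, the motion would be invalid.  */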
2074 static bool
2075 implicit_clobber_conflict_p (insn_t through_insn, expr_t expr)
2076 {
2077   HARD_REG_SET temp;
2078   rtx_insn *insn;
2079   rtx reg, rhs, pat;
2080   hard_reg_set_iterator hrsi;
2081   unsigned regno;
2082   bool valid;
2083 
2084   /* Make a new pseudo register.  */
2085   reg = gen_reg_rtx (GET_MODE (EXPR_LHS (expr)));
2086   max_regno = max_reg_num ();
2087   maybe_extend_reg_info_p ();
2088 
2089   /* Validate a change and bail out early.  */
2090   insn = EXPR_INSN_RTX (expr);
2091   validate_change (insn, &SET_DEST (PATTERN (insn)), reg, true);
2092   valid = verify_changes (0);
2093   cancel_changes (0);
2094   if (!valid)
2095     {
2096       if (sched_verbose >= 6)
2097 	sel_print ("implicit clobbers failed validation, ");
2098       return true;
2099     }
2100 
2101   /* Make a new insn with it.  */
2102   rhs = copy_rtx (VINSN_RHS (EXPR_VINSN (expr)));
2103   pat = gen_rtx_SET (reg, rhs);
2104   start_sequence ();
2105   insn = emit_insn (pat);
2106   end_sequence ();
2107 
2108   /* Calculate implicit clobbers.  */
2109   extract_insn (insn);
2110   preprocess_constraints (insn);
2111   alternative_mask preferred = get_preferred_alternatives (insn);
2112   ira_implicitly_set_insn_hard_regs (&temp, preferred);
2113   AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2114 
2115   /* If any implicit clobber registers intersect with regular ones in
2116      through_insn, we have a dependency and thus bail out.  */
2117   EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi)
2118     {
2119       vinsn_t vi = INSN_VINSN (through_insn);
2120       if (bitmap_bit_p (VINSN_REG_SETS (vi), regno)
2121 	  || bitmap_bit_p (VINSN_REG_CLOBBERS (vi), regno)
2122 	  || bitmap_bit_p (VINSN_REG_USES (vi), regno))
2123 	return true;
2124     }
2125 
2126   return false;
2127 }
2128 
2129 /* Modifies EXPR so it can be moved through the THROUGH_INSN,
2130    performing necessary transformations.  Record the type of transformation
2131    made in PTRANS_TYPE, when it is not NULL.  When INSIDE_INSN_GROUP,
2132    permit all dependencies except true ones, and try to remove those
2133    too via forward substitution.  All cases when a non-eliminable
2134    non-zero cost dependency exists inside an insn group will be fixed
2135    in tick_check_p instead.  */
2136 static enum MOVEUP_EXPR_CODE
2137 moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
2138             enum local_trans_type *ptrans_type)
2139 {
2140   vinsn_t vi = EXPR_VINSN (expr);
2141   insn_t insn = VINSN_INSN_RTX (vi);
2142   bool was_changed = false;
2143   bool as_rhs = false;
2144   ds_t *has_dep_p;
2145   ds_t full_ds;
2146 
2147   /* ??? We use dependencies of non-debug insns on debug insns to
2148      indicate that the debug insns need to be reset if the non-debug
2149      insn is pulled ahead of them.  It's hard to figure out how to
2150      introduce such a notion in sel-sched, but it already fails to
2151      support debug insns in other ways, so we just go ahead and
2152      let the debug insns go corrupt for now.  */
2153   if (DEBUG_INSN_P (through_insn) && !DEBUG_INSN_P (insn))
2154     return MOVEUP_EXPR_SAME;
2155 
2156   /* When inside_insn_group, delegate to the helper.  */
2157   if (inside_insn_group)
2158     return moveup_expr_inside_insn_group (expr, through_insn);
2159 
2160   /* Deal with unique insns and control dependencies.  */
2161   if (VINSN_UNIQUE_P (vi))
2162     {
2163       /* We can move jumps without side-effects or jumps that are
2164 	 mutually exclusive with instruction THROUGH_INSN (in all cases
2165 	 where dependencies allow doing so and the jump is not speculative).  */
2166       if (control_flow_insn_p (insn))
2167         {
2168           basic_block fallthru_bb;
2169 
2170           /* Do not move checks and do not move jumps through other
2171              jumps.  */
2172           if (control_flow_insn_p (through_insn)
2173               || sel_insn_is_speculation_check (insn))
2174             return MOVEUP_EXPR_NULL;
2175 
2176           /* Don't move jumps through CFG joins.  */
2177           if (bookkeeping_can_be_created_if_moved_through_p (through_insn))
2178             return MOVEUP_EXPR_NULL;
2179 
2180           /* The jump should have a clear fallthru block, and
2181              this block should be in the current region.  */
2182           if ((fallthru_bb = fallthru_bb_of_jump (insn)) == NULL
2183               || ! in_current_region_p (fallthru_bb))
2184             return MOVEUP_EXPR_NULL;
2185 
2186           /* And it should be mutually exclusive with through_insn.  */
2187           if (! sched_insns_conditions_mutex_p (insn, through_insn)
2188 	      && ! DEBUG_INSN_P (through_insn))
2189             return MOVEUP_EXPR_NULL;
2190         }
2191 
2192       /* Don't move what we can't move.  */
2193       if (EXPR_CANT_MOVE (expr)
2194 	  && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn))
2195 	return MOVEUP_EXPR_NULL;
2196 
2197       /* Don't move SCHED_GROUP instruction through anything.
2198          If we don't force this, then it will be possible to start
2199          scheduling a sched_group before all its dependencies are
2200          resolved.
2201          ??? Haifa deals with this issue by delaying the SCHED_GROUP
2202          as late as possible through rank_for_schedule.  */
2203       if (SCHED_GROUP_P (insn))
2204 	return MOVEUP_EXPR_NULL;
2205     }
2206   else
2207     gcc_assert (!control_flow_insn_p (insn));
2208 
2209   /* Don't move debug insns if this would require bookkeeping.  */
2210   if (DEBUG_INSN_P (insn)
2211       && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn)
2212       && moving_insn_creates_bookkeeping_block_p (insn, through_insn))
2213     return MOVEUP_EXPR_NULL;
2214 
2215   /* Deal with data dependencies.  */
2216   was_target_conflict = false;
2217   full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
2218   if (full_ds == 0)
2219     {
2220       if (!CANT_MOVE_TRAPPING (expr, through_insn))
2221 	return MOVEUP_EXPR_SAME;
2222     }
2223   else
2224     {
2225       /* We can move UNIQUE insn up only as a whole and unchanged,
2226          so it shouldn't have any dependencies.  */
2227       if (VINSN_UNIQUE_P (vi))
2228 	return MOVEUP_EXPR_NULL;
2229     }
2230 
2231   if (full_ds != 0 && can_speculate_dep_p (full_ds))
2232     {
2233       int res;
2234 
2235       res = speculate_expr (expr, full_ds);
2236       if (res >= 0)
2237 	{
2238           /* Speculation was successful.  */
2239           full_ds = 0;
2240           was_changed = (res > 0);
2241           if (res == 2)
2242             was_target_conflict = true;
2243           if (ptrans_type)
2244             *ptrans_type = TRANS_SPECULATION;
2245 	  sel_clear_has_dependence ();
2246 	}
2247     }
2248 
2249   if (has_dep_p[DEPS_IN_INSN])
2250     /* We have some dependency that cannot be discarded.  */
2251     return MOVEUP_EXPR_NULL;
2252 
2253   if (has_dep_p[DEPS_IN_LHS])
2254     {
2255       /* Only separable insns can be moved up with the new register.
2256          In any case, we should mark that the original register is
2257          unavailable.  */
2258       if (!enable_schedule_as_rhs_p || !EXPR_SEPARABLE_P (expr))
2259         return MOVEUP_EXPR_NULL;
2260 
2261       /* When renaming a hard register to a pseudo before reload, extra
2262 	 dependencies can occur from the implicit clobbers of the insn.
2263 	 Filter out such cases here.  */
2264       if (!reload_completed && REG_P (EXPR_LHS (expr))
2265 	  && HARD_REGISTER_P (EXPR_LHS (expr))
2266 	  && implicit_clobber_conflict_p (through_insn, expr))
2267 	{
2268 	  if (sched_verbose >= 6)
2269 	    sel_print ("implicit clobbers conflict detected, ");
2270 	  return MOVEUP_EXPR_NULL;
2271 	}
2272       EXPR_TARGET_AVAILABLE (expr) = false;
2273       was_target_conflict = true;
2274       as_rhs = true;
2275     }
2276 
2277   /* At this point we have either separable insns, that will be lifted
2278      up only as RHSes, or non-separable insns with no dependency in lhs.
2279      If dependency is in RHS, then try to perform substitution and move up
2280      substituted RHS:
2281 
2282       Ex. 1:				  Ex.2
2283 	y = x;				    y = x;
2284 	z = y*2;			    y = y*2;
2285 
2286     In Ex.1 y*2 can be substituted for x*2 and the whole operation can be
2287     moved above y=x assignment as z=x*2.
2288 
2289     In Ex.2 y*2 also can be substituted for x*2, but only the right hand
2290     side can be moved because of the output dependency.  The operation was
2291     cropped to its rhs above.  */
2292   if (has_dep_p[DEPS_IN_RHS])
2293     {
2294       ds_t *rhs_dsp = &has_dep_p[DEPS_IN_RHS];
2295 
2296       /* Can't substitute UNIQUE VINSNs.  */
2297       gcc_assert (!VINSN_UNIQUE_P (vi));
2298 
2299       if (can_speculate_dep_p (*rhs_dsp))
2300 	{
2301           int res;
2302 
2303           res = speculate_expr (expr, *rhs_dsp);
2304           if (res >= 0)
2305             {
2306               /* Speculation was successful.  */
2307               *rhs_dsp = 0;
2308               was_changed = (res > 0);
2309               if (res == 2)
2310                 was_target_conflict = true;
2311               if (ptrans_type)
2312                 *ptrans_type = TRANS_SPECULATION;
2313             }
2314 	  else
2315 	    return MOVEUP_EXPR_NULL;
2316 	}
2317       else if (can_substitute_through_p (through_insn,
2318                                          *rhs_dsp)
2319                && substitute_reg_in_expr (expr, through_insn, false))
2320 	{
2321           /* ??? We cannot perform substitution AND speculation on the same
2322              insn.  */
2323           gcc_assert (!was_changed);
2324           was_changed = true;
2325           if (ptrans_type)
2326             *ptrans_type = TRANS_SUBSTITUTION;
2327           EXPR_WAS_SUBSTITUTED (expr) = true;
2328 	}
2329       else
2330 	return MOVEUP_EXPR_NULL;
2331     }
2332 
2333   /* Don't move trapping insns through jumps.
2334      This check should be at the end to give a chance to control speculation
2335      to perform its duties.  */
2336   if (CANT_MOVE_TRAPPING (expr, through_insn))
2337     return MOVEUP_EXPR_NULL;
2338 
2339   return (was_changed
2340           ? MOVEUP_EXPR_CHANGED
2341           : (as_rhs
2342              ? MOVEUP_EXPR_AS_RHS
2343              : MOVEUP_EXPR_SAME));
2344 }
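
/* To recap the result codes used above and consumed by the cache helpers
   below:
   MOVEUP_EXPR_NULL    -- EXPR cannot be moved through THROUGH_INSN;
   MOVEUP_EXPR_SAME    -- EXPR moves through unchanged;
   MOVEUP_EXPR_AS_RHS  -- only the RHS of a separable EXPR may move
			  (there is a conflict on the target register);
   MOVEUP_EXPR_CHANGED -- EXPR moves after substitution or speculation.  */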
2345 
2346 /* Try to look at bitmap caches for EXPR and INSN pair, return true
2347    if successful.  When INSIDE_INSN_GROUP, also try to ignore dependencies
2348    that can exist within a parallel group.  Write to RES the resulting
2349    code for moveup_expr.  */
2350 static bool
2351 try_bitmap_cache (expr_t expr, insn_t insn,
2352                   bool inside_insn_group,
2353                   enum MOVEUP_EXPR_CODE *res)
2354 {
2355   int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
2356 
2357   /* First check whether we've analyzed this situation already.  */
2358   if (bitmap_bit_p (INSN_ANALYZED_DEPS (insn), expr_uid))
2359     {
2360       if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
2361         {
2362           if (sched_verbose >= 6)
2363             sel_print ("removed (cached)\n");
2364           *res = MOVEUP_EXPR_NULL;
2365           return true;
2366         }
2367       else
2368         {
2369           if (sched_verbose >= 6)
2370             sel_print ("unchanged (cached)\n");
2371           *res = MOVEUP_EXPR_SAME;
2372           return true;
2373         }
2374     }
2375   else if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
2376     {
2377       if (inside_insn_group)
2378         {
2379           if (sched_verbose >= 6)
2380             sel_print ("unchanged (as RHS, cached, inside insn group)\n");
2381           *res = MOVEUP_EXPR_SAME;
2382           return true;
2383 
2384         }
2385       else
2386         EXPR_TARGET_AVAILABLE (expr) = false;
2387 
2388       /* This is the only case when propagation result can change over time,
2389          as we can dynamically switch off scheduling as RHS.  In this case,
2390          just check the flag to reach the correct decision.  */
2391       if (enable_schedule_as_rhs_p)
2392         {
2393           if (sched_verbose >= 6)
2394             sel_print ("unchanged (as RHS, cached)\n");
2395           *res = MOVEUP_EXPR_AS_RHS;
2396           return true;
2397         }
2398       else
2399         {
2400           if (sched_verbose >= 6)
2401             sel_print ("removed (cached as RHS, but renaming"
2402                        " is now disabled)\n");
2403           *res = MOVEUP_EXPR_NULL;
2404           return true;
2405         }
2406     }
2407 
2408   return false;
2409 }
2410 
2411 /* Try to look at bitmap caches for EXPR and INSN pair, return true
2412    if successful.  Write to RES the resulting code for moveup_expr.  */
2413 static bool
2414 try_transformation_cache (expr_t expr, insn_t insn,
2415                           enum MOVEUP_EXPR_CODE *res)
2416 {
2417   struct transformed_insns *pti
2418     = (struct transformed_insns *)
2419     htab_find_with_hash (INSN_TRANSFORMED_INSNS (insn),
2420                          &EXPR_VINSN (expr),
2421                          VINSN_HASH_RTX (EXPR_VINSN (expr)));
2422   if (pti)
2423     {
2424       /* This EXPR was already moved through this insn and was
2425          changed as a result.  Fetch the proper data from
2426          the hashtable.  */
2427       insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2428                               INSN_UID (insn), pti->type,
2429                               pti->vinsn_old, pti->vinsn_new,
2430                               EXPR_SPEC_DONE_DS (expr));
2431 
2432       if (INSN_IN_STREAM_P (VINSN_INSN_RTX (pti->vinsn_new)))
2433         pti->vinsn_new = vinsn_copy (pti->vinsn_new, true);
2434       change_vinsn_in_expr (expr, pti->vinsn_new);
2435       if (pti->was_target_conflict)
2436         EXPR_TARGET_AVAILABLE (expr) = false;
2437       if (pti->type == TRANS_SPECULATION)
2438         {
2439           EXPR_SPEC_DONE_DS (expr) = pti->ds;
2440           EXPR_NEEDS_SPEC_CHECK_P (expr) |= pti->needs_check;
2441         }
2442 
2443       if (sched_verbose >= 6)
2444         {
2445           sel_print ("changed (cached): ");
2446           dump_expr (expr);
2447           sel_print ("\n");
2448         }
2449 
2450       *res = MOVEUP_EXPR_CHANGED;
2451       return true;
2452     }
2453 
2454   return false;
2455 }
2456 
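/* Note how the two bitmaps encode the three cacheable outcomes (see also
   try_bitmap_cache above):
     ANALYZED_DEPS set,   FOUND_DEPS set   -> MOVEUP_EXPR_NULL;
     ANALYZED_DEPS set,   FOUND_DEPS clear -> MOVEUP_EXPR_SAME;
     ANALYZED_DEPS clear, FOUND_DEPS set   -> MOVEUP_EXPR_AS_RHS.  */
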
2457 /* Update bitmap caches on INSN with result RES of propagating EXPR.  */
2458 static void
2459 update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group,
2460                      enum MOVEUP_EXPR_CODE res)
2461 {
2462   int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
2463 
2464   /* Do not cache result of propagating jumps through an insn group,
2465      as it is always true, which is not useful outside the group.  */
2466   if (inside_insn_group)
2467     return;
2468 
2469   if (res == MOVEUP_EXPR_NULL)
2470     {
2471       bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2472       bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
2473     }
2474   else if (res == MOVEUP_EXPR_SAME)
2475     {
2476       bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2477       bitmap_clear_bit (INSN_FOUND_DEPS (insn), expr_uid);
2478     }
2479   else if (res == MOVEUP_EXPR_AS_RHS)
2480     {
2481       bitmap_clear_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
2482       bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
2483     }
2484   else
2485     gcc_unreachable ();
2486 }
2487 
2488 /* Update hashtable on INSN with changed EXPR, old EXPR_OLD_VINSN
2489    and transformation type TRANS_TYPE.  */
2490 static void
2491 update_transformation_cache (expr_t expr, insn_t insn,
2492                              bool inside_insn_group,
2493                              enum local_trans_type trans_type,
2494                              vinsn_t expr_old_vinsn)
2495 {
2496   struct transformed_insns *pti;
2497 
2498   if (inside_insn_group)
2499     return;
2500 
2501   pti = XNEW (struct transformed_insns);
2502   pti->vinsn_old = expr_old_vinsn;
2503   pti->vinsn_new = EXPR_VINSN (expr);
2504   pti->type = trans_type;
2505   pti->was_target_conflict = was_target_conflict;
2506   pti->ds = EXPR_SPEC_DONE_DS (expr);
2507   pti->needs_check = EXPR_NEEDS_SPEC_CHECK_P (expr);
2508   vinsn_attach (pti->vinsn_old);
2509   vinsn_attach (pti->vinsn_new);
2510   *((struct transformed_insns **)
2511     htab_find_slot_with_hash (INSN_TRANSFORMED_INSNS (insn),
2512                               pti, VINSN_HASH_RTX (expr_old_vinsn),
2513                               INSERT)) = pti;
2514 }
2515 
2516 /* Same as moveup_expr, but first looks up the result of
2517    transformation in caches.  */
2518 static enum MOVEUP_EXPR_CODE
2519 moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
2520 {
2521   enum MOVEUP_EXPR_CODE res;
2522   bool got_answer = false;
2523 
2524   if (sched_verbose >= 6)
2525     {
2526       sel_print ("Moving ");
2527       dump_expr (expr);
2528       sel_print (" through %d: ", INSN_UID (insn));
2529     }
2530 
2531   if (DEBUG_INSN_P (EXPR_INSN_RTX (expr))
2532       && BLOCK_FOR_INSN (EXPR_INSN_RTX (expr))
2533       && (sel_bb_head (BLOCK_FOR_INSN (EXPR_INSN_RTX (expr)))
2534 	  == EXPR_INSN_RTX (expr)))
2535     /* Don't use cached information for debug insns that are heads of
2536        basic blocks.  */;
2537   else if (try_bitmap_cache (expr, insn, inside_insn_group, &res))
2538     /* When inside an insn group, we do not want to remove stores that
2539        conflict with previously issued loads.  */
2540     got_answer = ! inside_insn_group || res != MOVEUP_EXPR_NULL;
2541   else if (try_transformation_cache (expr, insn, &res))
2542     got_answer = true;
2543 
2544   if (! got_answer)
2545     {
2546       /* Invoke moveup_expr and record the results.  */
2547       vinsn_t expr_old_vinsn = EXPR_VINSN (expr);
2548       ds_t expr_old_spec_ds = EXPR_SPEC_DONE_DS (expr);
2549       int expr_uid = INSN_UID (VINSN_INSN_RTX (expr_old_vinsn));
2550       bool unique_p = VINSN_UNIQUE_P (expr_old_vinsn);
2551       enum local_trans_type trans_type = TRANS_SUBSTITUTION;
2552 
2553       /* ??? Invent something better than this.  We can't allow old_vinsn
2554          to go, we need it for the history vector.  */
2555       vinsn_attach (expr_old_vinsn);
2556 
2557       res = moveup_expr (expr, insn, inside_insn_group,
2558                          &trans_type);
2559       switch (res)
2560         {
2561         case MOVEUP_EXPR_NULL:
2562           update_bitmap_cache (expr, insn, inside_insn_group, res);
2563 	  if (sched_verbose >= 6)
2564             sel_print ("removed\n");
2565 	  break;
2566 
2567 	case MOVEUP_EXPR_SAME:
2568           update_bitmap_cache (expr, insn, inside_insn_group, res);
2569           if (sched_verbose >= 6)
2570             sel_print ("unchanged\n");
2571 	  break;
2572 
2573         case MOVEUP_EXPR_AS_RHS:
2574           gcc_assert (!unique_p || inside_insn_group);
2575           update_bitmap_cache (expr, insn, inside_insn_group, res);
2576 	  if (sched_verbose >= 6)
2577             sel_print ("unchanged (as RHS)\n");
2578 	  break;
2579 
2580 	case MOVEUP_EXPR_CHANGED:
2581           gcc_assert (INSN_UID (EXPR_INSN_RTX (expr)) != expr_uid
2582                       || EXPR_SPEC_DONE_DS (expr) != expr_old_spec_ds);
2583           insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2584                                   INSN_UID (insn), trans_type,
2585                                   expr_old_vinsn, EXPR_VINSN (expr),
2586                                   expr_old_spec_ds);
2587           update_transformation_cache (expr, insn, inside_insn_group,
2588                                        trans_type, expr_old_vinsn);
2589           if (sched_verbose >= 6)
2590             {
2591               sel_print ("changed: ");
2592               dump_expr (expr);
2593               sel_print ("\n");
2594             }
2595 	  break;
2596 	default:
2597 	  gcc_unreachable ();
2598         }
2599 
2600       vinsn_detach (expr_old_vinsn);
2601     }
2602 
2603   return res;
2604 }
2605 
2606 /* Moves an av set AVP up through INSN, performing necessary
2607    transformations.  */
2608 static void
2609 moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group)
2610 {
2611   av_set_iterator i;
2612   expr_t expr;
2613 
2614   FOR_EACH_EXPR_1 (expr, i, avp)
2615     {
2616 
2617       switch (moveup_expr_cached (expr, insn, inside_insn_group))
2618 	{
2619 	case MOVEUP_EXPR_SAME:
2620         case MOVEUP_EXPR_AS_RHS:
2621 	  break;
2622 
2623 	case MOVEUP_EXPR_NULL:
2624 	  av_set_iter_remove (&i);
2625 	  break;
2626 
2627 	case MOVEUP_EXPR_CHANGED:
2628           expr = merge_with_other_exprs (avp, &i, expr);
2629 	  break;
2630 
2631 	default:
2632 	  gcc_unreachable ();
2633 	}
2634     }
2635 }
2636 
2637 /* Moves AVP set along PATH.  */
2638 static void
2639 moveup_set_inside_insn_group (av_set_t *avp, ilist_t path)
2640 {
2641   int last_cycle;
2642 
2643   if (sched_verbose >= 6)
2644     sel_print ("Moving expressions up in the insn group...\n");
2645   if (! path)
2646     return;
2647   last_cycle = INSN_SCHED_CYCLE (ILIST_INSN (path));
2648   while (path
2649          && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
2650     {
2651       moveup_set_expr (avp, ILIST_INSN (path), true);
2652       path = ILIST_NEXT (path);
2653     }
2654 }
2655 
2656 /* Returns true if after moving EXPR along PATH it equals to EXPR_VLIW.  */
2657 static bool
2658 equal_after_moveup_path_p (expr_t expr, ilist_t path, expr_t expr_vliw)
2659 {
2660   expr_def _tmp, *tmp = &_tmp;
2661   int last_cycle;
2662   bool res = true;
2663 
2664   copy_expr_onside (tmp, expr);
2665   last_cycle = path ? INSN_SCHED_CYCLE (ILIST_INSN (path)) : 0;
2666   while (path
2667          && res
2668          && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
2669     {
2670       res = (moveup_expr_cached (tmp, ILIST_INSN (path), true)
2671              != MOVEUP_EXPR_NULL);
2672       path = ILIST_NEXT (path);
2673     }
2674 
2675   if (res)
2676     {
2677       vinsn_t tmp_vinsn = EXPR_VINSN (tmp);
2678       vinsn_t expr_vliw_vinsn = EXPR_VINSN (expr_vliw);
2679 
2680       if (tmp_vinsn != expr_vliw_vinsn)
2681 	res = vinsn_equal_p (tmp_vinsn, expr_vliw_vinsn);
2682     }
2683 
2684   clear_expr (tmp);
2685   return res;
2686 }
2687 
2688 
2689 /* Functions that compute av and lv sets.  */
2690 
2691 /* Returns true if INSN is not a downward continuation of the given path P in
2692    the current stage.  */
2693 static bool
2694 is_ineligible_successor (insn_t insn, ilist_t p)
2695 {
2696   insn_t prev_insn;
2697 
2698   /* Check that insn has not been deleted.  */
2699   if (PREV_INSN (insn) && NEXT_INSN (PREV_INSN (insn)) != insn)
2700     gcc_unreachable ();
2701   else if (NEXT_INSN (insn) && PREV_INSN (NEXT_INSN (insn)) != insn)
2702     gcc_unreachable ();
2703 
2704   /* If it's the first insn visited, then the successor is ok.  */
2705   if (!p)
2706     return false;
2707 
2708   prev_insn = ILIST_INSN (p);
2709 
2710   if (/* a backward edge.  */
2711       INSN_SEQNO (insn) < INSN_SEQNO (prev_insn)
2712       /* is already visited.  */
2713       || (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn)
2714 	  && (ilist_is_in_p (p, insn)
2715               /* We can reach another fence here and still seqno of insn
2716                  would be equal to seqno of prev_insn.  This is possible
2717                  when prev_insn is a previously created bookkeeping copy.
2718                  In that case it'd get a seqno of insn.  Thus, check here
2719                  whether insn is in current fence too.  */
2720               || IN_CURRENT_FENCE_P (insn)))
2721       /* Was already scheduled on this round.  */
2722       || (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn)
2723 	  && IN_CURRENT_FENCE_P (insn))
2724       /* An insn from another fence could also be
2725 	 scheduled earlier even if this insn is not in
2726 	 a fence list right now.  Check INSN_SCHED_CYCLE instead.  */
2727       || (!pipelining_p
2728           && INSN_SCHED_TIMES (insn) > 0))
2729     return true;
2730   else
2731     return false;
2732 }
2733 
2734 /* Computes the av_set below the last bb insn INSN, doing all the 'dirty work'
2735    of handling multiple successors and properly merging their av_sets.  P is
2736    the current path traversed.  WS is the size of the lookahead window.
2737    Return the av set computed.  */
2738 static av_set_t
2739 compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
2740 {
2741   struct succs_info *sinfo;
2742   av_set_t expr_in_all_succ_branches = NULL;
2743   int is;
2744   insn_t succ, zero_succ = NULL;
2745   av_set_t av1 = NULL;
2746 
2747   gcc_assert (sel_bb_end_p (insn));
2748 
2749   /* Find the different kinds of successors needed for the correct
2750      computation of the SPEC and TARGET_AVAILABLE attributes.  */
2751   sinfo = compute_succs_info (insn, SUCCS_NORMAL);
2752 
2753   /* Debug output.  */
2754   if (sched_verbose >= 6)
2755     {
2756       sel_print ("successors of bb end (%d): ", INSN_UID (insn));
2757       dump_insn_vector (sinfo->succs_ok);
2758       sel_print ("\n");
2759       if (sinfo->succs_ok_n != sinfo->all_succs_n)
2760         sel_print ("real successors num: %d\n", sinfo->all_succs_n);
2761     }
2762 
2763   /* Add insn to the tail of current path.  */
2764   ilist_add (&p, insn);
2765 
2766   FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
2767     {
2768       av_set_t succ_set;
2769 
2770       /* We will edit SUCC_SET and EXPR_SPEC field of its elements.  */
2771       succ_set = compute_av_set_inside_bb (succ, p, ws, true);
2772 
2773       av_set_split_usefulness (succ_set,
2774                                sinfo->probs_ok[is],
2775                                sinfo->all_prob);
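      /* E.g., with all_prob == 100 and probs_ok[is] == 40, the usefulness
	 of each expr in SUCC_SET is presumably scaled by 40/100 to reflect
	 how often this successor is actually reached.  */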
2776 
2777       if (sinfo->all_succs_n > 1)
2778 	{
2779           /* Find EXPR'es that came from *all* successors and save them
2780              into expr_in_all_succ_branches.  This set will be used later
2781              for calculating speculation attributes of EXPR'es.  */
2782           if (is == 0)
2783             {
2784               expr_in_all_succ_branches = av_set_copy (succ_set);
2785 
2786               /* Remember the first successor for later. */
2787               zero_succ = succ;
2788             }
2789           else
2790             {
2791               av_set_iterator i;
2792               expr_t expr;
2793 
2794               FOR_EACH_EXPR_1 (expr, i, &expr_in_all_succ_branches)
2795                 if (!av_set_is_in_p (succ_set, EXPR_VINSN (expr)))
2796                   av_set_iter_remove (&i);
2797             }
2798 	}
2799 
2800       /* Union the av_sets.  Check liveness restrictions on target registers
2801          in special case of two successors.  */
2802       if (sinfo->succs_ok_n == 2 && is == 1)
2803         {
2804           basic_block bb0 = BLOCK_FOR_INSN (zero_succ);
2805           basic_block bb1 = BLOCK_FOR_INSN (succ);
2806 
2807           gcc_assert (BB_LV_SET_VALID_P (bb0) && BB_LV_SET_VALID_P (bb1));
2808           av_set_union_and_live (&av1, &succ_set,
2809                                  BB_LV_SET (bb0),
2810                                  BB_LV_SET (bb1),
2811                                  insn);
2812         }
2813       else
2814         av_set_union_and_clear (&av1, &succ_set, insn);
2815     }
2816 
2817   /* Check the liveness restrictions the hard way when there are more than
2818      two successors.  */
2819   if (sinfo->succs_ok_n > 2)
2820     FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
2821       {
2822         basic_block succ_bb = BLOCK_FOR_INSN (succ);
2823 	av_set_t av_succ = (is_ineligible_successor (succ, p)
2824 			    ? NULL
2825 			    : BB_AV_SET (succ_bb));
2826 
2827         gcc_assert (BB_LV_SET_VALID_P (succ_bb));
2828 	mark_unavailable_targets (av1, av_succ, BB_LV_SET (succ_bb));
2829       }
2830 
2831   /* Finally, check liveness restrictions on paths leaving the region.  */
2832   if (sinfo->all_succs_n > sinfo->succs_ok_n)
2833     FOR_EACH_VEC_ELT (sinfo->succs_other, is, succ)
2834       mark_unavailable_targets
2835         (av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (succ)));
2836 
2837   if (sinfo->all_succs_n > 1)
2838     {
2839       av_set_iterator i;
2840       expr_t expr;
2841 
2842       /* Increase the spec attribute of all EXPR'es that didn't come
2843 	 from all successors.  */
2844       FOR_EACH_EXPR (expr, i, av1)
2845 	if (!av_set_is_in_p (expr_in_all_succ_branches, EXPR_VINSN (expr)))
2846 	  EXPR_SPEC (expr)++;
2847 
2848       av_set_clear (&expr_in_all_succ_branches);
2849 
2850       /* Do not move conditional branches through other
2851 	 conditional branches.  So, remove all conditional
2852 	 branches from the av_set if the current insn is a conditional
2853 	 branch.  */
2854       av_set_substract_cond_branches (&av1);
2855     }
2856 
2857   ilist_remove (&p);
2858   free_succs_info (sinfo);
2859 
2860   if (sched_verbose >= 6)
2861     {
2862       sel_print ("av_succs (%d): ", INSN_UID (insn));
2863       dump_av_set (av1);
2864       sel_print ("\n");
2865     }
2866 
2867   return av1;
2868 }
2869 
2870 /* This function computes the av_set for FIRST_INSN by dragging a valid
2871    av_set through all the basic block insns, either from the end of the
2872    basic block (computed using compute_av_set_at_bb_end) or from the insn
2873    on which MAX_WS was exceeded.  compute_av_set_at_bb_end also handles
2874    the merging of av_sets over conditional branches below the block.
2875    FIRST_INSN - the basic block head, P - path consisting of the insns
2876    traversed on the way to the FIRST_INSN (the path is sparse, only bb heads
2877    and bb ends are added to the path), WS - current window size,
2878    NEED_COPY_P - true if we'll make a copy of av_set before returning it.  */
2879 static av_set_t
2880 compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
2881 			  bool need_copy_p)
2882 {
2883   insn_t cur_insn;
2884   int end_ws = ws;
2885   insn_t bb_end = sel_bb_end (BLOCK_FOR_INSN (first_insn));
2886   insn_t after_bb_end = NEXT_INSN (bb_end);
2887   insn_t last_insn;
2888   av_set_t av = NULL;
2889   basic_block cur_bb = BLOCK_FOR_INSN (first_insn);
2890 
2891   /* Return NULL if insn is not on the legitimate downward path.  */
2892   if (is_ineligible_successor (first_insn, p))
2893     {
2894       if (sched_verbose >= 6)
2895         sel_print ("Insn %d is ineligible_successor\n", INSN_UID (first_insn));
2896 
2897       return NULL;
2898     }
2899 
2900   /* If insn already has valid av(insn) computed, just return it.  */
2901   if (AV_SET_VALID_P (first_insn))
2902     {
2903       av_set_t av_set;
2904 
2905       if (sel_bb_head_p (first_insn))
2906 	av_set = BB_AV_SET (BLOCK_FOR_INSN (first_insn));
2907       else
2908 	av_set = NULL;
2909 
2910       if (sched_verbose >= 6)
2911         {
2912           sel_print ("Insn %d has a valid av set: ", INSN_UID (first_insn));
2913           dump_av_set (av_set);
2914           sel_print ("\n");
2915         }
2916 
2917       return need_copy_p ? av_set_copy (av_set) : av_set;
2918     }
2919 
2920   ilist_add (&p, first_insn);
2921 
2922   /* After this loop completes, LAST_INSN will hold the insn from which
2923      to start the backward av_set computation: either an insn that already
2924      carries a valid (possibly empty) av_set, e.g. because the window size
2925      was exceeded on it, or AFTER_BB_END, in which case the av_set is
2926      computed from the end of the basic block below.  */
2927   for (last_insn = first_insn; last_insn != after_bb_end;
2928        last_insn = NEXT_INSN (last_insn))
2929     {
2930       /* We may encounter valid av_set not only on bb_head, but also on
2931 	 those insns on which previously MAX_WS was exceeded.  */
2932       if (AV_SET_VALID_P (last_insn))
2933 	{
2934           if (sched_verbose >= 6)
2935             sel_print ("Insn %d has a valid empty av set\n", INSN_UID (last_insn));
2936 	  break;
2937 	}
2938 
2939       /* The special case: the last insn of the BB may be an
2940          ineligible_successor due to its SEQ_NO that was set on
2941 	 it as bookkeeping.  */
2942       if (last_insn != first_insn
2943           && is_ineligible_successor (last_insn, p))
2944 	{
2945           if (sched_verbose >= 6)
2946             sel_print ("Insn %d is ineligible_successor\n", INSN_UID (last_insn));
2947 	  break;
2948 	}
2949 
2950       if (DEBUG_INSN_P (last_insn))
2951 	continue;
2952 
2953       if (end_ws > max_ws)
2954 	{
2955 	  /* We have hit the max lookahead window size at this insn, so
2956 	     mark it as having a valid (empty) av_set.  */
2957 	  INSN_WS_LEVEL (last_insn) = global_level;
2958 
2959 	  if (sched_verbose >= 6)
2960             sel_print ("Insn %d is beyond the software lookahead window size\n",
2961                        INSN_UID (last_insn));
2962 	  break;
2963 	}
2964 
2965       end_ws++;
2966     }
2967 
2968   /* Get the valid av_set into AV above the LAST_INSN to start backward
2969      computation from.  It will be either an empty av_set or the av_set
2970      computed from the successors of the last insn of the current bb.  */
2971   if (last_insn != after_bb_end)
2972     {
2973       av = NULL;
2974 
2975       /* This is needed only to obtain av_sets that are identical to
2976          those computed by the old compute_av_set version.  */
2977       if (last_insn == first_insn && !INSN_NOP_P (last_insn))
2978         av_set_add (&av, INSN_EXPR (last_insn));
2979     }
2980   else
2981     /* END_WS is always already increased by 1 if LAST_INSN == AFTER_BB_END.  */
2982     av = compute_av_set_at_bb_end (bb_end, p, end_ws);
2983 
2984   /* Compute av_set in AV starting from below the LAST_INSN up to
2985      location above the FIRST_INSN.  */
2986   for (cur_insn = PREV_INSN (last_insn); cur_insn != PREV_INSN (first_insn);
2987        cur_insn = PREV_INSN (cur_insn))
2988     if (!INSN_NOP_P (cur_insn))
2989       {
2990         expr_t expr;
2991 
2992         moveup_set_expr (&av, cur_insn, false);
2993 
2994         /* If the expression for CUR_INSN is already in the set, replace
2995            it with CUR_INSN's own EXPR, which is available right here.  */
2996         expr = av_set_lookup (av, INSN_VINSN (cur_insn));
2997         if (expr != NULL)
2998           {
2999             clear_expr (expr);
3000             copy_expr (expr, INSN_EXPR (cur_insn));
3001           }
3002         else
3003           av_set_add (&av, INSN_EXPR (cur_insn));
3004       }
3005 
3006   /* Clear stale bb_av_set.  */
3007   if (sel_bb_head_p (first_insn))
3008     {
3009       av_set_clear (&BB_AV_SET (cur_bb));
3010       BB_AV_SET (cur_bb) = need_copy_p ? av_set_copy (av) : av;
3011       BB_AV_LEVEL (cur_bb) = global_level;
3012     }
3013 
3014   if (sched_verbose >= 6)
3015     {
3016       sel_print ("Computed av set for insn %d: ", INSN_UID (first_insn));
3017       dump_av_set (av);
3018       sel_print ("\n");
3019     }
3020 
3021   ilist_remove (&p);
3022   return av;
3023 }
3024 
3025 /* Compute av set before INSN.
3026    INSN - the current operation (actual rtx INSN)
3027    P - the current path, which is list of insns visited so far
3028    WS - software lookahead window size.
3029    UNIQUE_P - TRUE if the returned av_set will be changed; hence,
3030    if we want to save the computed av_set in s_i_d, we should make a copy of it.
3031 
3032    In the resulting set we will have only expressions that don't have delay
3033    stalls and nonsubstitutable dependences.  */
3034 static av_set_t
3035 compute_av_set (insn_t insn, ilist_t p, int ws, bool unique_p)
3036 {
3037   return compute_av_set_inside_bb (insn, p, ws, unique_p);
3038 }
3039 
3040 /* Propagate a liveness set LV through INSN.  */
3041 static void
3042 propagate_lv_set (regset lv, insn_t insn)
3043 {
3044   gcc_assert (INSN_P (insn));
3045 
3046   if (INSN_NOP_P (insn))
3047     return;
3048 
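  /* Kill the registers set by INSN and make the registers it uses live:
     DF performs this backward simulation for us.  */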
3049   df_simulate_one_insn_backwards (BLOCK_FOR_INSN (insn), insn, lv);
3050 }
3051 
3052 /* Return the liveness set at the end of BB.  */
3053 static regset
3054 compute_live_after_bb (basic_block bb)
3055 {
3056   edge e;
3057   edge_iterator ei;
3058   regset lv = get_clear_regset_from_pool ();
3059 
3060   gcc_assert (!ignore_first);
3061 
3062   FOR_EACH_EDGE (e, ei, bb->succs)
3063     if (sel_bb_empty_p (e->dest))
3064       {
3065         if (! BB_LV_SET_VALID_P (e->dest))
3066           {
3067             gcc_unreachable ();
3068             gcc_assert (BB_LV_SET (e->dest) == NULL);
3069             BB_LV_SET (e->dest) = compute_live_after_bb (e->dest);
3070             BB_LV_SET_VALID_P (e->dest) = true;
3071           }
3072         IOR_REG_SET (lv, BB_LV_SET (e->dest));
3073       }
3074     else
3075       IOR_REG_SET (lv, compute_live (sel_bb_head (e->dest)));
3076 
3077   return lv;
3078 }
3079 
3080 /* Compute the set of all live registers at the point before INSN and save
3081    it at INSN if INSN is the bb header.  */
3082 regset
3083 compute_live (insn_t insn)
3084 {
3085   basic_block bb = BLOCK_FOR_INSN (insn);
3086   insn_t final, temp;
3087   regset lv;
3088 
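  /* When IGNORE_FIRST is set (see update_liveness_on_insn), the cached
     set for INSN itself is considered stale and is recomputed below.  */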
3089   /* Return the valid set if we already have it.  */
3090   if (!ignore_first)
3091     {
3092       regset src = NULL;
3093 
3094       if (sel_bb_head_p (insn) && BB_LV_SET_VALID_P (bb))
3095         src = BB_LV_SET (bb);
3096       else
3097         {
3098           gcc_assert (in_current_region_p (bb));
3099           if (INSN_LIVE_VALID_P (insn))
3100             src = INSN_LIVE (insn);
3101         }
3102 
3103       if (src)
3104 	{
3105 	  lv = get_regset_from_pool ();
3106 	  COPY_REG_SET (lv, src);
3107 
3108           if (sel_bb_head_p (insn) && ! BB_LV_SET_VALID_P (bb))
3109             {
3110               COPY_REG_SET (BB_LV_SET (bb), lv);
3111               BB_LV_SET_VALID_P (bb) = true;
3112             }
3113 
3114 	  return_regset_to_pool (lv);
3115 	  return lv;
3116 	}
3117     }
3118 
3119   /* We've skipped the wrong lv_set.  Don't skip the right one.  */
3120   ignore_first = false;
3121   gcc_assert (in_current_region_p (bb));
3122 
3123   /* Find a valid LV set in this block or below, if needed.
3124      Start searching from the next insn: either ignore_first is true, or
3125      INSN doesn't have a correct live set.  */
3126   temp = NEXT_INSN (insn);
3127   final = NEXT_INSN (BB_END (bb));
3128   while (temp != final && ! INSN_LIVE_VALID_P (temp))
3129     temp = NEXT_INSN (temp);
3130   if (temp == final)
3131     {
3132       lv = compute_live_after_bb (bb);
3133       temp = PREV_INSN (temp);
3134     }
3135   else
3136     {
3137       lv = get_regset_from_pool ();
3138       COPY_REG_SET (lv, INSN_LIVE (temp));
3139     }
3140 
3141   /* Put correct lv sets on the insns which have bad sets.  */
3142   final = PREV_INSN (insn);
3143   while (temp != final)
3144     {
3145       propagate_lv_set (lv, temp);
3146       COPY_REG_SET (INSN_LIVE (temp), lv);
3147       INSN_LIVE_VALID_P (temp) = true;
3148       temp = PREV_INSN (temp);
3149     }
3150 
3151   /* Also store the set at the BB level if INSN is the bb head.  */
3152   if (sel_bb_head_p (insn))
3153     {
3154       basic_block bb = BLOCK_FOR_INSN (insn);
3155 
3156       COPY_REG_SET (BB_LV_SET (bb), lv);
3157       BB_LV_SET_VALID_P (bb) = true;
3158     }
3159 
3160   /* We return LV to the pool, but will not clear it there.  Thus we can
3161      legitimately use LV till the next use of regset_pool_get ().  */
3162   return_regset_to_pool (lv);
3163   return lv;
3164 }
3165 
3166 /* Update liveness sets for INSN.  */
3167 static inline void
3168 update_liveness_on_insn (rtx_insn *insn)
3169 {
3170   ignore_first = true;
3171   compute_live (insn);
3172 }
3173 
3174 /* Compute liveness below INSN and write it into REGS.  */
3175 static inline void
3176 compute_live_below_insn (rtx_insn *insn, regset regs)
3177 {
3178   rtx_insn *succ;
3179   succ_iterator si;
3180 
3181   FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
3182     IOR_REG_SET (regs, compute_live (succ));
3183 }
3184 
3185 /* Update the data gathered in av and lv sets starting from INSN.  */
3186 static void
3187 update_data_sets (rtx_insn *insn)
3188 {
3189   update_liveness_on_insn (insn);
3190   if (sel_bb_head_p (insn))
3191     {
3192       gcc_assert (AV_LEVEL (insn) != 0);
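      /* Resetting BB_AV_LEVEL invalidates the cached av_set, so the
	 compute_av_set call below recomputes it from scratch.  */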
3193       BB_AV_LEVEL (BLOCK_FOR_INSN (insn)) = -1;
3194       compute_av_set (insn, NULL, 0, 0);
3195     }
3196 }
3197 
3198 
3199 /* Helper for move_op () and find_used_regs ().
3200    Return the speculation type for which a check should be created at the
3201    place of INSN.  EXPR is one of the original ops we are searching for.  */
3202 static ds_t
3203 get_spec_check_type_for_insn (insn_t insn, expr_t expr)
3204 {
3205   ds_t to_check_ds;
3206   ds_t already_checked_ds = EXPR_SPEC_DONE_DS (INSN_EXPR (insn));
3207 
3208   to_check_ds = EXPR_SPEC_TO_CHECK_DS (expr);
3209 
3210   if (targetm.sched.get_insn_checked_ds)
3211     already_checked_ds |= targetm.sched.get_insn_checked_ds (insn);
3212 
3213   if (spec_info != NULL
3214       && (spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL))
3215     already_checked_ds |= BEGIN_CONTROL;
3216 
3217   already_checked_ds = ds_get_speculation_types (already_checked_ds);
3218 
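  /* Keep only the speculation types that have not been checked yet on
     this path; the caller will create a check for what remains.  */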
3219   to_check_ds &= ~already_checked_ds;
3220 
3221   return to_check_ds;
3222 }
3223 
3224 /* Find the set of registers that are unavailable for storing exprs
3225    while moving ORIG_OPS up on the path starting from INSN due to
3226    liveness (USED_REGS) or hardware restrictions (REG_RENAME_P).
3227 
3228    All the original operations found during the traversal are saved in the
3229    ORIGINAL_INSNS list.
3230 
3231    REG_RENAME_P denotes the set of hardware registers that
3232    cannot be used with renaming due to the register class restrictions,
3233    mode restrictions and others (the register we choose should be of a
3234    class compatible with the original uses, shouldn't be in call_used_regs,
3235    should be HARD_REGNO_RENAME_OK, etc).
3236 
3237    Returns TRUE if we've found all original insns, FALSE otherwise.
3238 
3239    This function utilizes code_motion_path_driver (formerly find_used_regs_1)
3240    to traverse the code motion paths.  This helper function finds registers
3241    that are not available for storing exprs while moving ORIG_OPS up on the
3242    path starting from INSN.  A register is considered used on a moving path
3243    if one of the following conditions is not satisfied:
3244 
3245       (1) a register not set or read on any path from xi to an instance of
3246 	  the original operation,
3247       (2) not among the live registers of the point immediately following the
3248           first original operation on a given downward path, except for the
3249 	  original target register of the operation,
3250       (3) not live on the other path of any conditional branch that is passed
3251 	  by the operation, in case original operations are not present on
3252 	  both paths of the conditional branch.
3253 
3257    REG_RENAME_P->CROSSES_CALL is true if there is a call insn on the path
3258    from INSN to an original insn.  In this case CALL_USED_REG_SET will be
3259    added to the unavailable hard regs at the point an original operation is found.  */
3260 
3261 static bool
3262 find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
3263 		struct reg_rename  *reg_rename_p, def_list_t *original_insns)
3264 {
3265   def_list_iterator i;
3266   def_t def;
3267   int res;
3268   bool needs_spec_check_p = false;
3269   expr_t expr;
3270   av_set_iterator expr_iter;
3271   struct fur_static_params sparams;
3272   struct cmpd_local_params lparams;
3273 
3274   /* We haven't visited any blocks yet.  */
3275   bitmap_clear (code_motion_visited_blocks);
3276 
3277   /* Init parameters for code_motion_path_driver.  */
3278   sparams.crosses_call = false;
3279   sparams.original_insns = original_insns;
3280   sparams.used_regs = used_regs;
3281 
3282   /* Set the appropriate hooks and data.  */
3283   code_motion_path_driver_info = &fur_hooks;
3284 
3285   res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
3286 
3287   reg_rename_p->crosses_call |= sparams.crosses_call;
3288 
3289   gcc_assert (res == 1);
3290   gcc_assert (original_insns && *original_insns);
3291 
3292   /* ??? We calculate whether an expression needs a check when computing
3293      av sets.  This information is not as precise as it could be due to
3294      merging this bit in merge_expr.  We can do better in find_used_regs,
3295      but we want to avoid multiple traversals of the same code motion
3296      paths.  */
3297   FOR_EACH_EXPR (expr, expr_iter, orig_ops)
3298     needs_spec_check_p |= EXPR_NEEDS_SPEC_CHECK_P (expr);
3299 
3300   /* Mark hardware regs in REG_RENAME_P that are not suitable
3301      for renaming expr in INSN due to hardware restrictions (register class,
3302      mode compatibility, etc).  */
3303   FOR_EACH_DEF (def, i, *original_insns)
3304     {
3305       vinsn_t vinsn = INSN_VINSN (def->orig_insn);
3306 
3307       if (VINSN_SEPARABLE_P (vinsn))
3308 	mark_unavailable_hard_regs (def, reg_rename_p, used_regs);
3309 
3310       /* Do not allow clobbering of ld.[sa] address in case some of the
3311          original operations need a check.  */
3312       if (needs_spec_check_p)
3313 	IOR_REG_SET (used_regs, VINSN_REG_USES (vinsn));
3314     }
3315 
3316   return true;
3317 }
3318 
3319 
3320 /* Functions to choose the best insn from available ones.  */
3321 
3322 /* Adjusts the priority for EXPR using the backend *_adjust_priority hook.  */
3323 static int
3324 sel_target_adjust_priority (expr_t expr)
3325 {
3326   int priority = EXPR_PRIORITY (expr);
3327   int new_priority;
3328 
3329   if (targetm.sched.adjust_priority)
3330     new_priority = targetm.sched.adjust_priority (EXPR_INSN_RTX (expr), priority);
3331   else
3332     new_priority = priority;
3333 
3334   /* If the priority has changed, adjust EXPR_PRIORITY_ADJ accordingly.  */
3335   EXPR_PRIORITY_ADJ (expr) = new_priority - EXPR_PRIORITY (expr);
3336 
3337   if (sched_verbose >= 4)
3338     sel_print ("sel_target_adjust_priority: insn %d,  %d+%d = %d.\n",
3339 	       INSN_UID (EXPR_INSN_RTX (expr)), EXPR_PRIORITY (expr),
3340 	       EXPR_PRIORITY_ADJ (expr), new_priority);
3341 
3342   return new_priority;
3343 }
3344 
3345 /* Rank two available exprs for scheduling.  Never return 0 here.  */
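/* Note that X and Y are deliberately swapped into TMP2 and TMP below, so
   "better" expressions sort towards the tail of the vector; the ready list
   and fill_vec_av_set both pick elements starting from the end.  */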
3346 static int
3347 sel_rank_for_schedule (const void *x, const void *y)
3348 {
3349   expr_t tmp = *(const expr_t *) y;
3350   expr_t tmp2 = *(const expr_t *) x;
3351   insn_t tmp_insn, tmp2_insn;
3352   vinsn_t tmp_vinsn, tmp2_vinsn;
3353   int val;
3354 
3355   tmp_vinsn = EXPR_VINSN (tmp);
3356   tmp2_vinsn = EXPR_VINSN (tmp2);
3357   tmp_insn = EXPR_INSN_RTX (tmp);
3358   tmp2_insn = EXPR_INSN_RTX (tmp2);
3359 
3360   /* Schedule debug insns as early as possible.  */
3361   if (DEBUG_INSN_P (tmp_insn) && !DEBUG_INSN_P (tmp2_insn))
3362     return -1;
3363   else if (DEBUG_INSN_P (tmp2_insn))
3364     return 1;
3365 
3366   /* Prefer SCHED_GROUP_P insns to any others.  */
3367   if (SCHED_GROUP_P (tmp_insn) != SCHED_GROUP_P (tmp2_insn))
3368     {
3369       if (VINSN_UNIQUE_P (tmp_vinsn) && VINSN_UNIQUE_P (tmp2_vinsn))
3370         return SCHED_GROUP_P (tmp2_insn) ? 1 : -1;
3371 
3372       /* Now uniqueness means SCHED_GROUP_P is set, because schedule groups
3373          cannot be cloned.  */
3374       if (VINSN_UNIQUE_P (tmp2_vinsn))
3375         return 1;
3376       return -1;
3377     }
3378 
3379   /* Discourage scheduling of speculative checks.  */
3380   val = (sel_insn_is_speculation_check (tmp_insn)
3381 	 - sel_insn_is_speculation_check (tmp2_insn));
3382   if (val)
3383     return val;
3384 
3385   /* Prefer a not-yet-scheduled insn over an already scheduled one.  */
3386   if (EXPR_SCHED_TIMES (tmp) > 0 || EXPR_SCHED_TIMES (tmp2) > 0)
3387     {
3388       val = EXPR_SCHED_TIMES (tmp) - EXPR_SCHED_TIMES (tmp2);
3389       if (val)
3390 	return val;
3391     }
3392 
3393   /* Prefer jump over non-jump instruction.  */
3394   if (control_flow_insn_p (tmp_insn) && !control_flow_insn_p (tmp2_insn))
3395     return -1;
3396   else if (control_flow_insn_p (tmp2_insn) && !control_flow_insn_p (tmp_insn))
3397     return 1;
3398 
3399   /* Prefer an expr with non-zero usefulness.  */
3400   int u1 = EXPR_USEFULNESS (tmp), u2 = EXPR_USEFULNESS (tmp2);
3401 
3402   if (u1 == 0)
3403     {
3404       if (u2 == 0)
3405         u1 = u2 = 1;
3406       else
3407         return 1;
3408     }
3409   else if (u2 == 0)
3410     return -1;
3411 
3412   /* Prefer an expr with greater priority.  */
3413   val = (u2 * (EXPR_PRIORITY (tmp2) + EXPR_PRIORITY_ADJ (tmp2))
3414          - u1 * (EXPR_PRIORITY (tmp) + EXPR_PRIORITY_ADJ (tmp)));
3415   if (val)
3416     return val;
3417 
3418   if (spec_info != NULL && spec_info->mask != 0)
3419     /* This code was taken from haifa-sched.c: rank_for_schedule ().  */
3420     {
3421       ds_t ds1, ds2;
3422       dw_t dw1, dw2;
3423       int dw;
3424 
3425       ds1 = EXPR_SPEC_DONE_DS (tmp);
3426       if (ds1)
3427 	dw1 = ds_weak (ds1);
3428       else
3429 	dw1 = NO_DEP_WEAK;
3430 
3431       ds2 = EXPR_SPEC_DONE_DS (tmp2);
3432       if (ds2)
3433 	dw2 = ds_weak (ds2);
3434       else
3435 	dw2 = NO_DEP_WEAK;
3436 
3437       dw = dw2 - dw1;
3438       if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
3439 	return dw;
3440     }
3441 
3442   /* Prefer an old insn to a bookkeeping insn.  */
3443   if (INSN_UID (tmp_insn) < first_emitted_uid
3444       && INSN_UID (tmp2_insn) >= first_emitted_uid)
3445     return -1;
3446   if (INSN_UID (tmp_insn) >= first_emitted_uid
3447       && INSN_UID (tmp2_insn) < first_emitted_uid)
3448     return 1;
3449 
3450   /* Prefer an insn with smaller UID, as a last resort.
3451      We can't safely use INSN_LUID as it is defined only for those insns
3452      that are in the stream.  */
3453   return INSN_UID (tmp_insn) - INSN_UID (tmp2_insn);
3454 }
3455 
3456 /* Filter out expressions from av set pointed to by AV_PTR
3457    that are pipelined too many times.  */
3458 static void
3459 process_pipelined_exprs (av_set_t *av_ptr)
3460 {
3461   expr_t expr;
3462   av_set_iterator si;
3463 
3464   /* Don't pipeline already pipelined code as that would increase
3465      the number of unnecessary register moves.  */
3466   FOR_EACH_EXPR_1 (expr, si, av_ptr)
3467     {
3468       if (EXPR_SCHED_TIMES (expr)
3469 	  >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES))
3470 	av_set_iter_remove (&si);
3471     }
3472 }
3473 
3474 /* Filter speculative insns from AV_PTR if we don't want them.  */
3475 static void
3476 process_spec_exprs (av_set_t *av_ptr)
3477 {
3478   expr_t expr;
3479   av_set_iterator si;
3480 
3481   if (spec_info == NULL)
3482     return;
3483 
3484   /* Scan *AV_PTR to find out if we want to consider speculative
3485      instructions for scheduling.  */
3486   FOR_EACH_EXPR_1 (expr, si, av_ptr)
3487     {
3488       ds_t ds;
3489 
3490       ds = EXPR_SPEC_DONE_DS (expr);
3491 
3492       /* The probability of success is too low - don't speculate.  */
3493       if ((ds & SPECULATIVE)
3494           && (ds_weak (ds) < spec_info->data_weakness_cutoff
3495               || EXPR_USEFULNESS (expr) < spec_info->control_weakness_cutoff
3496 	      || (pipelining_p && false
3497 		  && (ds & DATA_SPEC)
3498 		  && (ds & CONTROL_SPEC))))
3499         {
3500           av_set_iter_remove (&si);
3501           continue;
3502         }
3503     }
3504 }
3505 
3506 /* Search for any use-like insns in AV_PTR and decide on scheduling
3507    them.  Return one when found, and NULL otherwise.
3508    Note that we check here whether a USE could be scheduled to avoid
3509    an infinite loop later.  */
3510 static expr_t
3511 process_use_exprs (av_set_t *av_ptr)
3512 {
3513   expr_t expr;
3514   av_set_iterator si;
3515   bool uses_present_p = false;
3516   bool try_uses_p = true;
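  /* TRY_USES_P stays true only while every expression seen is
     unrecognizable (recog_memoized < 0); a single recognizable insn in
     the set means real work is available, so USEs should wait.  */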
3517 
3518   FOR_EACH_EXPR_1 (expr, si, av_ptr)
3519     {
3520       /* This will also initialize INSN_CODE for later use.  */
3521       if (recog_memoized (EXPR_INSN_RTX (expr)) < 0)
3522         {
3523           /* If we have a USE in *AV_PTR that was not scheduled yet,
3524              schedule it now, since doing so can only help.  */
3525           if (EXPR_SCHED_TIMES (expr) <= 0)
3526             {
3527               if (EXPR_TARGET_AVAILABLE (expr) == 1)
3528                 return expr;
3529 
3530               av_set_iter_remove (&si);
3531             }
3532           else
3533             {
3534               gcc_assert (pipelining_p);
3535 
3536               uses_present_p = true;
3537             }
3538         }
3539       else
3540         try_uses_p = false;
3541     }
3542 
3543   if (uses_present_p)
3544     {
3545       /* If we don't want to schedule any USEs right now and we have some
3546            in *AV_PTR, remove them, else just return the first one found.  */
3547       if (!try_uses_p)
3548         {
3549           FOR_EACH_EXPR_1 (expr, si, av_ptr)
3550             if (INSN_CODE (EXPR_INSN_RTX (expr)) < 0)
3551               av_set_iter_remove (&si);
3552         }
3553       else
3554         {
3555           FOR_EACH_EXPR_1 (expr, si, av_ptr)
3556             {
3557               gcc_assert (INSN_CODE (EXPR_INSN_RTX (expr)) < 0);
3558 
3559               if (EXPR_TARGET_AVAILABLE (expr) == 1)
3560                 return expr;
3561 
3562               av_set_iter_remove (&si);
3563             }
3564         }
3565     }
3566 
3567   return NULL;
3568 }
3569 
3570 /* Lookup EXPR in VINSN_VEC and return TRUE if found.  Also check patterns from
3571    EXPR's history of changes.  */
3572 static bool
3573 vinsn_vec_has_expr_p (vinsn_vec_t vinsn_vec, expr_t expr)
3574 {
3575   vinsn_t vinsn, expr_vinsn;
3576   int n;
3577   unsigned i;
3578 
3579   /* Start with checking expr itself and then proceed with all the old forms
3580      of expr taken from its history vector.  */
3581   for (i = 0, expr_vinsn = EXPR_VINSN (expr);
3582        expr_vinsn;
3583        expr_vinsn = (i < EXPR_HISTORY_OF_CHANGES (expr).length ()
3584 		     ? EXPR_HISTORY_OF_CHANGES (expr)[i++].old_expr_vinsn
3585 		     : NULL))
3586     FOR_EACH_VEC_ELT (vinsn_vec, n, vinsn)
3587       if (VINSN_SEPARABLE_P (vinsn))
3588 	{
3589 	  if (vinsn_equal_p (vinsn, expr_vinsn))
3590 	    return true;
3591 	}
3592       else
3593 	{
3594 	  /* For non-separable instructions, the blocking insn can have
3595 	     another pattern due to substitution, and we can't choose
3596 	     different register as in the above case.  Check all registers
3597 	     being written instead.  */
3598 	  if (bitmap_intersect_p (VINSN_REG_SETS (vinsn),
3599 				  VINSN_REG_SETS (expr_vinsn)))
3600 	    return true;
3601 	}
3602 
3603   return false;
3604 }
3605 
3606 /* Return true if either of expressions from ORIG_OPS can be blocked
3607    by previously created bookkeeping code.  STATIC_PARAMS points to static
3608    parameters of move_op.  */
3609 static bool
3610 av_set_could_be_blocked_by_bookkeeping_p (av_set_t orig_ops, void *static_params)
3611 {
3612   expr_t expr;
3613   av_set_iterator iter;
3614   moveop_static_params_p sparams;
3615 
3616   /* This checks that expressions in ORIG_OPS are not blocked by bookkeeping
3617      created while scheduling on another fence.  */
3618   FOR_EACH_EXPR (expr, iter, orig_ops)
3619     if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
3620       return true;
3621 
3622   gcc_assert (code_motion_path_driver_info == &move_op_hooks);
3623   sparams = (moveop_static_params_p) static_params;
3624 
3625   /* Expressions can be also blocked by bookkeeping created during current
3626      move_op.  */
3627   if (bitmap_bit_p (current_copies, INSN_UID (sparams->failed_insn)))
3628     FOR_EACH_EXPR (expr, iter, orig_ops)
3629       if (moveup_expr_cached (expr, sparams->failed_insn, false) != MOVEUP_EXPR_NULL)
3630         return true;
3631 
3632   /* Expressions in ORIG_OPS may have wrong destination register due to
3633      renaming.  Check with the right register instead.  */
3634   if (sparams->dest && REG_P (sparams->dest))
3635     {
3636       rtx reg = sparams->dest;
3637       vinsn_t failed_vinsn = INSN_VINSN (sparams->failed_insn);
3638 
3639       if (register_unavailable_p (VINSN_REG_SETS (failed_vinsn), reg)
3640 	  || register_unavailable_p (VINSN_REG_USES (failed_vinsn), reg)
3641 	  || register_unavailable_p (VINSN_REG_CLOBBERS (failed_vinsn), reg))
3642 	return true;
3643     }
3644 
3645   return false;
3646 }
3647 
3648 /* Clear VINSN_VEC and detach vinsns.  */
3649 static void
3650 vinsn_vec_clear (vinsn_vec_t *vinsn_vec)
3651 {
3652   unsigned len = vinsn_vec->length ();
3653   if (len > 0)
3654     {
3655       vinsn_t vinsn;
3656       int n;
3657 
3658       FOR_EACH_VEC_ELT (*vinsn_vec, n, vinsn)
3659         vinsn_detach (vinsn);
3660       vinsn_vec->block_remove (0, len);
3661     }
3662 }
3663 
3664 /* Add the vinsn of EXPR to the VINSN_VEC.  */
3665 static void
3666 vinsn_vec_add (vinsn_vec_t *vinsn_vec, expr_t expr)
3667 {
3668   vinsn_attach (EXPR_VINSN (expr));
3669   vinsn_vec->safe_push (EXPR_VINSN (expr));
3670 }
3671 
3672 /* Free the vector representing blocked expressions.  */
3673 static void
3674 vinsn_vec_free (vinsn_vec_t &vinsn_vec)
3675 {
3676   vinsn_vec.release ();
3677 }
3678 
3679 /* Increase EXPR_PRIORITY_ADJ for INSN by AMOUNT.  */
3680 
3681 void sel_add_to_insn_priority (rtx insn, int amount)
3682 {
3683   EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) += amount;
3684 
3685   if (sched_verbose >= 2)
3686     sel_print ("sel_add_to_insn_priority: insn %d, by %d (now %d+%d).\n",
3687 	       INSN_UID (insn), amount, EXPR_PRIORITY (INSN_EXPR (insn)),
3688 	       EXPR_PRIORITY_ADJ (INSN_EXPR (insn)));
3689 }
3690 
3691 /* Turn AV into a vector, filter inappropriate insns and sort it.  Return
3692    true if there is something to schedule.  BNDS and FENCE are current
3693    boundaries and fence, respectively.  If we need to stall for some cycles
3694    before an expr from AV would become available, write this number to
3695    *PNEED_STALL.  */
3696 static bool
3697 fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
3698                  int *pneed_stall)
3699 {
3700   av_set_iterator si;
3701   expr_t expr;
3702   int sched_next_worked = 0, stalled, n;
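  /* The static variables below keep their values across calls while the
     same instruction group is being filled; they are reset whenever
     FENCE starts a new cycle (see FENCE_STARTS_CYCLE_P below).  */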
3703   static int av_max_prio, est_ticks_till_branch;
3704   int min_need_stall = -1;
3705   deps_t dc = BND_DC (BLIST_BND (bnds));
3706 
3707   /* Bail out early when the ready list contains only USEs/CLOBBERs that are
3708      already scheduled.  */
3709   if (av == NULL)
3710     return false;
3711 
3712   /* Empty the vector left over from the previous scheduling attempt.  */
3713   if (vec_av_set.length () > 0)
3714     vec_av_set.block_remove (0, vec_av_set.length ());
3715 
3716   /* Turn the set into a vector for sorting and call sel_target_adjust_priority
3717      for each insn.  */
3718   gcc_assert (vec_av_set.is_empty ());
3719   FOR_EACH_EXPR (expr, si, av)
3720     {
3721       vec_av_set.safe_push (expr);
3722 
3723       gcc_assert (EXPR_PRIORITY_ADJ (expr) == 0 || *pneed_stall);
3724 
3725       /* Adjust priority using target backend hook.  */
3726       sel_target_adjust_priority (expr);
3727     }
3728 
3729   /* Sort the vector.  */
3730   vec_av_set.qsort (sel_rank_for_schedule);
3731 
3732   /* We record the maximal priority of insns in av set for the current
3733      instruction group.  */
3734   if (FENCE_STARTS_CYCLE_P (fence))
3735     av_max_prio = est_ticks_till_branch = INT_MIN;
3736 
3737   /* Filter out inappropriate expressions.  Loop's direction is reversed to
3738      visit "best" instructions first.  We assume that vec::unordered_remove
3739      moves last element in place of one being deleted.  */
3740   for (n = vec_av_set.length () - 1, stalled = 0; n >= 0; n--)
3741     {
3742       expr_t expr = vec_av_set[n];
3743       insn_t insn = EXPR_INSN_RTX (expr);
3744       signed char target_available;
3745       bool is_orig_reg_p = true;
3746       int need_cycles, new_prio;
3747       bool fence_insn_p = INSN_UID (insn) == INSN_UID (FENCE_INSN (fence));
3748 
3749       /* Don't allow any insns other than from SCHED_GROUP if we have one.  */
3750       if (FENCE_SCHED_NEXT (fence) && insn != FENCE_SCHED_NEXT (fence))
3751         {
3752           vec_av_set.unordered_remove (n);
3753           continue;
3754         }
3755 
3756       /* Count the sched_next insns (just in case there
3757          could be several).  */
3758       if (FENCE_SCHED_NEXT (fence))
3759         sched_next_worked++;
3760 
3761       /* Check all liveness requirements and try renaming.
3762          FIXME: try to minimize calls to this.  */
3763       target_available = EXPR_TARGET_AVAILABLE (expr);
3764 
3765       /* If insn was already scheduled on the current fence,
3766 	 set TARGET_AVAILABLE to -1 no matter what expr's attribute says.  */
3767       if (vinsn_vec_has_expr_p (vec_target_unavailable_vinsns, expr)
3768 	  && !fence_insn_p)
3769 	target_available = -1;
3770 
3771       /* If the availability of the EXPR is invalidated by the insertion of
3772 	 bookkeeping earlier, make sure that we won't choose this expr for
3773 	 scheduling if it's not separable, and if it is separable, then
3774 	 we have to recompute the set of available registers for it.  */
3775       if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
3776 	{
3777           vec_av_set.unordered_remove (n);
3778           if (sched_verbose >= 4)
3779             sel_print ("Expr %d is blocked by bookkeeping inserted earlier\n",
3780                        INSN_UID (insn));
3781           continue;
3782         }
3783 
3784       if (target_available == true)
3785 	{
3786           /* Do nothing -- we can use an existing register.  */
3787 	  is_orig_reg_p = EXPR_SEPARABLE_P (expr);
3788         }
3789       else if (/* Non-separable instruction will never
3790                   get another register. */
3791                (target_available == false
3792                 && !EXPR_SEPARABLE_P (expr))
3793                /* Don't try to find a register for low-priority expression.  */
3794                || (int) vec_av_set.length () - 1 - n >= max_insns_to_rename
3795                /* ??? FIXME: Don't try to rename data speculation.  */
3796                || (EXPR_SPEC_DONE_DS (expr) & BEGIN_DATA)
3797                || ! find_best_reg_for_expr (expr, bnds, &is_orig_reg_p))
3798         {
3799           vec_av_set.unordered_remove (n);
3800           if (sched_verbose >= 4)
3801             sel_print ("Expr %d has no suitable target register\n",
3802                        INSN_UID (insn));
3803 
3804 	  /* A fence insn should not get here.  */
3805 	  gcc_assert (!fence_insn_p);
3806 	  continue;
3807         }
3808 
3809       /* At this point a fence insn should always be available.  */
3810       gcc_assert (!fence_insn_p
3811 		  || INSN_UID (FENCE_INSN (fence)) == INSN_UID (EXPR_INSN_RTX (expr)));
3812 
3813       /* Filter expressions that need to be renamed or speculated when
3814 	 pipelining, because compensating register copies or speculation
3815 	 checks are likely to be placed near the beginning of the loop,
3816 	 causing a stall.  */
3817       if (pipelining_p && EXPR_ORIG_SCHED_CYCLE (expr) > 0
3818 	  && (!is_orig_reg_p || EXPR_SPEC_DONE_DS (expr) != 0))
3819 	{
3820 	  /* Estimation of number of cycles until loop branch for
3821 	     renaming/speculation to be successful.  */
3822 	  int need_n_ticks_till_branch = sel_vinsn_cost (EXPR_VINSN (expr));
3823 
3824 	  if ((int) current_loop_nest->ninsns < 9)
3825 	    {
3826 	      vec_av_set.unordered_remove (n);
3827 	      if (sched_verbose >= 4)
3828 		sel_print ("Pipelining expr %d will likely cause stall\n",
3829 			   INSN_UID (insn));
3830 	      continue;
3831 	    }
3832 
3833 	  if ((int) current_loop_nest->ninsns - num_insns_scheduled
3834 	      < need_n_ticks_till_branch * issue_rate / 2
3835 	      && est_ticks_till_branch < need_n_ticks_till_branch)
3836 	     {
3837 	       vec_av_set.unordered_remove (n);
3838 	       if (sched_verbose >= 4)
3839 		 sel_print ("Pipelining expr %d will likely cause stall\n",
3840 			    INSN_UID (insn));
3841 	       continue;
3842 	     }
3843 	}
3844 
3845       /* We want to schedule speculation checks as late as possible.  Discard
3846 	 them from av set if there are instructions with higher priority.  */
3847       if (sel_insn_is_speculation_check (insn)
3848 	  && EXPR_PRIORITY (expr) < av_max_prio)
3849 	{
3850           stalled++;
3851           min_need_stall = min_need_stall < 0 ? 1 : MIN (min_need_stall, 1);
3852           vec_av_set.unordered_remove (n);
3853 	  if (sched_verbose >= 4)
3854 	    sel_print ("Delaying speculation check %d until its first use\n",
3855 		       INSN_UID (insn));
3856 	  continue;
3857 	}
3858 
3859       /* When updating AV_MAX_PRIO, ignore EXPRs available from pipelining.  */
3860       if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3861 	av_max_prio = MAX (av_max_prio, EXPR_PRIORITY (expr));
3862 
3863       /* Don't allow any insns whose data is not yet ready.
3864          Check first whether we've already tried them and failed.  */
3865       if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
3866 	{
3867           need_cycles = (FENCE_READY_TICKS (fence)[INSN_UID (insn)]
3868 			 - FENCE_CYCLE (fence));
3869 	  if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3870 	    est_ticks_till_branch = MAX (est_ticks_till_branch,
3871 					 EXPR_PRIORITY (expr) + need_cycles);
3872 
3873 	  if (need_cycles > 0)
3874 	    {
3875 	      stalled++;
3876 	      min_need_stall = (min_need_stall < 0
3877 				? need_cycles
3878 				: MIN (min_need_stall, need_cycles));
3879 	      vec_av_set.unordered_remove (n);
3880 
3881 	      if (sched_verbose >= 4)
3882 		sel_print ("Expr %d is not ready until cycle %d (cached)\n",
3883 			   INSN_UID (insn),
3884 			   FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
3885 	      continue;
3886 	    }
3887 	}
3888 
3889       /* Now resort to dependence analysis to find whether EXPR might be
3890          stalled due to dependencies from FENCE's context.  */
3891       need_cycles = tick_check_p (expr, dc, fence);
3892       new_prio = EXPR_PRIORITY (expr) + EXPR_PRIORITY_ADJ (expr) + need_cycles;
3893 
3894       if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
3895 	est_ticks_till_branch = MAX (est_ticks_till_branch,
3896 				     new_prio);
3897 
3898       if (need_cycles > 0)
3899         {
3900           if (INSN_UID (insn) >= FENCE_READY_TICKS_SIZE (fence))
3901             {
3902               int new_size = INSN_UID (insn) * 3 / 2;
3903 
3904               FENCE_READY_TICKS (fence)
3905                 = (int *) xrecalloc (FENCE_READY_TICKS (fence),
3906                                      new_size, FENCE_READY_TICKS_SIZE (fence),
3907                                      sizeof (int));
3908             }
3909           FENCE_READY_TICKS (fence)[INSN_UID (insn)]
3910             = FENCE_CYCLE (fence) + need_cycles;
3911 
3912           stalled++;
3913           min_need_stall = (min_need_stall < 0
3914                             ? need_cycles
3915                             : MIN (min_need_stall, need_cycles));
3916 
3917           vec_av_set.unordered_remove (n);
3918 
3919           if (sched_verbose >= 4)
3920             sel_print ("Expr %d is not ready yet until cycle %d\n",
3921                        INSN_UID (insn),
3922                        FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
3923           continue;
3924         }
3925 
3926       if (sched_verbose >= 4)
3927         sel_print ("Expr %d is ok\n", INSN_UID (insn));
3928       min_need_stall = 0;
3929     }
3930 
3931   /* Clear SCHED_NEXT.  */
3932   if (FENCE_SCHED_NEXT (fence))
3933     {
3934       gcc_assert (sched_next_worked == 1);
3935       FENCE_SCHED_NEXT (fence) = NULL;
3936     }
3937 
3938   /* No need to stall if this variable was not initialized.  */
3939   if (min_need_stall < 0)
3940     min_need_stall = 0;
3941 
3942   if (vec_av_set.is_empty ())
3943     {
3944       /* We need to set *pneed_stall here, because later we skip this code
3945          when the ready list is empty.  */
3946       *pneed_stall = min_need_stall;
3947       return false;
3948     }
3949   else
3950     gcc_assert (min_need_stall == 0);
3951 
3952   /* Sort the vector.  */
3953   vec_av_set.qsort (sel_rank_for_schedule);
3954 
3955   if (sched_verbose >= 4)
3956     {
3957       sel_print ("Total ready exprs: %d, stalled: %d\n",
3958                  vec_av_set.length (), stalled);
3959       sel_print ("Sorted av set (%d): ", vec_av_set.length ());
3960       FOR_EACH_VEC_ELT (vec_av_set, n, expr)
3961         dump_expr (expr);
3962       sel_print ("\n");
3963     }
3964 
3965   *pneed_stall = 0;
3966   return true;
3967 }
3968 
3969 /* Convert a vectored and sorted av set to the ready list that
3970    the rest of the backend wants to see.  */
3971 static void
3972 convert_vec_av_set_to_ready (void)
3973 {
3974   int n;
3975   expr_t expr;
3976 
3977   /* Allocate and fill the ready list from the sorted vector.  */
3978   ready.n_ready = vec_av_set.length ();
3979   ready.first = ready.n_ready - 1;
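  /* Per the ready list convention, READY.FIRST indexes the best element,
     which fill_vec_av_set left at the end of the sorted vector.  */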
3980 
3981   gcc_assert (ready.n_ready > 0);
3982 
3983   if (ready.n_ready > max_issue_size)
3984     {
3985       max_issue_size = ready.n_ready;
3986       sched_extend_ready_list (ready.n_ready);
3987     }
3988 
3989   FOR_EACH_VEC_ELT (vec_av_set, n, expr)
3990     {
3991       vinsn_t vi = EXPR_VINSN (expr);
3992       insn_t insn = VINSN_INSN_RTX (vi);
3993 
3994       ready_try[n] = 0;
3995       ready.vec[n] = insn;
3996     }
3997 }
3998 
3999 /* Initialize ready list from *AV_PTR for the max_issue () call.
4000    If any unrecognizable insn found in *AV_PTR, return it (and skip
4001    max_issue).  BND and FENCE are current boundary and fence,
4002    respectively.  If we need to stall for some cycles before an expr
4003    from *AV_PTR would become available, write this number to *PNEED_STALL.  */
4004 static expr_t
4005 fill_ready_list (av_set_t *av_ptr, blist_t bnds, fence_t fence,
4006                  int *pneed_stall)
4007 {
4008   expr_t expr;
4009 
4010   /* We do not support multiple boundaries per fence.  */
4011   gcc_assert (BLIST_NEXT (bnds) == NULL);
4012 
4013   /* Process expressions that require special handling, i.e.  pipelined,
4014      speculative and recog() < 0 expressions, first.  */
4015   process_pipelined_exprs (av_ptr);
4016   process_spec_exprs (av_ptr);
4017 
4018   /* A USE could be scheduled immediately.  */
4019   expr = process_use_exprs (av_ptr);
4020   if (expr)
4021     {
4022       *pneed_stall = 0;
4023       return expr;
4024     }
4025 
4026   /* Turn the av set to a vector for sorting.  */
4027   if (! fill_vec_av_set (*av_ptr, bnds, fence, pneed_stall))
4028     {
4029       ready.n_ready = 0;
4030       return NULL;
4031     }
4032 
4033   /* Build the final ready list.  */
4034   convert_vec_av_set_to_ready ();
4035   return NULL;
4036 }
4037 
4038 /* Wrapper for dfa_new_cycle ().  Returns TRUE if cycle was advanced.  */
4039 static bool
4040 sel_dfa_new_cycle (insn_t insn, fence_t fence)
4041 {
4042   int last_scheduled_cycle = FENCE_LAST_SCHEDULED_INSN (fence)
4043                              ? INSN_SCHED_CYCLE (FENCE_LAST_SCHEDULED_INSN (fence))
4044                              : FENCE_CYCLE (fence) - 1;
4045   bool res = false;
4046   int sort_p = 0;
4047 
4048   if (!targetm.sched.dfa_new_cycle)
4049     return false;
4050 
4051   memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4052 
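  /* Keep advancing cycles while the target's dfa_new_cycle hook asks for
     a new cycle to be started before issuing INSN.  */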
4053   while (!sort_p && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
4054                                                  insn, last_scheduled_cycle,
4055                                                  FENCE_CYCLE (fence), &sort_p))
4056     {
4057       memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
4058       advance_one_cycle (fence);
4059       memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4060       res = true;
4061     }
4062 
4063   return res;
4064 }
4065 
4066 /* Invoke reorder* target hooks on the ready list.  Return the number of insns
4067    we can issue.  FENCE is the current fence.  */
4068 static int
4069 invoke_reorder_hooks (fence_t fence)
4070 {
4071   int issue_more;
4072   bool ran_hook = false;
4073 
4074   /* Call the reorder hook at the beginning of the cycle, and call
4075      the reorder2 hook in the middle of the cycle.  */
4076   if (FENCE_ISSUED_INSNS (fence) == 0)
4077     {
4078       if (targetm.sched.reorder
4079           && !SCHED_GROUP_P (ready_element (&ready, 0))
4080           && ready.n_ready > 1)
4081         {
4082           /* Don't give reorder the most prioritized insn as it can break
4083              pipelining.  */
4084           if (pipelining_p)
4085             --ready.n_ready;
4086 
4087           issue_more
4088             = targetm.sched.reorder (sched_dump, sched_verbose,
4089                                      ready_lastpos (&ready),
4090                                      &ready.n_ready, FENCE_CYCLE (fence));
4091 
4092           if (pipelining_p)
4093             ++ready.n_ready;
4094 
4095           ran_hook = true;
4096         }
4097       else
4098         /* Initialize can_issue_more for variable_issue.  */
4099         issue_more = issue_rate;
4100     }
4101   else if (targetm.sched.reorder2
4102            && !SCHED_GROUP_P (ready_element (&ready, 0)))
4103     {
4104       if (ready.n_ready == 1)
4105         issue_more =
4106           targetm.sched.reorder2 (sched_dump, sched_verbose,
4107                                   ready_lastpos (&ready),
4108                                   &ready.n_ready, FENCE_CYCLE (fence));
4109       else
4110         {
4111           if (pipelining_p)
4112             --ready.n_ready;
4113 
4114           issue_more =
4115             targetm.sched.reorder2 (sched_dump, sched_verbose,
4116                                     ready.n_ready
4117                                     ? ready_lastpos (&ready) : NULL,
4118                                     &ready.n_ready, FENCE_CYCLE (fence));
4119 
4120           if (pipelining_p)
4121             ++ready.n_ready;
4122         }
4123 
4124       ran_hook = true;
4125     }
4126   else
4127     issue_more = FENCE_ISSUE_MORE (fence);
4128 
4129   /* Ensure that the ready list and vec_av_set are in line with each other,
4130      i.e. EXPR_INSN_RTX (vec_av_set[i]) == ready.vec[i].  */
4131   if (issue_more && ran_hook)
4132     {
4133       int i, j, n;
4134       rtx_insn **arr = ready.vec;
4135       expr_t *vec = vec_av_set.address ();
4136 
4137       for (i = 0, n = ready.n_ready; i < n; i++)
4138         if (EXPR_INSN_RTX (vec[i]) != arr[i])
4139           {
4140             for (j = i; j < n; j++)
4141               if (EXPR_INSN_RTX (vec[j]) == arr[i])
4142                 break;
4143             gcc_assert (j < n);
4144 
4145 	    std::swap (vec[i], vec[j]);
4146           }
4147     }
4148 
4149   return issue_more;
4150 }
4151 
4152 /* Return an EXPR corresponding to INDEX element of ready list, if
4153    FOLLOW_READY_ELEMENT is true (i.e., an expr of
4154    ready_element (&ready, INDEX) will be returned), and to INDEX element of
4155    ready.vec otherwise.  */
4156 static inline expr_t
4157 find_expr_for_ready (int index, bool follow_ready_element)
4158 {
4159   expr_t expr;
4160   int real_index;
4161 
4162   real_index = follow_ready_element ? ready.first - index : index;
4163 
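  /* E.g., with ready.n_ready == 3 we have ready.first == 2, so the 0th
     ready element is ready.vec[2], which matches vec_av_set[2].  */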
4164   expr = vec_av_set[real_index];
4165   gcc_assert (ready.vec[real_index] == EXPR_INSN_RTX (expr));
4166 
4167   return expr;
4168 }
4169 
4170 /* Calculate insns worth trying via lookahead_guard hook.  Return a number
4171    of such insns found.  */
4172 static int
4173 invoke_dfa_lookahead_guard (void)
4174 {
4175   int i, n;
4176   bool have_hook
4177     = targetm.sched.first_cycle_multipass_dfa_lookahead_guard != NULL;
4178 
4179   if (sched_verbose >= 2)
4180     sel_print ("ready after reorder: ");
4181 
4182   for (i = 0, n = 0; i < ready.n_ready; i++)
4183     {
4184       expr_t expr;
4185       insn_t insn;
4186       int r;
4187 
4188       /* In this loop insn is Ith element of the ready list given by
4189          ready_element, not Ith element of ready.vec.  */
4190       insn = ready_element (&ready, i);
4191 
4192       if (! have_hook || i == 0)
4193         r = 0;
4194       else
4195         r = targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn, i);
4196 
4197       gcc_assert (INSN_CODE (insn) >= 0);
4198 
4199       /* Only insns with ready_try = 0 can get here
4200          from fill_ready_list.  */
4201       gcc_assert (ready_try [i] == 0);
4202       ready_try[i] = r;
4203       if (!r)
4204         n++;
4205 
4206       expr = find_expr_for_ready (i, true);
4207 
4208       if (sched_verbose >= 2)
4209         {
4210           dump_vinsn (EXPR_VINSN (expr));
4211           sel_print (":%d; ", ready_try[i]);
4212         }
4213     }
4214 
4215   if (sched_verbose >= 2)
4216     sel_print ("\n");
4217   return n;
4218 }
4219 
4220 /* Calculate the number of privileged insns (the leading ready insns whose EXPR_SPEC does not exceed that of the first one) and return it.  */
4221 static int
4222 calculate_privileged_insns (void)
4223 {
4224   expr_t cur_expr, min_spec_expr = NULL;
4225   int privileged_n = 0, i;
4226 
4227   for (i = 0; i < ready.n_ready; i++)
4228     {
4229       if (ready_try[i])
4230         continue;
4231 
4232       if (! min_spec_expr)
4233 	min_spec_expr = find_expr_for_ready (i, true);
4234 
4235       cur_expr = find_expr_for_ready (i, true);
4236 
4237       if (EXPR_SPEC (cur_expr) > EXPR_SPEC (min_spec_expr))
4238         break;
4239 
4240       ++privileged_n;
4241     }
4242 
4243   if (i == ready.n_ready)
4244     privileged_n = 0;
4245 
4246   if (sched_verbose >= 2)
4247     sel_print ("privileged_n: %d insns with SPEC %d\n",
4248                privileged_n, privileged_n ? EXPR_SPEC (min_spec_expr) : -1);
4249   return privileged_n;
4250 }
4251 
4252 /* Call the rest of the hooks after the choice was made.  Return
4253    the number of insns that still can be issued given that the current
4254    number is ISSUE_MORE.  FENCE and BEST_INSN are the current fence
4255    and the insn chosen for scheduling, respectively.  */
4256 static int
4257 invoke_aftermath_hooks (fence_t fence, rtx_insn *best_insn, int issue_more)
4258 {
4259   gcc_assert (INSN_P (best_insn));
4260 
4261   /* First, call dfa_new_cycle, and then variable_issue, if available.  */
4262   sel_dfa_new_cycle (best_insn, fence);
4263 
4264   if (targetm.sched.variable_issue)
4265     {
4266       memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
4267       issue_more =
4268         targetm.sched.variable_issue (sched_dump, sched_verbose, best_insn,
4269                                       issue_more);
4270       memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
4271     }
4272   else if (!DEBUG_INSN_P (best_insn)
4273 	   && GET_CODE (PATTERN (best_insn)) != USE
4274 	   && GET_CODE (PATTERN (best_insn)) != CLOBBER)
4275     issue_more--;
4276 
4277   return issue_more;
4278 }
4279 
4280 /* Estimate the cost of issuing INSN on DFA state STATE.  */
4281 static int
4282 estimate_insn_cost (rtx_insn *insn, state_t state)
4283 {
4284   static state_t temp = NULL;
4285   int cost;
4286 
4287   if (!temp)
4288     temp = xmalloc (dfa_state_size);
4289 
4290   memcpy (temp, state, dfa_state_size);
4291   cost = state_transition (temp, insn);
4292 
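  /* A negative result from state_transition means INSN can issue in this
     state without stalling; otherwise treat the result as the number of
     stall cycles, counting a zero result as a full cycle.  */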
4293   if (cost < 0)
4294     return 0;
4295   else if (cost == 0)
4296     return 1;
4297   return cost;
4298 }
4299 
4300 /* Return the cost of issuing EXPR on the FENCE as estimated by DFA.
4301    This function properly handles ASMs, USEs etc.  */
4302 static int
4303 get_expr_cost (expr_t expr, fence_t fence)
4304 {
4305   rtx_insn *insn = EXPR_INSN_RTX (expr);
4306 
4307   if (recog_memoized (insn) < 0)
4308     {
4309       if (!FENCE_STARTS_CYCLE_P (fence)
4310 	  && INSN_ASM_P (insn))
4311 	/* This is an asm insn that we are trying to issue somewhere other
4312 	   than at the start of a cycle.  Issue it on the next cycle.  */
4313 	return 1;
4314       else
4315 	/* A USE insn, or something else we don't need to
4316 	   understand.  We can't pass these directly to
4317 	   state_transition because it will trigger a
4318 	   fatal error for unrecognizable insns.  */
4319 	return 0;
4320     }
4321   else
4322     return estimate_insn_cost (insn, FENCE_STATE (fence));
4323 }
4324 
4325 /* Find the best insn for scheduling, either via max_issue or just take
4326    the most prioritized available.  */
4327 static int
4328 choose_best_insn (fence_t fence, int privileged_n, int *index)
4329 {
4330   int can_issue = 0;
4331 
4332   if (dfa_lookahead > 0)
4333     {
4334       cycle_issued_insns = FENCE_ISSUED_INSNS (fence);
4335       /* TODO: pass equivalent of first_cycle_insn_p to max_issue ().  */
4336       can_issue = max_issue (&ready, privileged_n,
4337                              FENCE_STATE (fence), true, index);
4338       if (sched_verbose >= 2)
4339         sel_print ("max_issue: we can issue %d insns, already did %d insns\n",
4340                    can_issue, FENCE_ISSUED_INSNS (fence));
4341     }
4342   else
4343     {
4344       /* We can't use max_issue; just return the first available element.  */
4345       int i;
4346 
4347       for (i = 0; i < ready.n_ready; i++)
4348 	{
4349 	  expr_t expr = find_expr_for_ready (i, true);
4350 
4351 	  if (get_expr_cost (expr, fence) < 1)
4352 	    {
4353 	      can_issue = can_issue_more;
4354 	      *index = i;
4355 
4356 	      if (sched_verbose >= 2)
4357 		sel_print ("using %dth insn from the ready list\n", i + 1);
4358 
4359 	      break;
4360 	    }
4361 	}
4362 
4363       if (i == ready.n_ready)
4364 	{
4365 	  can_issue = 0;
4366 	  *index = -1;
4367 	}
4368     }
4369 
4370   return can_issue;
4371 }
4372 
4373 /* Choose the best expr from *AV_VLIW_PTR and a suitable register for it.
4374    BNDS and FENCE are current boundaries and scheduling fence respectively.
4375    Return the expr found, or NULL if nothing can be issued at the moment.
4376    Write to PNEED_STALL the number of cycles to stall if no expr was found.  */
4377 static expr_t
4378 find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
4379                 int *pneed_stall)
4380 {
4381   expr_t best;
4382 
4383   /* Choose the best insn for scheduling via:
4384      1) sorting the ready list based on priority;
4385      2) calling the reorder hook;
4386      3) calling max_issue.  */
4387   best = fill_ready_list (av_vliw_ptr, bnds, fence, pneed_stall);
4388   if (best == NULL && ready.n_ready > 0)
4389     {
4390       int privileged_n, index;
4391 
4392       can_issue_more = invoke_reorder_hooks (fence);
4393       if (can_issue_more > 0)
4394         {
4395           /* Try choosing the best insn until we find one that can be
4396              scheduled given the liveness restrictions on its destination register.
4397              In the future, we'd like to choose once and then just probe insns
4398              in the order of their priority.  */
4399           invoke_dfa_lookahead_guard ();
4400           privileged_n = calculate_privileged_insns ();
4401           can_issue_more = choose_best_insn (fence, privileged_n, &index);
4402           if (can_issue_more)
4403             best = find_expr_for_ready (index, true);
4404         }
4405       /* We had some available insns, so if we can't issue them,
4406          we have a stall.  */
4407       if (can_issue_more == 0)
4408         {
4409           best = NULL;
4410           *pneed_stall = 1;
4411         }
4412     }
4413 
4414   if (best != NULL)
4415     {
4416       can_issue_more = invoke_aftermath_hooks (fence, EXPR_INSN_RTX (best),
4417                                                can_issue_more);
4418       if (targetm.sched.variable_issue
4419 	  && can_issue_more == 0)
4420         *pneed_stall = 1;
4421     }
4422 
4423   if (sched_verbose >= 2)
4424     {
4425       if (best != NULL)
4426         {
4427           sel_print ("Best expression (vliw form): ");
4428           dump_expr (best);
4429           sel_print ("; cycle %d\n", FENCE_CYCLE (fence));
4430         }
4431       else
4432         sel_print ("No best expr found!\n");
4433     }
4434 
4435   return best;
4436 }
4437 
4438 
4439 /* Functions that implement the core of the scheduler.  */
4440 
4441 
4442 /* Emit an instruction from EXPR with SEQNO and VINSN after
4443    PLACE_TO_INSERT.  */
4444 static insn_t
4445 emit_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
4446                            insn_t place_to_insert)
4447 {
4448   /* This assert fails when we have identical instructions
4449      one of which dominates the other.  In this case move_op ()
4450      finds the first instruction and doesn't search for the second one.
4451      The solution would be to compute av_set after the first found
4452      insn and, if insn present in that set, continue searching.
4453      For now we workaround this issue in move_op.  */
4454   gcc_assert (!INSN_IN_STREAM_P (EXPR_INSN_RTX (expr)));
4455 
4456   if (EXPR_WAS_RENAMED (expr))
4457     {
4458       unsigned regno = expr_dest_regno (expr);
4459 
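      /* The expr was renamed into a hard register: record the register
	 as ever-live for dataflow and note its use in reg_rename_tick,
	 which drives the renaming heuristic.  */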
4460       if (HARD_REGISTER_NUM_P (regno))
4461 	{
4462 	  df_set_regs_ever_live (regno, true);
4463 	  reg_rename_tick[regno] = ++reg_rename_this_tick;
4464 	}
4465     }
4466 
4467   return sel_gen_insn_from_expr_after (expr, vinsn, seqno,
4468                                        place_to_insert);
4469 }
4470 
4471 /* Return TRUE if BB can hold bookkeeping code.  */
4472 static bool
4473 block_valid_for_bookkeeping_p (basic_block bb)
4474 {
4475   insn_t bb_end = BB_END (bb);
4476 
4477   if (!in_current_region_p (bb) || EDGE_COUNT (bb->succs) > 1)
4478     return false;
4479 
4480   if (INSN_P (bb_end))
4481     {
4482       if (INSN_SCHED_TIMES (bb_end) > 0)
4483 	return false;
4484     }
4485   else
4486     gcc_assert (NOTE_INSN_BASIC_BLOCK_P (bb_end));
4487 
4488   return true;
4489 }
4490 
4491 /* Attempt to find a block that can hold bookkeeping code for path(s) incoming
4492    into E2->dest, except from E1->src (there may be a sequence of empty basic
4493    blocks between E1->src and E2->dest).  Return found block, or NULL if new
4494    one must be created.  If LAX holds, don't assume there is a simple path
4495    from E1->src to E2->dest.  */
4496 static basic_block
find_block_for_bookkeeping(edge e1,edge e2,bool lax)4497 find_block_for_bookkeeping (edge e1, edge e2, bool lax)
4498 {
4499   basic_block candidate_block = NULL;
4500   edge e;
4501 
4502   /* Loop over edges from E1 to E2, inclusive.  */
4503   for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun); e =
4504        EDGE_SUCC (e->dest, 0))
4505     {
4506       if (EDGE_COUNT (e->dest->preds) == 2)
4507 	{
4508 	  if (candidate_block == NULL)
4509 	    candidate_block = (EDGE_PRED (e->dest, 0) == e
4510 			       ? EDGE_PRED (e->dest, 1)->src
4511 			       : EDGE_PRED (e->dest, 0)->src);
4512 	  else
4513 	    /* Found additional edge leading to path from e1 to e2
4514 	       from aside.  */
4515 	    return NULL;
4516 	}
4517       else if (EDGE_COUNT (e->dest->preds) > 2)
4518 	/* Several edges leading to path from e1 to e2 from aside.  */
4519 	return NULL;
4520 
4521       if (e == e2)
4522 	return ((!lax || candidate_block)
4523 		&& block_valid_for_bookkeeping_p (candidate_block)
4524 		? candidate_block
4525 		: NULL);
4526 
4527       if (lax && EDGE_COUNT (e->dest->succs) != 1)
4528 	return NULL;
4529     }
4530 
4531   if (lax)
4532     return NULL;
4533 
4534   gcc_unreachable ();
4535 }
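
/* An illustrative sketch (not from the original sources): with edges
   E1 = A->B and E2 = B->D in the CFG below, D's other predecessor C is
   the candidate block for bookkeeping, provided it is in the current
   region, has a single successor, and contains no scheduled insns:

         A
        / \
       B   C
        \ /
         D

   Any additional edge entering the path from E1 to E2 "from aside"
   makes the search return NULL instead.  */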

/* Create new basic block for bookkeeping code for path(s) incoming into
   E2->dest, except from E1->src.  Return created block.  */
static basic_block
create_block_for_bookkeeping (edge e1, edge e2)
{
  basic_block new_bb, bb = e2->dest;

  /* Check that we don't spoil the loop structure.  */
  if (current_loop_nest)
    {
      basic_block latch = current_loop_nest->latch;

      /* We do not split header.  */
      gcc_assert (e2->dest != current_loop_nest->header);

      /* We do not redirect the only edge to the latch block.  */
      gcc_assert (e1->dest != latch
		  || !single_pred_p (latch)
		  || e1 != single_pred_edge (latch));
    }

  /* Split BB to insert BOOK_INSN there.  */
  new_bb = sched_split_block (bb, NULL);

  /* Move note_list from the upper bb.  */
  gcc_assert (BB_NOTE_LIST (new_bb) == NULL_RTX);
  BB_NOTE_LIST (new_bb) = BB_NOTE_LIST (bb);
  BB_NOTE_LIST (bb) = NULL;

  gcc_assert (e2->dest == bb);

  /* Skip block for bookkeeping copy when leaving E1->src.  */
  if (e1->flags & EDGE_FALLTHRU)
    sel_redirect_edge_and_branch_force (e1, new_bb);
  else
    sel_redirect_edge_and_branch (e1, new_bb);

  gcc_assert (e1->dest == new_bb);
  gcc_assert (sel_bb_empty_p (bb));

  /* To keep basic block numbers in sync between debug and non-debug
     compilations, we have to rotate blocks here.  Consider that we
     started from (a,b)->d, (c,d)->e, and d contained only debug
     insns.  It would have been removed before if the debug insns
     weren't there, so we'd have split e rather than d.  So what we do
     now is to swap the block numbers of new_bb and
     single_succ(new_bb) == e, so that the insns that were in e before
     get the new block number.  */

  if (MAY_HAVE_DEBUG_INSNS)
    {
      basic_block succ;
      insn_t insn = sel_bb_head (new_bb);
      insn_t last;

      if (DEBUG_INSN_P (insn)
	  && single_succ_p (new_bb)
	  && (succ = single_succ (new_bb))
	  && succ != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && DEBUG_INSN_P ((last = sel_bb_end (new_bb))))
	{
	  while (insn != last && (DEBUG_INSN_P (insn) || NOTE_P (insn)))
	    insn = NEXT_INSN (insn);

	  if (insn == last)
	    {
	      sel_global_bb_info_def gbi;
	      sel_region_bb_info_def rbi;

	      if (sched_verbose >= 2)
		sel_print ("Swapping block ids %i and %i\n",
			   new_bb->index, succ->index);

	      std::swap (new_bb->index, succ->index);

	      SET_BASIC_BLOCK_FOR_FN (cfun, new_bb->index, new_bb);
	      SET_BASIC_BLOCK_FOR_FN (cfun, succ->index, succ);

	      memcpy (&gbi, SEL_GLOBAL_BB_INFO (new_bb), sizeof (gbi));
	      memcpy (SEL_GLOBAL_BB_INFO (new_bb), SEL_GLOBAL_BB_INFO (succ),
		      sizeof (gbi));
	      memcpy (SEL_GLOBAL_BB_INFO (succ), &gbi, sizeof (gbi));

	      memcpy (&rbi, SEL_REGION_BB_INFO (new_bb), sizeof (rbi));
	      memcpy (SEL_REGION_BB_INFO (new_bb), SEL_REGION_BB_INFO (succ),
		      sizeof (rbi));
	      memcpy (SEL_REGION_BB_INFO (succ), &rbi, sizeof (rbi));

	      std::swap (BLOCK_TO_BB (new_bb->index),
			 BLOCK_TO_BB (succ->index));

	      std::swap (CONTAINING_RGN (new_bb->index),
			 CONTAINING_RGN (succ->index));

	      for (int i = 0; i < current_nr_blocks; i++)
		if (BB_TO_BLOCK (i) == succ->index)
		  BB_TO_BLOCK (i) = new_bb->index;
		else if (BB_TO_BLOCK (i) == new_bb->index)
		  BB_TO_BLOCK (i) = succ->index;

	      FOR_BB_INSNS (new_bb, insn)
		if (INSN_P (insn))
		  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;

	      FOR_BB_INSNS (succ, insn)
		if (INSN_P (insn))
		  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = succ->index;

	      if (bitmap_clear_bit (code_motion_visited_blocks, new_bb->index))
		bitmap_set_bit (code_motion_visited_blocks, succ->index);

	      gcc_assert (LABEL_P (BB_HEAD (new_bb))
			  && LABEL_P (BB_HEAD (succ)));

	      if (sched_verbose >= 4)
		sel_print ("Swapping code labels %i and %i\n",
			   CODE_LABEL_NUMBER (BB_HEAD (new_bb)),
			   CODE_LABEL_NUMBER (BB_HEAD (succ)));

	      std::swap (CODE_LABEL_NUMBER (BB_HEAD (new_bb)),
			 CODE_LABEL_NUMBER (BB_HEAD (succ)));
	    }
	}
    }

  return bb;
}

/* Return insn after which we must insert bookkeeping code for path(s) incoming
   into E2->dest, except from E1->src.  If the returned insn immediately
   precedes a fence, assign that fence to *FENCE_TO_REWIND.  */
static insn_t
find_place_for_bookkeeping (edge e1, edge e2, fence_t *fence_to_rewind)
{
  insn_t place_to_insert;
  /* Find a basic block that can hold bookkeeping.  If it can be found, do not
     create new basic block, but insert bookkeeping there.  */
  basic_block book_block = find_block_for_bookkeeping (e1, e2, FALSE);

  if (book_block)
    {
      place_to_insert = BB_END (book_block);

      /* Don't use a block containing only debug insns for
	 bookkeeping, this causes scheduling differences between debug
	 and non-debug compilations, for the block would have been
	 removed already.  */
      if (DEBUG_INSN_P (place_to_insert))
	{
	  rtx_insn *insn = sel_bb_head (book_block);

	  while (insn != place_to_insert &&
		 (DEBUG_INSN_P (insn) || NOTE_P (insn)))
	    insn = NEXT_INSN (insn);

	  if (insn == place_to_insert)
	    book_block = NULL;
	}
    }

  if (!book_block)
    {
      book_block = create_block_for_bookkeeping (e1, e2);
      place_to_insert = BB_END (book_block);
      if (sched_verbose >= 9)
	sel_print ("New block is %i, split from bookkeeping block %i\n",
		   EDGE_SUCC (book_block, 0)->dest->index, book_block->index);
    }
  else
    {
      if (sched_verbose >= 9)
	sel_print ("Pre-existing bookkeeping block is %i\n", book_block->index);
    }

  *fence_to_rewind = NULL;
  /* If basic block ends with a jump, insert bookkeeping code right before it.
     Notice if we are crossing a fence when taking PREV_INSN.  */
  if (INSN_P (place_to_insert) && control_flow_insn_p (place_to_insert))
    {
      *fence_to_rewind = flist_lookup (fences, place_to_insert);
      place_to_insert = PREV_INSN (place_to_insert);
    }

  return place_to_insert;
}

/* Find a proper seqno for bookkeeping insn inserted at PLACE_TO_INSERT
   for JOIN_POINT.  */
static int
find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point)
{
  int seqno;

  /* Check if we are about to insert bookkeeping copy before a jump, and use
     jump's seqno for the copy; otherwise, use JOIN_POINT's seqno.  */
  rtx_insn *next = NEXT_INSN (place_to_insert);
  if (INSN_P (next)
      && JUMP_P (next)
      && BLOCK_FOR_INSN (next) == BLOCK_FOR_INSN (place_to_insert))
    {
      gcc_assert (INSN_SCHED_TIMES (next) == 0);
      seqno = INSN_SEQNO (next);
    }
  else if (INSN_SEQNO (join_point) > 0)
    seqno = INSN_SEQNO (join_point);
  else
    {
      seqno = get_seqno_by_preds (place_to_insert);

      /* Sometimes the fences can move in such a way that there will be
         no instructions with positive seqno around this bookkeeping.
         This means that there will be no way to get to it by a regular
         fence movement.  Never mind, because we pick up such pieces for
         rescheduling anyway, so any positive value will do for now.  */
      if (seqno < 0)
        {
          gcc_assert (pipelining_p);
          seqno = 1;
        }
    }

  gcc_assert (seqno > 0);
  return seqno;
}
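
/* To recap the three cases above (a summary, not from the original
   sources): a copy placed right before a not-yet-scheduled jump inherits
   the jump's seqno; otherwise JOIN_POINT's positive seqno is used; and
   when pipelining has left no positive seqnos around the insertion point,
   any positive value (here 1) is fine, since such code is picked up for
   rescheduling anyway.  */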

/* Insert bookkeeping copy of C_EXPR's insn after PLACE_TO_INSERT, assigning
   NEW_SEQNO to it.  Return created insn.  */
static insn_t
emit_bookkeeping_insn (insn_t place_to_insert, expr_t c_expr, int new_seqno)
{
  rtx_insn *new_insn_rtx = create_copy_of_insn_rtx (EXPR_INSN_RTX (c_expr));

  vinsn_t new_vinsn
    = create_vinsn_from_insn_rtx (new_insn_rtx,
				  VINSN_UNIQUE_P (EXPR_VINSN (c_expr)));

  insn_t new_insn = emit_insn_from_expr_after (c_expr, new_vinsn, new_seqno,
					       place_to_insert);

  INSN_SCHED_TIMES (new_insn) = 0;
  bitmap_set_bit (current_copies, INSN_UID (new_insn));

  return new_insn;
}

/* Generate a bookkeeping copy of C_EXPR's insn for path(s) incoming into
   E2->dest, except from E1->src (there may be a sequence of empty blocks
   between E1->src and E2->dest).  Return block containing the copy.
   All scheduler data is initialized for the newly created insn.  */
static basic_block
generate_bookkeeping_insn (expr_t c_expr, edge e1, edge e2)
{
  insn_t join_point, place_to_insert, new_insn;
  int new_seqno;
  bool need_to_exchange_data_sets;
  fence_t fence_to_rewind;

  if (sched_verbose >= 4)
    sel_print ("Generating bookkeeping insn (%d->%d)\n", e1->src->index,
	       e2->dest->index);

  join_point = sel_bb_head (e2->dest);
  place_to_insert = find_place_for_bookkeeping (e1, e2, &fence_to_rewind);
  new_seqno = find_seqno_for_bookkeeping (place_to_insert, join_point);
  need_to_exchange_data_sets
    = sel_bb_empty_p (BLOCK_FOR_INSN (place_to_insert));

  new_insn = emit_bookkeeping_insn (place_to_insert, c_expr, new_seqno);

  if (fence_to_rewind)
    FENCE_INSN (fence_to_rewind) = new_insn;

  /* When inserting bookkeeping insn in new block, av sets should be as
     follows: the old basic block (that now holds bookkeeping) keeps the
     data sets it had before generation of bookkeeping, while the new
     basic block (that now holds all other insns of the old basic block)
     has invalid data sets.  So exchange the data sets for these basic
     blocks, as sel_split_block mistakenly exchanges them in this case.
     We cannot do this earlier, because when a single instruction is added
     to a new basic block it should hold a NULL lv_set.  */
  if (need_to_exchange_data_sets)
    exchange_data_sets (BLOCK_FOR_INSN (new_insn),
			BLOCK_FOR_INSN (join_point));

  stat_bookkeeping_copies++;
  return BLOCK_FOR_INSN (new_insn);
}

/* Remove from AV_PTR all insns that may need bookkeeping when scheduling
   on FENCE, but we are unable to copy them.  */
static void
remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr)
{
  expr_t expr;
  av_set_iterator i;

  /*  An expression does not need bookkeeping if it is available on all paths
      from current block to original block and current block dominates
      original block.  We check availability on all paths by examining
      EXPR_SPEC; this is not equivalent, because it may be positive even
      if expr is available on all paths (but if expr is not available on
      any path, EXPR_SPEC will be positive).  */

  FOR_EACH_EXPR_1 (expr, i, av_ptr)
    {
      if (!control_flow_insn_p (EXPR_INSN_RTX (expr))
	  && (!bookkeeping_p || VINSN_UNIQUE_P (EXPR_VINSN (expr)))
	  && (EXPR_SPEC (expr)
	      || !EXPR_ORIG_BB_INDEX (expr)
	      || !dominated_by_p (CDI_DOMINATORS,
				  BASIC_BLOCK_FOR_FN (cfun,
						      EXPR_ORIG_BB_INDEX (expr)),
				  BLOCK_FOR_INSN (FENCE_INSN (fence)))))
	{
          if (sched_verbose >= 4)
            sel_print ("Expr %d removed because it would need bookkeeping, which "
                       "cannot be created\n", INSN_UID (EXPR_INSN_RTX (expr)));
	  av_set_iter_remove (&i);
	}
    }
}

/* Moving conditional jump through some instructions.

   Consider example:

       ...                     <- current scheduling point
       NOTE BASIC BLOCK:       <- bb header
       (p8)  add r14=r14+0x9;;
       (p8)  mov [r14]=r23
       (!p8) jump L1;;
       NOTE BASIC BLOCK:
       ...

   We can schedule the jump one cycle earlier than the mov, because they
   cannot be executed together, as their predicates are mutually exclusive.

   This is done as follows: first, a new fallthrough basic block is created
   after the jump (this can always be done, because there must already be a
   fallthrough block to which control flow goes when the predicate is
   true, as in our example; otherwise there would be a dependence between
   those instructions and the jump, and we could not schedule the jump
   right now); next, all instructions between the jump and the current
   scheduling point are moved to this new block.  And the result is this:

      NOTE BASIC BLOCK:
      (!p8) jump L1           <- current scheduling point
      NOTE BASIC BLOCK:       <- bb header
      (p8)  add r14=r14+0x9;;
      (p8)  mov [r14]=r23
      NOTE BASIC BLOCK:
      ...
*/
static void
move_cond_jump (rtx_insn *insn, bnd_t bnd)
{
  edge ft_edge;
  basic_block block_from, block_next, block_new, block_bnd, bb;
  rtx_insn *next, *prev, *link, *head;

  block_from = BLOCK_FOR_INSN (insn);
  block_bnd = BLOCK_FOR_INSN (BND_TO (bnd));
  prev = BND_TO (bnd);

  /* Moving of jump should not cross any other jumps or beginnings of new
     basic blocks.  The only exception is when we move a jump through
     mutually exclusive insns along fallthru edges.  */
  if (flag_checking && block_from != block_bnd)
    {
      bb = block_from;
      for (link = PREV_INSN (insn); link != PREV_INSN (prev);
           link = PREV_INSN (link))
        {
          if (INSN_P (link))
            gcc_assert (sched_insns_conditions_mutex_p (insn, link));
          if (BLOCK_FOR_INSN (link) && BLOCK_FOR_INSN (link) != bb)
            {
              gcc_assert (single_pred (bb) == BLOCK_FOR_INSN (link));
              bb = BLOCK_FOR_INSN (link);
            }
        }
    }

  /* Jump is moved to the boundary.  */
  next = PREV_INSN (insn);
  BND_TO (bnd) = insn;

  ft_edge = find_fallthru_edge_from (block_from);
  block_next = ft_edge->dest;
  /* There must be a fallthrough block (otherwise, where would control
     flow go when the jump predicate is false?).  */
  gcc_assert (block_next);

  /* Create new empty basic block after source block.  */
  block_new = sel_split_edge (ft_edge);
  gcc_assert (block_new->next_bb == block_next
              && block_from->next_bb == block_new);

  /* Move all instructions except INSN to BLOCK_NEW.  */
  bb = block_bnd;
  head = BB_HEAD (block_new);
  while (bb != block_from->next_bb)
    {
      rtx_insn *from, *to;
      from = bb == block_bnd ? prev : sel_bb_head (bb);
      to = bb == block_from ? next : sel_bb_end (bb);

      /* The jump being moved can be the first insn in the block.
         In this case we don't have to move anything in this block.  */
      if (NEXT_INSN (to) != from)
        {
          reorder_insns (from, to, head);

          for (link = to; link != head; link = PREV_INSN (link))
            EXPR_ORIG_BB_INDEX (INSN_EXPR (link)) = block_new->index;
          head = to;
        }

      /* Cleanup possibly empty blocks left.  */
      block_next = bb->next_bb;
      if (bb != block_from)
	tidy_control_flow (bb, false);
      bb = block_next;
    }

  /* Assert there is no jump to BLOCK_NEW, only fallthrough edge.  */
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (block_new)));

  gcc_assert (!sel_bb_empty_p (block_from)
              && !sel_bb_empty_p (block_new));

  /* Update data sets for BLOCK_NEW to represent that INSN and
     the instructions from the other branch of INSN are no longer
     available at BLOCK_NEW.  */
  BB_AV_LEVEL (block_new) = global_level;
  gcc_assert (BB_LV_SET (block_new) == NULL);
  BB_LV_SET (block_new) = get_clear_regset_from_pool ();
  update_data_sets (sel_bb_head (block_new));

  /* INSN is a new basic block header - so prepare its data
     structures and update availability and liveness sets.  */
  update_data_sets (insn);

  if (sched_verbose >= 4)
    sel_print ("Moving jump %d\n", INSN_UID (insn));
}

/* Remove nops generated during move_op for preventing removal of empty
   basic blocks.  */
static void
remove_temp_moveop_nops (bool full_tidying)
{
  int i;
  insn_t insn;

  FOR_EACH_VEC_ELT (vec_temp_moveop_nops, i, insn)
    {
      gcc_assert (INSN_NOP_P (insn));
      return_nop_to_pool (insn, full_tidying);
    }

  /* Empty the vector.  */
  if (vec_temp_moveop_nops.length () > 0)
    vec_temp_moveop_nops.block_remove (0, vec_temp_moveop_nops.length ());
}

/* Records the maximal UID before moving up an instruction.  Used for
   distinguishing between bookkeeping copies and original insns.  */
static int max_uid_before_move_op = 0;

/* When true, we're always scheduling next insn on the already scheduled code
   to get the right insn data for the following bundling or other passes.  */
static int force_next_insn = 0;

/* Remove from AV_VLIW_P all instructions but next when debug counter
   tells us so.  Next instruction is fetched from BNDS.  */
static void
remove_insns_for_debug (blist_t bnds, av_set_t *av_vliw_p)
{
  if (! dbg_cnt (sel_sched_insn_cnt) || force_next_insn)
    /* Leave only the next insn in av_vliw.  */
    {
      av_set_iterator av_it;
      expr_t expr;
      bnd_t bnd = BLIST_BND (bnds);
      insn_t next = BND_TO (bnd);

      gcc_assert (BLIST_NEXT (bnds) == NULL);

      FOR_EACH_EXPR_1 (expr, av_it, av_vliw_p)
        if (EXPR_INSN_RTX (expr) != next)
          av_set_iter_remove (&av_it);
    }
}

/* Compute available instructions on BNDS.  FENCE is the current fence.  Write
   the computed set to *AV_VLIW_P.  */
static void
compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p)
{
  if (sched_verbose >= 2)
    {
      sel_print ("Boundaries: ");
      dump_blist (bnds);
      sel_print ("\n");
    }

  for (; bnds; bnds = BLIST_NEXT (bnds))
    {
      bnd_t bnd = BLIST_BND (bnds);
      av_set_t av1_copy;
      insn_t bnd_to = BND_TO (bnd);

      /* Rewind BND->TO to the basic block header in case some bookkeeping
         instructions were inserted before BND->TO and it needs to be
         adjusted.  */
      if (sel_bb_head_p (bnd_to))
        gcc_assert (INSN_SCHED_TIMES (bnd_to) == 0);
      else
        while (INSN_SCHED_TIMES (PREV_INSN (bnd_to)) == 0)
          {
            bnd_to = PREV_INSN (bnd_to);
            if (sel_bb_head_p (bnd_to))
              break;
          }

      if (BND_TO (bnd) != bnd_to)
	{
	  gcc_assert (FENCE_INSN (fence) == BND_TO (bnd));
	  FENCE_INSN (fence) = bnd_to;
	  BND_TO (bnd) = bnd_to;
	}

      av_set_clear (&BND_AV (bnd));
      BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true);

      av_set_clear (&BND_AV1 (bnd));
      BND_AV1 (bnd) = av_set_copy (BND_AV (bnd));

      moveup_set_inside_insn_group (&BND_AV1 (bnd), NULL);

      av1_copy = av_set_copy (BND_AV1 (bnd));
      av_set_union_and_clear (av_vliw_p, &av1_copy, NULL);
    }

  if (sched_verbose >= 2)
    {
      sel_print ("Available exprs (vliw form): ");
      dump_av_set (*av_vliw_p);
      sel_print ("\n");
    }
}

/* Calculate the sequential av set on BND corresponding to the EXPR_VLIW
   expression.  When FOR_MOVEOP is true, also replace the register of
   expressions found with the register from EXPR_VLIW.  */
static av_set_t
find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop)
{
  av_set_t expr_seq = NULL;
  expr_t expr;
  av_set_iterator i;

  FOR_EACH_EXPR (expr, i, BND_AV (bnd))
    {
      if (equal_after_moveup_path_p (expr, NULL, expr_vliw))
        {
          if (for_moveop)
            {
              /* The sequential expression has the right form to pass
                 to move_op except when renaming happened.  Put the
                 correct register in EXPR then.  */
              if (EXPR_SEPARABLE_P (expr) && REG_P (EXPR_LHS (expr)))
		{
                  if (expr_dest_regno (expr) != expr_dest_regno (expr_vliw))
		    {
		      replace_dest_with_reg_in_expr (expr, EXPR_LHS (expr_vliw));
		      stat_renamed_scheduled++;
		    }
		  /* Also put the correct TARGET_AVAILABLE bit on the expr.
                     This is needed when renaming came up with original
                     register.  */
                  else if (EXPR_TARGET_AVAILABLE (expr)
                           != EXPR_TARGET_AVAILABLE (expr_vliw))
		    {
		      gcc_assert (EXPR_TARGET_AVAILABLE (expr_vliw) == 1);
		      EXPR_TARGET_AVAILABLE (expr) = 1;
		    }
		}
              if (EXPR_WAS_SUBSTITUTED (expr))
                stat_substitutions_total++;
            }

          av_set_add (&expr_seq, expr);

          /* With substitution inside insn group, it is possible
             that more than one expression in expr_seq will correspond
             to expr_vliw.  In this case, choose one, as attempting to
             move both leads to miscompiles.  */
          break;
        }
    }

  if (for_moveop && sched_verbose >= 2)
    {
      sel_print ("Best expression(s) (sequential form): ");
      dump_av_set (expr_seq);
      sel_print ("\n");
    }

  return expr_seq;
}


/* Move nop to previous block.  */
static void ATTRIBUTE_UNUSED
move_nop_to_previous_block (insn_t nop, basic_block prev_bb)
{
  insn_t prev_insn, next_insn;

  gcc_assert (sel_bb_head_p (nop)
              && prev_bb == BLOCK_FOR_INSN (nop)->prev_bb);
  rtx_note *note = bb_note (BLOCK_FOR_INSN (nop));
  prev_insn = sel_bb_end (prev_bb);
  next_insn = NEXT_INSN (nop);
  gcc_assert (prev_insn != NULL_RTX
              && PREV_INSN (note) == prev_insn);

  SET_NEXT_INSN (prev_insn) = nop;
  SET_PREV_INSN (nop) = prev_insn;

  SET_PREV_INSN (note) = nop;
  SET_NEXT_INSN (note) = next_insn;

  SET_NEXT_INSN (nop) = note;
  SET_PREV_INSN (next_insn) = note;

  BB_END (prev_bb) = nop;
  BLOCK_FOR_INSN (nop) = prev_bb;
}

/* Prepare a place to insert the chosen expression on BND.  */
static insn_t
prepare_place_to_insert (bnd_t bnd)
{
  insn_t place_to_insert;

  /* Init place_to_insert before calling move_op, as the latter
     can possibly remove BND_TO (bnd).  */
  if (/* If this is not the first insn scheduled.  */
      BND_PTR (bnd))
    {
      /* Add it after last scheduled.  */
      place_to_insert = ILIST_INSN (BND_PTR (bnd));
      if (DEBUG_INSN_P (place_to_insert))
	{
	  ilist_t l = BND_PTR (bnd);
	  while ((l = ILIST_NEXT (l)) &&
		 DEBUG_INSN_P (ILIST_INSN (l)))
	    ;
	  if (!l)
	    place_to_insert = NULL;
	}
    }
  else
    place_to_insert = NULL;

  if (!place_to_insert)
    {
      /* Add it before BND_TO.  The difference is in the
         basic block, where INSN will be added.  */
      place_to_insert = get_nop_from_pool (BND_TO (bnd));
      gcc_assert (BLOCK_FOR_INSN (place_to_insert)
                  == BLOCK_FOR_INSN (BND_TO (bnd)));
    }

  return place_to_insert;
}

/* Find original instructions for EXPR_SEQ and move them to the BND boundary.
   Return the expression to emit in C_EXPR.  */
static bool
move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
                        av_set_t expr_seq, expr_t c_expr)
{
  bool b, should_move;
  unsigned book_uid;
  bitmap_iterator bi;
  int n_bookkeeping_copies_before_moveop;

  /* Make a move.  This call will remove the original operation,
     insert all necessary bookkeeping instructions and update the
     data sets.  After that all we have to do is add the operation
     right before BND_TO (BND).  */
  n_bookkeeping_copies_before_moveop = stat_bookkeeping_copies;
  max_uid_before_move_op = get_max_uid ();
  bitmap_clear (current_copies);
  bitmap_clear (current_originators);

  b = move_op (BND_TO (bnd), expr_seq, expr_vliw,
               get_dest_from_orig_ops (expr_seq), c_expr, &should_move);

  /* We should be able to find the expression we've chosen for
     scheduling.  */
  gcc_assert (b);

  if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop)
    stat_insns_needed_bookkeeping++;

  EXECUTE_IF_SET_IN_BITMAP (current_copies, 0, book_uid, bi)
    {
      unsigned uid;
      bitmap_iterator bi;

      /* We allocate these bitmaps lazily.  */
      if (! INSN_ORIGINATORS_BY_UID (book_uid))
        INSN_ORIGINATORS_BY_UID (book_uid) = BITMAP_ALLOC (NULL);

      bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
                   current_originators);

      /* Transitively add all originators' originators.  */
      EXECUTE_IF_SET_IN_BITMAP (current_originators, 0, uid, bi)
       if (INSN_ORIGINATORS_BY_UID (uid))
	 bitmap_ior_into (INSN_ORIGINATORS_BY_UID (book_uid),
			  INSN_ORIGINATORS_BY_UID (uid));
    }

  return should_move;
}


/* Debug a DFA state as an array of bytes.  */
static void
debug_state (state_t state)
{
  unsigned char *p;
  unsigned int i, size = dfa_state_size;

  sel_print ("state (%u):", size);
  for (i = 0, p = (unsigned char *) state; i < size; i++)
    sel_print (" %d", p[i]);
  sel_print ("\n");
}
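
/* With a hypothetical 4-byte DFA state whose bytes are 0, 2, 0 and 1, the
   dump produced by the loop above would look like:

     state (4): 0 2 0 1

   (illustrative values only; the state layout comes from the generated
   insn automata and is target-specific).  */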

/* Advance state on FENCE with INSN.  Return true if INSN is
   an ASM, and we should advance state once more.  */
static bool
advance_state_on_fence (fence_t fence, insn_t insn)
{
  bool asm_p;

  if (recog_memoized (insn) >= 0)
    {
      int res;
      state_t temp_state = alloca (dfa_state_size);

      gcc_assert (!INSN_ASM_P (insn));
      asm_p = false;

      memcpy (temp_state, FENCE_STATE (fence), dfa_state_size);
      res = state_transition (FENCE_STATE (fence), insn);
      gcc_assert (res < 0);

      if (memcmp (temp_state, FENCE_STATE (fence), dfa_state_size))
        {
          FENCE_ISSUED_INSNS (fence)++;

          /* We should never issue more than issue_rate insns.  */
          if (FENCE_ISSUED_INSNS (fence) > issue_rate)
            gcc_unreachable ();
        }
    }
  else
    {
      /* This could be an ASM insn which we'd like to schedule
         on the next cycle.  */
      asm_p = INSN_ASM_P (insn);
      if (!FENCE_STARTS_CYCLE_P (fence) && asm_p)
        advance_one_cycle (fence);
    }

  if (sched_verbose >= 2)
    debug_state (FENCE_STATE (fence));
  if (!DEBUG_INSN_P (insn))
    FENCE_STARTS_CYCLE_P (fence) = 0;
  FENCE_ISSUE_MORE (fence) = can_issue_more;
  return asm_p;
}

/* Update FENCE on which INSN was scheduled and this INSN, too.  NEED_STALL
   is nonzero if we need to stall after issuing INSN.  */
static void
update_fence_and_insn (fence_t fence, insn_t insn, int need_stall)
{
  bool asm_p;

  /* First, reflect that something is scheduled on this fence.  */
  asm_p = advance_state_on_fence (fence, insn);
  FENCE_LAST_SCHEDULED_INSN (fence) = insn;
  vec_safe_push (FENCE_EXECUTING_INSNS (fence), insn);
  if (SCHED_GROUP_P (insn))
    {
      FENCE_SCHED_NEXT (fence) = INSN_SCHED_NEXT (insn);
      SCHED_GROUP_P (insn) = 0;
    }
  else
    FENCE_SCHED_NEXT (fence) = NULL;
  if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
    FENCE_READY_TICKS (fence) [INSN_UID (insn)] = 0;

  /* Set instruction scheduling info.  This will be used in bundling,
     pipelining, tick computations etc.  */
  ++INSN_SCHED_TIMES (insn);
  EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true;
  EXPR_ORIG_SCHED_CYCLE (INSN_EXPR (insn)) = FENCE_CYCLE (fence);
  INSN_AFTER_STALL_P (insn) = FENCE_AFTER_STALL_P (fence);
  INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence);

  /* This does not account for adjust_cost hooks, so just add the biggest
     constant the hook may add to the latency.  TODO: make this
     a target dependent constant.  */
  INSN_READY_CYCLE (insn)
    = INSN_SCHED_CYCLE (insn) + (INSN_CODE (insn) < 0
                                 ? 1
                                 : maximal_insn_latency (insn) + 1);
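
  /* For example (illustrative numbers only): an insn scheduled on cycle 3
     with a maximal latency of 2 gets INSN_READY_CYCLE 3 + 2 + 1 = 6, while
     an unrecognized insn (INSN_CODE < 0) scheduled on cycle 3 gets 4.  */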

  /* Change these fields last, as they're used above.  */
  FENCE_AFTER_STALL_P (fence) = 0;
  if (asm_p || need_stall)
    advance_one_cycle (fence);

  /* Indicate that we've scheduled something on this fence.  */
  FENCE_SCHEDULED_P (fence) = true;
  scheduled_something_on_previous_fence = true;

  /* Print debug information when insn's fields are updated.  */
  if (sched_verbose >= 2)
    {
      sel_print ("Scheduling insn: ");
      dump_insn_1 (insn, 1);
      sel_print ("\n");
    }
}

/* Update boundary BND (and, if needed, FENCE) with INSN, remove the
   old boundary from BNDSP, add new boundaries to BNDS_TAILP and
   return it.  */
static blist_t *
update_boundaries (fence_t fence, bnd_t bnd, insn_t insn, blist_t *bndsp,
                   blist_t *bnds_tailp)
{
  succ_iterator si;
  insn_t succ;

  advance_deps_context (BND_DC (bnd), insn);
  FOR_EACH_SUCC_1 (succ, si, insn,
                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      ilist_t ptr = ilist_copy (BND_PTR (bnd));

      ilist_add (&ptr, insn);

      if (DEBUG_INSN_P (insn) && sel_bb_end_p (insn)
	  && is_ineligible_successor (succ, ptr))
	{
	  ilist_clear (&ptr);
	  continue;
	}

      if (FENCE_INSN (fence) == insn && !sel_bb_end_p (insn))
	{
	  if (sched_verbose >= 9)
	    sel_print ("Updating fence insn from %i to %i\n",
		       INSN_UID (insn), INSN_UID (succ));
	  FENCE_INSN (fence) = succ;
	}
      blist_add (bnds_tailp, succ, ptr, BND_DC (bnd));
      bnds_tailp = &BLIST_NEXT (*bnds_tailp);
    }

  blist_remove (bndsp);
  return bnds_tailp;
}

/* Schedule EXPR_VLIW on BND.  Return the insn emitted.  */
static insn_t
schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
{
  av_set_t expr_seq;
  expr_t c_expr = XALLOCA (expr_def);
  insn_t place_to_insert;
  insn_t insn;
  bool should_move;

  expr_seq = find_sequential_best_exprs (bnd, expr_vliw, true);

  /* In case of scheduling a jump skipping some other instructions,
     prepare the CFG.  After this, the jump is at the boundary and can
     be scheduled as a usual insn by MOVE_OP.  */
  if (vinsn_cond_branch_p (EXPR_VINSN (expr_vliw)))
    {
      insn = EXPR_INSN_RTX (expr_vliw);

      /* Speculative jumps are not handled.  */
      if (insn != BND_TO (bnd)
          && !sel_insn_is_speculation_check (insn))
        move_cond_jump (insn, bnd);
    }

  /* Find a place for C_EXPR to schedule.  */
  place_to_insert = prepare_place_to_insert (bnd);
  should_move = move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr);
  clear_expr (c_expr);

  /* Add the instruction.  The corner case to care about is when
     the expr_seq set has more than one expr, and we chose the one that
     is not equal to expr_vliw.  Then expr_vliw may be an insn in the
     stream, and we can't use it.  Generate the new vinsn.  */
  if (INSN_IN_STREAM_P (EXPR_INSN_RTX (expr_vliw)))
    {
      vinsn_t vinsn_new;

      vinsn_new = vinsn_copy (EXPR_VINSN (expr_vliw), false);
      change_vinsn_in_expr (expr_vliw, vinsn_new);
      should_move = false;
    }
  if (should_move)
    insn = sel_move_insn (expr_vliw, seqno, place_to_insert);
  else
    insn = emit_insn_from_expr_after (expr_vliw, NULL, seqno,
                                      place_to_insert);

  /* Return the nops generated for preserving of data sets back
     into pool.  */
  if (INSN_NOP_P (place_to_insert))
    return_nop_to_pool (place_to_insert, !DEBUG_INSN_P (insn));
  remove_temp_moveop_nops (!DEBUG_INSN_P (insn));

  av_set_clear (&expr_seq);

  /* Save the expression scheduled so as to reset target availability if we
     meet it later on the same fence.  */
  if (EXPR_WAS_RENAMED (expr_vliw))
    vinsn_vec_add (&vec_target_unavailable_vinsns, INSN_EXPR (insn));

  /* Check that the recent movement didn't destroy the loop
     structure.  */
  gcc_assert (!pipelining_p
              || current_loop_nest == NULL
              || loop_latch_edge (current_loop_nest));
  return insn;
}

/* Stall for N cycles on FENCE.  */
static void
stall_for_cycles (fence_t fence, int n)
{
  int could_more;

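  /* Whether the fence could still have issued insns: either we stall for
     more than one cycle, or the current cycle has not yet been filled up
     to issue_rate.  */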
  could_more = n > 1 || FENCE_ISSUED_INSNS (fence) < issue_rate;
  while (n--)
    advance_one_cycle (fence);
  if (could_more)
    FENCE_AFTER_STALL_P (fence) = 1;
}

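/* In outline (an illustrative sketch, not the authoritative algorithm),
   the scheduling loop in fill_insns below proceeds roughly as:

     do
       {
         compute av_vliw from all boundaries;
         prune exprs that would need impossible bookkeeping;
         prune exprs per the debug counter;
         pick the best expr, stalling while everything needs a stall;
         if (nothing pickable)
           break;
         schedule the expr on its boundary; update fence and boundaries;
       }
     while (boundaries remain, we did not cross a bb end,
            and the stall/insn limits are not exceeded);
*/
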
/* Gather a parallel group of insns at FENCE and assign their seqno
   to SEQNO.  All scheduled insns are gathered in SCHEDULED_INSNS_TAILPP
   list for later recalculation of seqnos.  */
static void
fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
{
  blist_t bnds = NULL, *bnds_tailp;
  av_set_t av_vliw = NULL;
  insn_t insn = FENCE_INSN (fence);

  if (sched_verbose >= 2)
    sel_print ("Starting fill_insns for insn %d, cycle %d\n",
               INSN_UID (insn), FENCE_CYCLE (fence));

  blist_add (&bnds, insn, NULL, FENCE_DC (fence));
  bnds_tailp = &BLIST_NEXT (bnds);
  set_target_context (FENCE_TC (fence));
  can_issue_more = FENCE_ISSUE_MORE (fence);
  target_bb = INSN_BB (insn);

  /* Do while we can add any operation to the current group.  */
  do
    {
      blist_t *bnds_tailp1, *bndsp;
      expr_t expr_vliw;
      int need_stall = false;
      int was_stall = 0, scheduled_insns = 0;
      int max_insns = pipelining_p ? issue_rate : 2 * issue_rate;
      int max_stall = pipelining_p ? 1 : 3;
      bool last_insn_was_debug = false;
      bool was_debug_bb_end_p = false;

      compute_av_set_on_boundaries (fence, bnds, &av_vliw);
      remove_insns_that_need_bookkeeping (fence, &av_vliw);
      remove_insns_for_debug (bnds, &av_vliw);

      /* Return early if we have nothing to schedule.  */
      if (av_vliw == NULL)
        break;

      /* Choose the best expression and, if needed, destination register
	 for it.  */
      do
        {
          expr_vliw = find_best_expr (&av_vliw, bnds, fence, &need_stall);
          if (! expr_vliw && need_stall)
            {
              /* All expressions required a stall.  Do not recompute av sets
                 as we'll get the same answer (modulo the insns between
                 the fence and its boundary, which will not be available for
                 pipelining).
		 If we are going to stall for too long, break to recompute av
		 sets and bring more insns for pipelining.  */
              was_stall++;
	      if (need_stall <= 3)
		stall_for_cycles (fence, need_stall);
	      else
		{
		  stall_for_cycles (fence, 1);
		  break;
		}
            }
        }
      while (! expr_vliw && need_stall);

      /* Now either we've selected expr_vliw or we have nothing to schedule.  */
      if (!expr_vliw)
        {
	  av_set_clear (&av_vliw);
          break;
        }

      bndsp = &bnds;
      bnds_tailp1 = bnds_tailp;

      do
	/* This loop body will be executed only once until we have several
           boundaries per fence.  */
        {
	  bnd_t bnd = BLIST_BND (*bndsp);

	  if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr_vliw)))
	    {
	      bndsp = &BLIST_NEXT (*bndsp);
	      continue;
	    }

          insn = schedule_expr_on_boundary (bnd, expr_vliw, seqno);
	  last_insn_was_debug = DEBUG_INSN_P (insn);
	  if (last_insn_was_debug)
	    was_debug_bb_end_p = (insn == BND_TO (bnd) && sel_bb_end_p (insn));
          update_fence_and_insn (fence, insn, need_stall);
          bnds_tailp = update_boundaries (fence, bnd, insn, bndsp, bnds_tailp);

	  /* Add insn to the list of scheduled on this cycle instructions.  */
	  ilist_add (*scheduled_insns_tailpp, insn);
	  *scheduled_insns_tailpp = &ILIST_NEXT (**scheduled_insns_tailpp);
        }
      while (*bndsp != *bnds_tailp1);

      av_set_clear (&av_vliw);
      if (!last_insn_was_debug)
	scheduled_insns++;

      /* We currently support information about candidate blocks only for
	 one 'target_bb' block.  Hence we can't schedule after jump insn,
	 as this will bring two boundaries and, hence, necessity to handle
	 information for two or more blocks concurrently.  */
      if ((last_insn_was_debug ? was_debug_bb_end_p : sel_bb_end_p (insn))
          || (was_stall
              && (was_stall >= max_stall
                  || scheduled_insns >= max_insns)))
        break;
    }
  while (bnds);

  gcc_assert (!FENCE_BNDS (fence));

  /* Update boundaries of the FENCE.  */
  while (bnds)
    {
      ilist_t ptr = BND_PTR (BLIST_BND (bnds));

      if (ptr)
	{
	  insn = ILIST_INSN (ptr);

	  if (!ilist_is_in_p (FENCE_BNDS (fence), insn))
	    ilist_add (&FENCE_BNDS (fence), insn);
	}

      blist_remove (&bnds);
    }

  /* Update target context on the fence.  */
  reset_target_context (FENCE_TC (fence), false);
}

/* All exprs in ORIG_OPS must have the same destination register or memory.
   Return that destination.  */
static rtx
get_dest_from_orig_ops (av_set_t orig_ops)
{
  rtx dest = NULL_RTX;
  av_set_iterator av_it;
  expr_t expr;
  bool first_p = true;

  FOR_EACH_EXPR (expr, av_it, orig_ops)
    {
      rtx x = EXPR_LHS (expr);

      if (first_p)
	{
	  first_p = false;
	  dest = x;
	}
      else
	gcc_assert (dest == x
		    || (dest != NULL_RTX && x != NULL_RTX
			&& rtx_equal_p (dest, x)));
    }

  return dest;
}

/* Update data sets for the bookkeeping block and record those expressions
   which become no longer available after inserting this bookkeeping.  */
static void
update_and_record_unavailable_insns (basic_block book_block)
{
  av_set_iterator i;
  av_set_t old_av_set = NULL;
  expr_t cur_expr;
  rtx_insn *bb_end = sel_bb_end (book_block);

  /* First, get correct liveness in the bookkeeping block.  The problem is
     the range between the bookkeeping insn and the end of block.  */
  update_liveness_on_insn (bb_end);
  if (control_flow_insn_p (bb_end))
    update_liveness_on_insn (PREV_INSN (bb_end));

  /* If there's valid av_set on BOOK_BLOCK, then there might exist another
     fence above, where we may choose to schedule an insn which is
     actually blocked from moving up with the bookkeeping we create here.  */
  if (AV_SET_VALID_P (sel_bb_head (book_block)))
    {
      old_av_set = av_set_copy (BB_AV_SET (book_block));
      update_data_sets (sel_bb_head (book_block));

      /* Traverse all the expressions in the old av_set and check whether
	 CUR_EXPR is in new AV_SET.  */
      FOR_EACH_EXPR (cur_expr, i, old_av_set)
        {
          expr_t new_expr = av_set_lookup (BB_AV_SET (book_block),
					   EXPR_VINSN (cur_expr));

          if (! new_expr
              /* In this case, we can just turn off the E_T_A bit, but we can't
                 represent this information with the current vector.  */
              || EXPR_TARGET_AVAILABLE (new_expr)
		 != EXPR_TARGET_AVAILABLE (cur_expr))
	    /* Unfortunately, the below code could be also fired up on
	       separable insns, e.g. when moving insns through the new
	       speculation check as in PR 53701.  */
            vinsn_vec_add (&vec_bookkeeping_blocked_vinsns, cur_expr);
        }

      av_set_clear (&old_av_set);
    }
}

/* The main effect of this function is that sparams->c_expr is merged
   with (or copied to) lparams->c_expr_merged.  If there's only one successor,
   we avoid merging anything by copying sparams->c_expr to lparams->c_expr_merged.
   lparams->c_expr_merged is copied back to sparams->c_expr after all
   successors have been traversed.  lparams->c_expr_local is an expr allocated
   on stack in the caller function, and is used if there is more than one
   successor.

   SUCC is one of the SUCCS_NORMAL successors of INSN,
   MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on SUCC,
   LPARAMS and STATIC_PARAMS contain the parameters described above.  */
static void
move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED,
                     insn_t succ ATTRIBUTE_UNUSED,
		     int moveop_drv_call_res,
		     cmpd_local_params_p lparams, void *static_params)
{
  moveop_static_params_p sparams = (moveop_static_params_p) static_params;

  /* Nothing to do, if original expr wasn't found below.  */
  if (moveop_drv_call_res != 1)
    return;

  /* If this is a first successor.  */
  if (!lparams->c_expr_merged)
    {
      lparams->c_expr_merged = sparams->c_expr;
      sparams->c_expr = lparams->c_expr_local;
    }
  else
    {
      /* We must merge all found expressions to get reasonable
	 EXPR_SPEC_DONE_DS for the resulting insn.  If we don't
	 do so then we can first find the expr with epsilon
	 speculation success probability and only then with the
	 good probability.  As a result the insn will get epsilon
	 probability and will never be scheduled because of
	 weakness_cutoff in find_best_expr.

	 We call merge_expr_data here instead of merge_expr
	 because due to speculation C_EXPR and X may have the
	 same insns with different speculation types.  And as of
	 now such insns are considered non-equal.

	 However, EXPR_SCHED_TIMES is different -- we must get
	 SCHED_TIMES from a real insn, not a bookkeeping copy.
	 We force this here.  Instead, we may consider merging
	 SCHED_TIMES to the maximum instead of minimum in the
	 below function.  */
      int old_times = EXPR_SCHED_TIMES (lparams->c_expr_merged);

      merge_expr_data (lparams->c_expr_merged, sparams->c_expr, NULL);
      if (EXPR_SCHED_TIMES (sparams->c_expr) == 0)
	EXPR_SCHED_TIMES (lparams->c_expr_merged) = old_times;

      clear_expr (sparams->c_expr);
    }
}

/* Add used regs for the successor SUCC into SPARAMS->USED_REGS.

   SUCC is one of the SUCCS_NORMAL successors of INSN,
   MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on
     SUCC, or 0 if SUCC is one of SUCCS_BACK or SUCCS_OUT.
   STATIC_PARAMS contain USED_REGS set.  */
static void
fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ,
		 int moveop_drv_call_res,
		 cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
		 void *static_params)
{
  regset succ_live;
  fur_static_params_p sparams = (fur_static_params_p) static_params;

  /* Here we compute live regsets only for branches that do not lie
     on the code motion paths.  These branches correspond to value
     MOVEOP_DRV_CALL_RES==0 and include SUCCS_BACK and SUCCS_OUT, though
     for such branches code_motion_path_driver is not called.  */
  if (moveop_drv_call_res != 0)
    return;

  /* Mark all registers that do not meet the following condition:
     (3) not live on the other path of any conditional branch
     that is passed by the operation, in case original
     operations are not present on both paths of the
     conditional branch.  */
  succ_live = compute_live (succ);
  IOR_REG_SET (sparams->used_regs, succ_live);
}

/* This function is called after the last successor.  Copies LP->C_EXPR_MERGED
   into SP->C_EXPR.  */
static void
move_op_after_merge_succs (cmpd_local_params_p lp, void *sparams)
{
  moveop_static_params_p sp = (moveop_static_params_p) sparams;

  sp->c_expr = lp->c_expr_merged;
}

/* Track bookkeeping copies created, insns scheduled, and blocks for
   rescheduling when INSN is found by move_op.  */
static void
track_scheduled_insns_and_blocks (rtx_insn *insn)
{
  /* Even if this insn can be a copy that will be removed during current move_op,
     we still need to count it as an originator.  */
  bitmap_set_bit (current_originators, INSN_UID (insn));

  if (!bitmap_clear_bit (current_copies, INSN_UID (insn)))
    {
      /* Note that original block needs to be rescheduled, as we pulled an
	 instruction out of it.  */
      if (INSN_SCHED_TIMES (insn) > 0)
	bitmap_set_bit (blocks_to_reschedule, BLOCK_FOR_INSN (insn)->index);
      else if (INSN_UID (insn) < first_emitted_uid && !DEBUG_INSN_P (insn))
	num_insns_scheduled++;
    }

  /* For instructions we must immediately remove insn from the
     stream, so subsequent update_data_sets () won't include this
     insn into av_set.
     For expr we must make insn look like "INSN_REG (insn) := c_expr".  */
  if (INSN_UID (insn) > max_uid_before_move_op)
    stat_bookkeeping_copies--;
}

/* Emit a register-register copy for INSN if needed.  Return true if
   emitted one.  PARAMS is the move_op static parameters.  */
static bool
maybe_emit_renaming_copy (rtx_insn *insn,
                          moveop_static_params_p params)
{
  bool insn_emitted  = false;
  rtx cur_reg;

  /* Bail out early when expression cannot be renamed at all.  */
  if (!EXPR_SEPARABLE_P (params->c_expr))
    return false;

  cur_reg = expr_dest_reg (params->c_expr);
  gcc_assert (cur_reg && params->dest && REG_P (params->dest));

  /* If original operation has expr and the register chosen for
     that expr is not original operation's dest reg, substitute
     operation's right hand side with the register chosen.  */
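  /* In other words (an explanatory note): the renamed expression itself is
     scheduled into the chosen register above, so here we leave behind the
     copy "original dest := chosen reg" at the original location.  */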
  if (REGNO (params->dest) != REGNO (cur_reg))
    {
      insn_t reg_move_insn, reg_move_insn_rtx;

      reg_move_insn_rtx = create_insn_rtx_with_rhs (INSN_VINSN (insn),
                                                    params->dest);
      reg_move_insn = sel_gen_insn_from_rtx_after (reg_move_insn_rtx,
                                                   INSN_EXPR (insn),
                                                   INSN_SEQNO (insn),
                                                   insn);
      EXPR_SPEC_DONE_DS (INSN_EXPR (reg_move_insn)) = 0;
      replace_dest_with_reg_in_expr (params->c_expr, params->dest);

      insn_emitted = true;
      params->was_renamed = true;
    }

  return insn_emitted;
}

/* Emit a speculative check for INSN speculated as EXPR if needed.
   Return true if we've emitted one.  PARAMS is the move_op static
   parameters.  */
static bool
maybe_emit_speculative_check (rtx_insn *insn, expr_t expr,
                              moveop_static_params_p params)
{
  bool insn_emitted = false;
  insn_t x;
  ds_t check_ds;

  check_ds = get_spec_check_type_for_insn (insn, expr);
  if (check_ds != 0)
    {
      /* A speculation check should be inserted.  */
      x = create_speculation_check (params->c_expr, check_ds, insn);
      insn_emitted = true;
    }
  else
    {
      EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
      x = insn;
    }

  gcc_assert (EXPR_SPEC_DONE_DS (INSN_EXPR (x)) == 0
              && EXPR_SPEC_TO_CHECK_DS (INSN_EXPR (x)) == 0);
  return insn_emitted;
}

/* Handle transformations that leave an insn in place of original
   insn such as renaming/speculation.  Return true if one of such
   transformations actually happened, and we have emitted this insn.  */
static bool
handle_emitting_transformations (rtx_insn *insn, expr_t expr,
                                 moveop_static_params_p params)
{
  bool insn_emitted = false;

  insn_emitted = maybe_emit_renaming_copy (insn, params);
  insn_emitted |= maybe_emit_speculative_check (insn, expr, params);

  return insn_emitted;
}

/* If INSN is the only insn in the basic block (not counting JUMP,
   which may be a jump to next insn, and DEBUG_INSNs), we want to
   leave a NOP there till the return to fill_insns.  */
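
/* Spelled out (a summary, not from the original sources), a NOP is needed
   when, ignoring debug insns and notes: the block holds at most one insn;
   the block holds exactly two insns and the second one is a jump; or the
   next insn after INSN belongs to the current fence.  */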
5929 
5930 static bool
need_nop_to_preserve_insn_bb(rtx_insn * insn)5931 need_nop_to_preserve_insn_bb (rtx_insn *insn)
5932 {
5933   insn_t bb_head, bb_end, bb_next, in_next;
5934   basic_block bb = BLOCK_FOR_INSN (insn);
5935 
5936   bb_head = sel_bb_head (bb);
5937   bb_end = sel_bb_end (bb);
5938 
5939   if (bb_head == bb_end)
5940     return true;
5941 
5942   while (bb_head != bb_end && DEBUG_INSN_P (bb_head))
5943     bb_head = NEXT_INSN (bb_head);
5944 
5945   if (bb_head == bb_end)
5946     return true;
5947 
5948   while (bb_head != bb_end && DEBUG_INSN_P (bb_end))
5949     bb_end = PREV_INSN (bb_end);
5950 
5951   if (bb_head == bb_end)
5952     return true;
5953 
5954   bb_next = NEXT_INSN (bb_head);
5955   while (bb_next != bb_end && DEBUG_INSN_P (bb_next))
5956     bb_next = NEXT_INSN (bb_next);
5957 
5958   if (bb_next == bb_end && JUMP_P (bb_end))
5959     return true;
5960 
5961   in_next = NEXT_INSN (insn);
5962   while (DEBUG_INSN_P (in_next))
5963     in_next = NEXT_INSN (in_next);
5964 
5965   if (IN_CURRENT_FENCE_P (in_next))
5966     return true;
5967 
5968   return false;
5969 }
5970 
5971 /* Remove INSN from stream.  When ONLY_DISCONNECT is true, its data
5972    is not removed but reused when INSN is re-emitted.  */
5973 static void
5974 remove_insn_from_stream (rtx_insn *insn, bool only_disconnect)
5975 {
5976   /* If there's only one insn in the BB, make sure that a nop is
5977      inserted into it, so the basic block won't disappear when we
5978      delete INSN below with sel_remove_insn.  It should also survive
5979      till the return to fill_insns.  */
5980   if (need_nop_to_preserve_insn_bb (insn))
5981     {
5982       insn_t nop = get_nop_from_pool (insn);
5983       gcc_assert (INSN_NOP_P (nop));
5984       vec_temp_moveop_nops.safe_push (nop);
5985     }
5986 
5987   sel_remove_insn (insn, only_disconnect, false);
5988 }
5989 
5990 /* This function is called when the original expr is found.
5991    INSN - current insn traversed, EXPR - the corresponding expr found.
5992    LPARAMS are the local parameters of the code motion driver, STATIC_PARAMS
5993    are the static parameters of move_op.  */
5994 static void
5995 move_op_orig_expr_found (insn_t insn, expr_t expr,
5996                          cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
5997                          void *static_params)
5998 {
5999   bool only_disconnect;
6000   moveop_static_params_p params = (moveop_static_params_p) static_params;
6001 
6002   copy_expr_onside (params->c_expr, INSN_EXPR (insn));
6003   track_scheduled_insns_and_blocks (insn);
6004   handle_emitting_transformations (insn, expr, params);
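  /* INSN is only disconnected (rather than deleted) when it is the very
     insn chosen for scheduling, i.e. its uid matches the one move_op saved
     from EXPR_VLIW; its data is then reused when it is re-emitted.  */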
6005   only_disconnect = params->uid == INSN_UID (insn);
6006 
6007   /* Mark that we've disconnected an insn.  */
6008   if (only_disconnect)
6009     params->uid = -1;
6010   remove_insn_from_stream (insn, only_disconnect);
6011 }
6012 
6013 /* The function is called when the original expr is found.
6014    INSN - current insn traversed, EXPR - the corresponding expr found,
6015    crosses_call and original_insns in STATIC_PARAMS are updated.  */
6016 static void
6017 fur_orig_expr_found (insn_t insn, expr_t expr ATTRIBUTE_UNUSED,
6018                      cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6019                      void *static_params)
6020 {
6021   fur_static_params_p params = (fur_static_params_p) static_params;
6022   regset tmp;
6023 
6024   if (CALL_P (insn))
6025     params->crosses_call = true;
6026 
6027   def_list_add (params->original_insns, insn, params->crosses_call);
6028 
6029   /* Mark the registers that do not meet the following condition:
6030     (2) not among the live registers of the point
6031 	immediately following the first original operation on
6032 	a given downward path, except for the original target
6033 	register of the operation.  */
6034   tmp = get_clear_regset_from_pool ();
6035   compute_live_below_insn (insn, tmp);
6036   AND_COMPL_REG_SET (tmp, INSN_REG_SETS (insn));
6037   AND_COMPL_REG_SET (tmp, INSN_REG_CLOBBERS (insn));
6038   IOR_REG_SET (params->used_regs, tmp);
6039   return_regset_to_pool (tmp);
6040 
6041   /* (*1) We need to add to USED_REGS registers that are read by
6042      INSN's lhs.  This may lead to choosing a wrong src register.
6043      E.g. (scheduling const expr enabled):
6044 
6045 	429: ax=0x0	<- Can't use AX for this expr (0x0)
6046 	433: dx=[bp-0x18]
6047 	427: [ax+dx+0x1]=ax
6048 	  REG_DEAD: ax
6049 	168: di=dx
6050 	  REG_DEAD: dx
6051      */
6052   /* FIXME: see comment above and enable MEM_P
6053      in vinsn_separable_p.  */
6054   gcc_assert (!VINSN_SEPARABLE_P (INSN_VINSN (insn))
6055 	      || !MEM_P (INSN_LHS (insn)));
6056 }
6057 
6058 /* This function is called on the ascending pass, before returning from
6059    the current basic block.  */
6060 static void
6061 move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
6062                        void *static_params)
6063 {
6064   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6065   basic_block book_block = NULL;
6066 
6067   /* When we have removed the boundary insn for scheduling, which also
6068      happened to be the end insn in its bb, we don't need to update sets.  */
6069   if (!lparams->removed_last_insn
6070       && lparams->e1
6071       && sel_bb_head_p (insn))
6072     {
6073       /* We should generate bookkeeping code only if we are not at the
6074          top level of the move_op.  */
6075       if (sel_num_cfg_preds_gt_1 (insn))
6076         book_block = generate_bookkeeping_insn (sparams->c_expr,
6077                                                 lparams->e1, lparams->e2);
6078       /* Update data sets for the current insn.  */
6079       update_data_sets (insn);
6080     }
6081 
6082   /* If bookkeeping code was inserted, we need to update the av sets of the
6083      basic block that received bookkeeping.  After generation of a bookkeeping
6084      insn, the bookkeeping block does not contain a valid av set, because we
6085      are not following the original algorithm in every detail with regard to
6086      e.g. renaming simple reg-reg copies.  Consider this example:
6087 
6088      bookkeeping block           scheduling fence
6089      \            /
6090       \    join  /
6091        ----------
6092        |        |
6093        ----------
6094       /           \
6095      /             \
6096      r1 := r2          r1 := r3
6097 
6098      We try to schedule insn "r1 := r3" on the current
6099      scheduling fence.  Also, note that the av set of the bookkeeping
6100      block contains both insns "r1 := r2" and "r1 := r3".  When the insn
6101      has been scheduled, the CFG is as follows:
6102 
6103      r1 := r3               r1 := r3
6104      bookkeeping block           scheduling fence
6105      \            /
6106       \    join  /
6107        ----------
6108        |        |
6109        ----------
6110       /          \
6111      /            \
6112      r1 := r2
6113 
6114      Here, insn "r1 := r3" was scheduled at the current scheduling point
6115      and bookkeeping code was generated at the bookkeeping block.  This
6116      way insn "r1 := r2" is no longer available as a whole instruction
6117      (but only as an expr) ahead of insn "r1 := r3" in the bookkeeping block.
6118      This situation is handled by calling update_data_sets.
6119 
6120      Since update_data_sets is called only on the bookkeeping block, and
6121      it also may have predecessors with av_sets containing instructions that
6122      are no longer available, we save all such expressions that become
6123      unavailable during data sets update on the bookkeeping block in
6124      VEC_BOOKKEEPING_BLOCKED_VINSNS.  Later we avoid selecting such
6125      expressions for scheduling.  This allows us to avoid recomputation of
6126      av_sets outside the code motion path.  */
6127 
6128   if (book_block)
6129     update_and_record_unavailable_insns (book_block);
6130 
6131   /* If INSN was previously marked for deletion, it's time to do it.  */
6132   if (lparams->removed_last_insn)
6133     insn = PREV_INSN (insn);
6134 
6135   /* Do not tidy control flow at the topmost moveop, as we can erroneously
6136      kill a block with a single nop in which the insn should be emitted.  */
6137   if (lparams->e1)
6138     tidy_control_flow (BLOCK_FOR_INSN (insn), true);
6139 }
6140 
6141 /* This function is called on the ascending pass, before returning from the
6142    current basic block.  */
6143 static void
6144 fur_at_first_insn (insn_t insn,
6145                    cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6146                    void *static_params ATTRIBUTE_UNUSED)
6147 {
6148   gcc_assert (!sel_bb_head_p (insn) || AV_SET_VALID_P (insn)
6149 	      || AV_LEVEL (insn) == -1);
6150 }
6151 
6152 /* Called on the backward stage of recursion to call moveup_expr for INSN
6153    and sparams->c_expr.  */
6154 static void
6155 move_op_ascend (insn_t insn, void *static_params)
6156 {
6157   enum MOVEUP_EXPR_CODE res;
6158   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6159 
6160   if (! INSN_NOP_P (insn))
6161     {
6162       res = moveup_expr_cached (sparams->c_expr, insn, false);
6163       gcc_assert (res != MOVEUP_EXPR_NULL);
6164     }
6165 
6166   /* Update liveness for this insn as it was invalidated.  */
6167   update_liveness_on_insn (insn);
6168 }
6169 
6170 /* This function is called on entering the basic block.
6171    Returns TRUE if this block has already been visited and
6172    code_motion_path_driver should return 1, FALSE otherwise.  */
6173 static int
6174 fur_on_enter (insn_t insn ATTRIBUTE_UNUSED, cmpd_local_params_p local_params,
6175 	      void *static_params, bool visited_p)
6176 {
6177   fur_static_params_p sparams = (fur_static_params_p) static_params;
6178 
6179   if (visited_p)
6180     {
6181       /* If we have found something below this block, there should be at
6182 	 least one insn in ORIGINAL_INSNS.  */
6183       gcc_assert (*sparams->original_insns);
6184 
6185       /* Adjust CROSSES_CALL, since we may have come to this block along
6186 	 a different path.  */
6187       DEF_LIST_DEF (*sparams->original_insns)->crosses_call
6188 	  |= sparams->crosses_call;
6189     }
6190   else
6191     local_params->old_original_insns = *sparams->original_insns;
6192 
6193   return 1;
6194 }
6195 
6196 /* Same as above but for move_op.   */
6197 static int
6198 move_op_on_enter (insn_t insn ATTRIBUTE_UNUSED,
6199                   cmpd_local_params_p local_params ATTRIBUTE_UNUSED,
6200                   void *static_params ATTRIBUTE_UNUSED, bool visited_p)
6201 {
6202   if (visited_p)
6203     return -1;
6204   return 1;
6205 }
6206 
6207 /* This function is called while descending current basic block if current
6208    insn is not the original EXPR we're searching for.
6209 
6210    Return value: FALSE, if code_motion_path_driver should perform a local
6211 			cleanup and return 0 itself;
6212 		 TRUE, if code_motion_path_driver should continue.  */
6213 static bool
6214 move_op_orig_expr_not_found (insn_t insn, av_set_t orig_ops ATTRIBUTE_UNUSED,
6215 			    void *static_params)
6216 {
6217   moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6218 
6219   sparams->failed_insn = insn;
6220 
6221   /* If we're scheduling a separate expr, in order to generate correct code
6222      we need to stop the search at bookkeeping code generated with the
6223      same destination register or memory.  */
6224   if (lhs_of_insn_equals_to_dest_p (insn, sparams->dest))
6225     return false;
6226   return true;
6227 }
6228 
6229 /* This function is called while descending current basic block if current
6230    insn is not the original EXPR we're searching for.
6231 
6232    Return value: TRUE (code_motion_path_driver should continue).  */
6233 static bool
6234 fur_orig_expr_not_found (insn_t insn, av_set_t orig_ops, void *static_params)
6235 {
6236   bool mutexed;
6237   expr_t r;
6238   av_set_iterator avi;
6239   fur_static_params_p sparams = (fur_static_params_p) static_params;
6240 
6241   if (CALL_P (insn))
6242     sparams->crosses_call = true;
6243   else if (DEBUG_INSN_P (insn))
6244     return true;
6245 
6246   /* If the current insn we are looking at cannot be executed together
6247      with the original insn, then we can skip it safely.
6248 
6249      Example: ORIG_OPS = { (p6) r14 = sign_extend (r15); }
6250 	      INSN = (!p6) r14 = r14 + 1;
6251 
6252      Here we can schedule ORIG_OP with lhs = r14, though only
6253      looking at the set of used and set registers of INSN we must
6254      forbid it.  So, add set/used in INSN registers to the
6255      untouchable set only if there is an insn in ORIG_OPS that can
6256      affect INSN.  */
6257   mutexed = true;
6258   FOR_EACH_EXPR (r, avi, orig_ops)
6259     if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (r)))
6260       {
6261 	mutexed = false;
6262 	break;
6263       }
6264 
6265   /* Mark all registers that do not meet the following condition:
6266      (1) Not set or read on any path from xi to an instance of the
6267 	 original operation.  */
6268   if (!mutexed)
6269     {
6270       IOR_REG_SET (sparams->used_regs, INSN_REG_SETS (insn));
6271       IOR_REG_SET (sparams->used_regs, INSN_REG_USES (insn));
6272       IOR_REG_SET (sparams->used_regs, INSN_REG_CLOBBERS (insn));
6273     }
6274 
6275   return true;
6276 }
6277 
6278 /* Hooks and data to perform move_op operations with code_motion_path_driver.  */
6279 struct code_motion_path_driver_info_def move_op_hooks = {
6280   move_op_on_enter,
6281   move_op_orig_expr_found,
6282   move_op_orig_expr_not_found,
6283   move_op_merge_succs,
6284   move_op_after_merge_succs,
6285   move_op_ascend,
6286   move_op_at_first_insn,
6287   SUCCS_NORMAL,
6288   "move_op"
6289 };
6290 
6291 /* Hooks and data to perform find_used_regs operations
6292    with code_motion_path_driver.  */
6293 struct code_motion_path_driver_info_def fur_hooks = {
6294   fur_on_enter,
6295   fur_orig_expr_found,
6296   fur_orig_expr_not_found,
6297   fur_merge_succs,
6298   NULL, /* fur_after_merge_succs */
6299   NULL, /* fur_ascend */
6300   fur_at_first_insn,
6301   SUCCS_ALL,
6302   "find_used_regs"
6303 };
6304 
6305 /* Traverse all successors of INSN.  For each successor that is SUCCS_NORMAL
6306    code_motion_path_driver is called recursively.  The original operation
6307    was found on at least one path starting with one of INSN's
6308    successors (this fact is asserted).  ORIG_OPS are the expressions we're
6309    looking for, PATH is the path we've traversed, STATIC_PARAMS are the
6310    parameters of either move_op or find_used_regs depending on the caller.
6311 
6312    Return 0 if we haven't found the expression, 1 if we found it, -1 if we
6313    don't know for sure at this point.  */
6314 static int
6315 code_motion_process_successors (insn_t insn, av_set_t orig_ops,
6316                                 ilist_t path, void *static_params)
6317 {
6318   int res = 0;
6319   succ_iterator succ_i;
6320   insn_t succ;
6321   basic_block bb;
6322   int old_index;
6323   unsigned old_succs;
6324 
6325   struct cmpd_local_params lparams;
6326   expr_def _x;
6327 
6328   lparams.c_expr_local = &_x;
6329   lparams.c_expr_merged = NULL;
6330 
6331   /* We need to process only NORMAL succs for move_op, and collect live
6332      registers from ALL branches (including those leading out of the
6333      region) for find_used_regs.
6334 
6335      In move_op, there can be a case when the insn's bb number has changed
6336      due to created bookkeeping.  This happens very rarely, as we need to
6337      move an expression from the beginning to the end of the same block.
6338      Rescan successors in this case.  */
6339 
6340  rescan:
6341   bb = BLOCK_FOR_INSN (insn);
6342   old_index = bb->index;
6343   old_succs = EDGE_COUNT (bb->succs);
6344 
6345   FOR_EACH_SUCC_1 (succ, succ_i, insn, code_motion_path_driver_info->succ_flags)
6346     {
6347       int b;
6348 
6349       lparams.e1 = succ_i.e1;
6350       lparams.e2 = succ_i.e2;
6351 
6352       /* Go deep into recursion only for NORMAL edges (non-backedges within the
6353 	 current region).  */
6354       if (succ_i.current_flags == SUCCS_NORMAL)
6355 	b = code_motion_path_driver (succ, orig_ops, path, &lparams,
6356 				     static_params);
6357       else
6358 	b = 0;
6359 
6360       /* Merge c_expres found or unify live register sets from different
6361 	 successors.  */
6362       code_motion_path_driver_info->merge_succs (insn, succ, b, &lparams,
6363 						 static_params);
6364       if (b == 1)
6365         res = b;
6366       else if (b == -1 && res != 1)
6367         res = b;
6368 
6369       /* We have simplified the control flow below this point.  In this case,
6370          the iterator becomes invalid.  We need to try again.
6371 	 If we have removed the insn itself, it could be only an
6372 	 unconditional jump.  Thus, do not rescan but break immediately --
6373 	 we have already visited the only successor block.  */
6374       if (!BLOCK_FOR_INSN (insn))
6375 	{
6376 	  if (sched_verbose >= 6)
6377 	    sel_print ("Not doing rescan: already visited the only successor"
6378 		       " of block %d\n", old_index);
6379 	  break;
6380 	}
6381       if (BLOCK_FOR_INSN (insn)->index != old_index
6382           || EDGE_COUNT (bb->succs) != old_succs)
6383         {
6384 	  if (sched_verbose >= 6)
6385 	    sel_print ("Rescan: CFG was simplified below insn %d, block %d\n",
6386 		       INSN_UID (insn), BLOCK_FOR_INSN (insn)->index);
6387           insn = sel_bb_end (BLOCK_FOR_INSN (insn));
6388           goto rescan;
6389         }
6390     }
6391 
6392   /* Here, RES==1 if the original expr was found for at least one of the
6393      successors.  After the loop, RES can be zero only if at some
6394      point the expr searched for is present in an av_set, but is
6395      not found below.  In most cases, this situation is an error.
6396      The exception is when the original operation is blocked by
6397      bookkeeping generated for another fence or for another path in current
6398      move_op.  */
6399   gcc_checking_assert (res == 1
6400 		       || (res == 0
6401 			    && av_set_could_be_blocked_by_bookkeeping_p (orig_ops, static_params))
6402 		       || res == -1);
6403 
6404   /* Merge data, clean up, etc.  */
6405   if (res != -1 && code_motion_path_driver_info->after_merge_succs)
6406     code_motion_path_driver_info->after_merge_succs (&lparams, static_params);
6407 
6408   return res;
6409 }
6410 
6411 
6412 /* Perform a cleanup when the driver is about to terminate.  ORIG_OPS_P
6413    is the pointer to the av set with expressions we were looking for,
6414    PATH_P is the pointer to the traversed path.  */
6415 static inline void
6416 code_motion_path_driver_cleanup (av_set_t *orig_ops_p, ilist_t *path_p)
6417 {
6418   ilist_remove (path_p);
6419   av_set_clear (orig_ops_p);
6420 }
6421 
6422 /* The driver function that implements move_op or find_used_regs
6423    functionality depending on whether code_motion_path_driver_INFO is set to
6424    &MOVE_OP_HOOKS or &FUR_HOOKS.  This function implements the common parts
6425    of code (CFG traversal etc) that are shared among both functions.  INSN
6426    is the insn we're starting the search from, ORIG_OPS are the expressions
6427    we're searching for, PATH is the traversed path, LOCAL_PARAMS_IN are local
6428    parameters of the driver, and STATIC_PARAMS are static parameters of
6429    the caller.
6430 
6431    Returns whether original instructions were found.  Note that top-level
6432    code_motion_path_driver always returns true.  */
6433 static int
6434 code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
6435 			 cmpd_local_params_p local_params_in,
6436 			 void *static_params)
6437 {
6438   expr_t expr = NULL;
6439   basic_block bb = BLOCK_FOR_INSN (insn);
6440   insn_t first_insn, original_insn, bb_tail, before_first;
6441   bool removed_last_insn = false;
6442 
6443   if (sched_verbose >= 6)
6444     {
6445       sel_print ("%s (", code_motion_path_driver_info->routine_name);
6446       dump_insn (insn);
6447       sel_print (",");
6448       dump_av_set (orig_ops);
6449       sel_print (")\n");
6450     }
6451 
6452   gcc_assert (orig_ops);
6453 
6454   /* If no original operations exist below this insn, return immediately.  */
6455   if (is_ineligible_successor (insn, path))
6456     {
6457       if (sched_verbose >= 6)
6458         sel_print ("Insn %d is ineligible successor\n", INSN_UID (insn));
6459       return false;
6460     }
6461 
6462   /* The block can have an invalid av set, in which case the block was
6463      created earlier during move_op.  Return immediately.  */
6464   if (sel_bb_head_p (insn))
6465     {
6466       if (! AV_SET_VALID_P (insn))
6467         {
6468           if (sched_verbose >= 6)
6469             sel_print ("Returned from block %d as it had invalid av set\n",
6470                        bb->index);
6471           return false;
6472         }
6473 
6474       if (bitmap_bit_p (code_motion_visited_blocks, bb->index))
6475         {
6476           /* We have already found an original operation on this branch, do not
6477              go any further and just return TRUE here.  If we don't stop here,
6478              function can have exponential behavior even on the small code
6479              with many different paths (e.g. with data speculation and
6480              recovery blocks).  */
6481           if (sched_verbose >= 6)
6482             sel_print ("Block %d already visited in this traversal\n", bb->index);
6483           if (code_motion_path_driver_info->on_enter)
6484             return code_motion_path_driver_info->on_enter (insn,
6485                                                            local_params_in,
6486                                                            static_params,
6487                                                            true);
6488         }
6489     }
6490 
6491   if (code_motion_path_driver_info->on_enter)
6492     code_motion_path_driver_info->on_enter (insn, local_params_in,
6493                                             static_params, false);
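  /* Work on a local copy of ORIG_OPS: it is filtered and transformed
     below, while the caller's set must stay intact.  */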
6494   orig_ops = av_set_copy (orig_ops);
6495 
6496   /* Filter the orig_ops set.  */
6497   if (AV_SET_VALID_P (insn))
6498     av_set_code_motion_filter (&orig_ops, AV_SET (insn));
6499 
6500   /* If no more original ops, return immediately.  */
6501   if (!orig_ops)
6502     {
6503       if (sched_verbose >= 6)
6504         sel_print ("No intersection with av set of block %d\n", bb->index);
6505       return false;
6506     }
6507 
6508   /* For non-speculative insns we have to leave only one form of the
6509      original operation, because if we don't, we may end up with
6510      different C_EXPRes and, consequently, with bookkeepings for different
6511      expression forms along the same code motion path.  That may lead to
6512      generation of incorrect code.  So for each code motion we stick to
6513      a single form of the instruction, except for speculative insns
6514      which we need to keep in different forms with all speculation
6515      types.  */
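  /* For instance (a hypothetical illustration): if ORIG_OPS contained both
     "r1 = r2 + r3" and its substituted form "r1 = r5 + r3", only one of the
     two would be kept here, while all speculative variants of an expression
     are retained.  */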
6516   av_set_leave_one_nonspec (&orig_ops);
6517 
6518   /* It is not possible that all ORIG_OPS are filtered out.  */
6519   gcc_assert (orig_ops);
6520 
6521   /* It is enough to place only heads and tails of visited basic blocks into
6522      the PATH.  */
6523   ilist_add (&path, insn);
6524   first_insn = original_insn = insn;
6525   bb_tail = sel_bb_end (bb);
6526 
6527   /* Descend the basic block in search of the original expr; this part
6528      corresponds to the part of the original move_op procedure executed
6529      before the recursive call.  */
6530   for (;;)
6531     {
6532       /* Look at the insn and decide if it could be an ancestor of the
6533 	 currently scheduled operation.  If so, then the insn "dest = op" could
6534 	 either be replaced with "dest = reg", because REG now holds the result
6535 	 of OP, or just removed, if we've scheduled the insn as a whole.
6536 
6537 	 If this insn doesn't contain the currently scheduled OP, then proceed
6538 	 with searching and look at its successors.  Operations we're searching
6539 	 for could have changed when moving up through this insn via
6540 	 substituting.  In this case, perform unsubstitution on them first.
6541 
6542 	 When traversing the DAG below this insn is finished, insert
6543 	 bookkeeping code, if the insn is a join point, and remove
6544 	 leftovers.  */
6545 
6546       expr = av_set_lookup (orig_ops, INSN_VINSN (insn));
6547       if (expr)
6548 	{
6549 	  insn_t last_insn = PREV_INSN (insn);
6550 
6551 	  /* We have found the original operation.  */
6552           if (sched_verbose >= 6)
6553             sel_print ("Found original operation at insn %d\n", INSN_UID (insn));
6554 
6555 	  code_motion_path_driver_info->orig_expr_found
6556             (insn, expr, local_params_in, static_params);
6557 
6558 	  /* Step back, so on the way back we'll start traversing from the
6559 	     previous insn (or we'll see that it's bb_note and skip that
6560 	     loop).  */
6561           if (insn == first_insn)
6562             {
6563               first_insn = NEXT_INSN (last_insn);
6564               removed_last_insn = sel_bb_end_p (last_insn);
6565             }
6566 	  insn = last_insn;
6567 	  break;
6568 	}
6569       else
6570 	{
6571 	  /* We haven't found the original expr, continue descending the basic
6572 	     block.  */
6573 	  if (code_motion_path_driver_info->orig_expr_not_found
6574               (insn, orig_ops, static_params))
6575 	    {
6576 	      /* Av set ops could have been changed when moving through this
6577 	         insn.  To find them below it, we have to un-substitute them.  */
6578 	      undo_transformations (&orig_ops, insn);
6579 	    }
6580 	  else
6581 	    {
6582 	      /* Clean up and return, if the hook tells us to do so.  It may
6583 		 happen if we've encountered the previously created
6584 		 bookkeeping.  */
6585 	      code_motion_path_driver_cleanup (&orig_ops, &path);
6586 	      return -1;
6587 	    }
6588 
6589 	  gcc_assert (orig_ops);
6590         }
6591 
6592       /* Stop at insn if we got to the end of BB.  */
6593       if (insn == bb_tail)
6594 	break;
6595 
6596       insn = NEXT_INSN (insn);
6597     }
6598 
6599   /* Here INSN either points to the insn before the original insn (may be
6600      bb_note, if original insn was a bb_head) or to the bb_end.  */
6601   if (!expr)
6602     {
6603       int res;
6604       rtx_insn *last_insn = PREV_INSN (insn);
6605       bool added_to_path;
6606 
6607       gcc_assert (insn == sel_bb_end (bb));
6608 
6609       /* Add bb tail to PATH (but it doesn't make any sense if it's a bb_head -
6610 	 it's already in PATH then).  */
6611       if (insn != first_insn)
6612 	{
6613 	  ilist_add (&path, insn);
6614 	  added_to_path = true;
6615 	}
6616       else
6617         added_to_path = false;
6618 
6619       /* Process_successors should be able to find at least one
6620 	 successor for which code_motion_path_driver returns TRUE.  */
6621       res = code_motion_process_successors (insn, orig_ops,
6622                                             path, static_params);
6623 
6624       /* The jump at the end of the basic block could have been removed or
6625          replaced during code_motion_process_successors, so recompute insn
6626          as the last insn in bb.  */
6627       if (NEXT_INSN (last_insn) != insn)
6628         {
6629           insn = sel_bb_end (bb);
6630           first_insn = sel_bb_head (bb);
6631 	  if (first_insn != original_insn)
6632 	    first_insn = original_insn;
6633         }
6634 
6635       /* Remove bb tail from path.  */
6636       if (added_to_path)
6637 	ilist_remove (&path);
6638 
6639       if (res != 1)
6640 	{
6641 	  /* This is the case when one of the original exprs is no longer available
6642 	     due to bookkeeping created on this branch with the same register.
6643 	     In the original algorithm, which doesn't have update_data_sets call
6644 	     on a bookkeeping block, it would simply result in returning
6645 	     FALSE when we've encountered a previously generated bookkeeping
6646 	     insn in moveop_orig_expr_not_found.  */
6647 	  code_motion_path_driver_cleanup (&orig_ops, &path);
6648 	  return res;
6649 	}
6650     }
6651 
6652   /* Don't need it any more.  */
6653   av_set_clear (&orig_ops);
6654 
6655   /* Backward pass: now, when we have C_EXPR computed, we'll drag it to
6656      the beginning of the basic block.  */
6657   before_first = PREV_INSN (first_insn);
6658   while (insn != before_first)
6659     {
6660       if (code_motion_path_driver_info->ascend)
6661 	code_motion_path_driver_info->ascend (insn, static_params);
6662 
6663       insn = PREV_INSN (insn);
6664     }
6665 
6666   /* Now we're at the bb head.  */
6667   insn = first_insn;
6668   ilist_remove (&path);
6669   local_params_in->removed_last_insn = removed_last_insn;
6670   code_motion_path_driver_info->at_first_insn (insn, local_params_in, static_params);
6671 
6672   /* This should be the very last operation as at bb head we could change
6673      the numbering by creating bookkeeping blocks.  */
6674   if (removed_last_insn)
6675     insn = PREV_INSN (insn);
6676 
6677   /* If we have simplified the control flow and removed the first jump insn,
6678      there's no point in marking this block in the visited blocks bitmap.  */
6679   if (BLOCK_FOR_INSN (insn))
6680     bitmap_set_bit (code_motion_visited_blocks, BLOCK_FOR_INSN (insn)->index);
6681   return true;
6682 }
6683 
6684 /* Move up the operations from ORIG_OPS set traversing the dag starting
6685    from INSN.  PATH represents the edges traversed so far.
6686    DEST is the register chosen for scheduling the current expr.  Insert
6687    bookkeeping code in the join points.  EXPR_VLIW is the chosen expression,
6688    C_EXPR is how it looks at the given cfg point.
6689    Set *SHOULD_MOVE to indicate whether we have only disconnected
6690    one of the insns found.
6691 
6692    Returns whether original instructions were found, which is asserted
6693    to be true in the caller.  */
6694 static bool
6695 move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
6696          rtx dest, expr_t c_expr, bool *should_move)
6697 {
6698   struct moveop_static_params sparams;
6699   struct cmpd_local_params lparams;
6700   int res;
6701 
6702   /* Init params for code_motion_path_driver.  */
6703   sparams.dest = dest;
6704   sparams.c_expr = c_expr;
6705   sparams.uid = INSN_UID (EXPR_INSN_RTX (expr_vliw));
6706   sparams.failed_insn = NULL;
6707   sparams.was_renamed = false;
6708   lparams.e1 = NULL;
6709 
6710   /* We haven't visited any blocks yet.  */
6711   bitmap_clear (code_motion_visited_blocks);
6712 
6713   /* Set appropriate hooks and data.  */
6714   code_motion_path_driver_info = &move_op_hooks;
6715   res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
6716 
6717   gcc_assert (res != -1);
6718 
6719   if (sparams.was_renamed)
6720     EXPR_WAS_RENAMED (expr_vliw) = true;
6721 
6722   *should_move = (sparams.uid == -1);
6723 
6724   return res;
6725 }
6726 
6727 
6728 /* Functions that work with regions.  */
6729 
6730 /* Current number of seqno used in init_seqno and init_seqno_1.  */
6731 static int cur_seqno;
6732 
6733 /* A helper for init_seqno.  Traverse the region starting from BB and
6734    compute seqnos for visited insns, marking visited bbs in VISITED_BBS.
6735    Clear visited blocks from BLOCKS_TO_RESCHEDULE.  */
6736 static void
6737 init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
6738 {
6739   int bbi = BLOCK_TO_BB (bb->index);
6740   insn_t insn;
6741   insn_t succ_insn;
6742   succ_iterator si;
6743 
6744   rtx_note *note = bb_note (bb);
6745   bitmap_set_bit (visited_bbs, bbi);
6746   if (blocks_to_reschedule)
6747     bitmap_clear_bit (blocks_to_reschedule, bb->index);
6748 
6749   FOR_EACH_SUCC_1 (succ_insn, si, BB_END (bb),
6750 		   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
6751     {
6752       basic_block succ = BLOCK_FOR_INSN (succ_insn);
6753       int succ_bbi = BLOCK_TO_BB (succ->index);
6754 
6755       gcc_assert (in_current_region_p (succ));
6756 
6757       if (!bitmap_bit_p (visited_bbs, succ_bbi))
6758 	{
6759 	  gcc_assert (succ_bbi > bbi);
6760 
6761 	  init_seqno_1 (succ, visited_bbs, blocks_to_reschedule);
6762 	}
6763       else if (blocks_to_reschedule)
6764         bitmap_set_bit (forced_ebb_heads, succ->index);
6765     }
6766 
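  /* Walk the block bottom-up so that insns receive decreasing seqnos;
     the successors were numbered first by the recursion above, so they
     have larger seqnos than the insns of this block.  */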
6767   for (insn = BB_END (bb); insn != note; insn = PREV_INSN (insn))
6768     INSN_SEQNO (insn) = cur_seqno--;
6769 }
6770 
6771 /* Initialize seqnos for the current region.  BLOCKS_TO_RESCHEDULE contains
6772    blocks on which we're rescheduling when pipelining, FROM is the block where
6773    traversal of the region begins (it may not be the head of the region when
6774    pipelining, but the head of the loop instead).
6775 
6776    Returns the maximal seqno found.  */
6777 static int
6778 init_seqno (bitmap blocks_to_reschedule, basic_block from)
6779 {
6780   bitmap_iterator bi;
6781   unsigned bbi;
6782 
6783   auto_sbitmap visited_bbs (current_nr_blocks);
6784 
6785   if (blocks_to_reschedule)
6786     {
6787       bitmap_ones (visited_bbs);
6788       EXECUTE_IF_SET_IN_BITMAP (blocks_to_reschedule, 0, bbi, bi)
6789         {
6790 	  gcc_assert (BLOCK_TO_BB (bbi) < current_nr_blocks);
6791           bitmap_clear_bit (visited_bbs, BLOCK_TO_BB (bbi));
6792 	}
6793     }
6794   else
6795     {
6796       bitmap_clear (visited_bbs);
6797       from = EBB_FIRST_BB (0);
6798     }
6799 
6800   cur_seqno = sched_max_luid - 1;
6801   init_seqno_1 (from, visited_bbs, blocks_to_reschedule);
6802 
6803   /* cur_seqno may be positive if the number of instructions is less than
6804      sched_max_luid - 1 (when rescheduling or if some instructions have been
6805      removed by the call to purge_empty_blocks in sel_sched_region_1).  */
6806   gcc_assert (cur_seqno >= 0);
6807 
6808   return sched_max_luid - 1;
6809 }
6810 
6811 /* Initialize scheduling parameters for current region.  */
6812 static void
6813 sel_setup_region_sched_flags (void)
6814 {
6815   enable_schedule_as_rhs_p = 1;
6816   bookkeeping_p = 1;
6817   pipelining_p = (bookkeeping_p
6818                   && (flag_sel_sched_pipelining != 0)
6819 		  && current_loop_nest != NULL
6820 		  && loop_has_exit_edges (current_loop_nest));
6821   max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME);
6822   max_ws = MAX_WS;
6823 }
6824 
6825 /* Return true if all basic blocks of current region are empty.  */
6826 static bool
6827 current_region_empty_p (void)
6828 {
6829   int i;
6830   for (i = 0; i < current_nr_blocks; i++)
6831     if (! sel_bb_empty_p (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))))
6832       return false;
6833 
6834   return true;
6835 }
6836 
6837 /* Prepare and verify loop nest for pipelining.  */
6838 static void
6839 setup_current_loop_nest (int rgn, bb_vec_t *bbs)
6840 {
6841   current_loop_nest = get_loop_nest_for_rgn (rgn);
6842 
6843   if (!current_loop_nest)
6844     return;
6845 
6846   /* If this loop has any saved loop preheaders from nested loops,
6847      add these basic blocks to the current region.  */
6848   sel_add_loop_preheaders (bbs);
6849 
6850   /* Check that we're starting with valid information.  */
6851   gcc_assert (loop_latch_edge (current_loop_nest));
6852   gcc_assert (LOOP_MARKED_FOR_PIPELINING_P (current_loop_nest));
6853 }
6854 
6855 /* Compute instruction priorities for current region.  */
6856 static void
6857 sel_compute_priorities (int rgn)
6858 {
6859   sched_rgn_compute_dependencies (rgn);
6860 
6861   /* Compute insn priorities in haifa style.  Then free haifa style
6862      dependencies that we've calculated for this.  */
6863   compute_priorities ();
6864 
6865   if (sched_verbose >= 5)
6866     debug_rgn_dependencies (0);
6867 
6868   free_rgn_deps ();
6869 }
6870 
6871 /* Init scheduling data for RGN.  Returns true when this region should not
6872    be scheduled.  */
6873 static bool
6874 sel_region_init (int rgn)
6875 {
6876   int i;
6877   bb_vec_t bbs;
6878 
6879   rgn_setup_region (rgn);
6880 
6881   /* Even if sched_is_disabled_for_current_region_p() is true, we still
6882      do region initialization here so the region can be bundled correctly,
6883      but we'll skip the scheduling in sel_sched_region ().  */
6884   if (current_region_empty_p ())
6885     return true;
6886 
6887   bbs.create (current_nr_blocks);
6888 
6889   for (i = 0; i < current_nr_blocks; i++)
6890     bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)));
6891 
6892   sel_init_bbs (bbs);
6893 
6894   if (flag_sel_sched_pipelining)
6895     setup_current_loop_nest (rgn, &bbs);
6896 
6897   sel_setup_region_sched_flags ();
6898 
6899   /* Initialize luids and dependence analysis which both sel-sched and haifa
6900      need.  */
6901   sched_init_luids (bbs);
6902   sched_deps_init (false);
6903 
6904   /* Initialize haifa data.  */
6905   rgn_setup_sched_infos ();
6906   sel_set_sched_flags ();
6907   haifa_init_h_i_d (bbs);
6908 
6909   sel_compute_priorities (rgn);
6910   init_deps_global ();
6911 
6912   /* Main initialization.  */
6913   sel_setup_sched_infos ();
6914   sel_init_global_and_expr (bbs);
6915 
6916   bbs.release ();
6917 
6918   blocks_to_reschedule = BITMAP_ALLOC (NULL);
6919 
6920   /* Init correct liveness sets on each instruction of a single-block loop.
6921      This is the only situation when we can't update liveness when calling
6922      compute_live for the first insn of the loop.  */
6923   if (current_loop_nest)
6924     {
6925       int header =
6926 	(sel_is_loop_preheader_p (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (0)))
6927 	 ? 1
6928 	 : 0);
6929 
6930       if (current_nr_blocks == header + 1)
6931         update_liveness_on_insn
6932           (sel_bb_head (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (header))));
6933     }
6934 
6935   /* Set hooks so that no newly generated insn will go out unnoticed.  */
6936   sel_register_cfg_hooks ();
6937 
6938   /* !!! We call target.sched.init () for the whole region, but we invoke
6939      targetm.sched.finish () for every ebb.  */
6940   if (targetm.sched.init)
6941     /* None of the arguments are actually used in any target.  */
6942     targetm.sched.init (sched_dump, sched_verbose, -1);
6943 
6944   first_emitted_uid = get_max_uid () + 1;
6945   preheader_removed = false;
6946 
6947   /* Reset register allocation ticks array.  */
6948   memset (reg_rename_tick, 0, sizeof reg_rename_tick);
6949   reg_rename_this_tick = 0;
6950 
6951   forced_ebb_heads = BITMAP_ALLOC (NULL);
6952 
6953   setup_nop_vinsn ();
6954   current_copies = BITMAP_ALLOC (NULL);
6955   current_originators = BITMAP_ALLOC (NULL);
6956   code_motion_visited_blocks = BITMAP_ALLOC (NULL);
6957 
6958   return false;
6959 }
6960 
6961 /* Simplify insns after the scheduling.  */
6962 static void
6963 simplify_changed_insns (void)
6964 {
6965   int i;
6966 
6967   for (i = 0; i < current_nr_blocks; i++)
6968     {
6969       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
6970       rtx_insn *insn;
6971 
6972       FOR_BB_INSNS (bb, insn)
6973 	if (INSN_P (insn))
6974 	  {
6975 	    expr_t expr = INSN_EXPR (insn);
6976 
6977 	    if (EXPR_WAS_SUBSTITUTED (expr))
6978 	      validate_simplify_insn (insn);
6979 	  }
6980     }
6981 }
6982 
6983 /* Find boundaries of the EBB starting from basic block BB, marking blocks of
6984    this EBB in SCHEDULED_BLOCKS and appropriately filling in HEAD, TAIL,
6985    PREV_HEAD, and NEXT_TAIL fields of CURRENT_SCHED_INFO structure.  */
6986 static void
6987 find_ebb_boundaries (basic_block bb, bitmap scheduled_blocks)
6988 {
6989   rtx_insn *head, *tail;
6990   basic_block bb1 = bb;
6991   if (sched_verbose >= 2)
6992     sel_print ("Finishing schedule in bbs: ");
6993 
6994   do
6995     {
6996       bitmap_set_bit (scheduled_blocks, BLOCK_TO_BB (bb1->index));
6997 
6998       if (sched_verbose >= 2)
6999 	sel_print ("%d; ", bb1->index);
7000     }
7001   while (!bb_ends_ebb_p (bb1) && (bb1 = bb_next_bb (bb1)));
7002 
7003   if (sched_verbose >= 2)
7004     sel_print ("\n");
7005 
7006   get_ebb_head_tail (bb, bb1, &head, &tail);
7007 
7008   current_sched_info->head = head;
7009   current_sched_info->tail = tail;
7010   current_sched_info->prev_head = PREV_INSN (head);
7011   current_sched_info->next_tail = NEXT_INSN (tail);
7012 }
7013 
7014 /* Regenerate INSN_SCHED_CYCLEs for insns of current EBB.  */
7015 static void
7016 reset_sched_cycles_in_current_ebb (void)
7017 {
7018   int last_clock = 0;
7019   int haifa_last_clock = -1;
7020   int haifa_clock = 0;
7021   int issued_insns = 0;
7022   insn_t insn;
7023 
7024   if (targetm.sched.init)
7025     {
7026       /* None of the arguments are actually used in any target.
7027 	 NB: We should have an md_reset () hook for cases like this.  */
7028       targetm.sched.init (sched_dump, sched_verbose, -1);
7029     }
7030 
7031   state_reset (curr_state);
7032   advance_state (curr_state);
7033 
7034   for (insn = current_sched_info->head;
7035        insn != current_sched_info->next_tail;
7036        insn = NEXT_INSN (insn))
7037     {
7038       int cost, haifa_cost;
7039       int sort_p;
7040       bool asm_p, real_insn, after_stall, all_issued;
7041       int clock;
7042 
7043       if (!INSN_P (insn))
7044 	continue;
7045 
7046       asm_p = false;
7047       real_insn = recog_memoized (insn) >= 0;
7048       clock = INSN_SCHED_CYCLE (insn);
7049 
7050       cost = clock - last_clock;
7051 
7052       /* Initialize HAIFA_COST.  */
7053       if (! real_insn)
7054 	{
7055 	  asm_p = INSN_ASM_P (insn);
7056 
7057 	  if (asm_p)
7058 	    /* This is an asm insn which *had* to be scheduled first
7059 	       on the cycle.  */
7060 	    haifa_cost = 1;
7061 	  else
7062 	    /* This is a use/clobber insn.  It should not change
7063 	       cost.  */
7064 	    haifa_cost = 0;
7065 	}
7066       else
7067         haifa_cost = estimate_insn_cost (insn, curr_state);
7068 
7069       /* Stall for whatever cycles we've stalled before.  */
7070       after_stall = 0;
7071       if (INSN_AFTER_STALL_P (insn) && cost > haifa_cost)
7072         {
7073           haifa_cost = cost;
7074           after_stall = 1;
7075         }
7076       all_issued = issued_insns == issue_rate;
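      /* If the whole issue rate was already consumed on this cycle, even
	 a zero-cost insn must wait for the next cycle.  */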
7077       if (haifa_cost == 0 && all_issued)
7078 	haifa_cost = 1;
7079       if (haifa_cost > 0)
7080 	{
7081 	  int i = 0;
7082 
7083 	  while (haifa_cost--)
7084 	    {
7085 	      advance_state (curr_state);
7086 	      issued_insns = 0;
7087               i++;
7088 
7089 	      if (sched_verbose >= 2)
7090                 {
7091                   sel_print ("advance_state (state_transition)\n");
7092                   debug_state (curr_state);
7093                 }
7094 
7095               /* The DFA may report that e.g. an insn requires 2 cycles to be
7096                  issued, but on the next cycle it says that the insn is ready
7097                  to go.  Check this here.  */
7098               if (!after_stall
7099                   && real_insn
7100                   && haifa_cost > 0
7101                   && estimate_insn_cost (insn, curr_state) == 0)
7102                 break;
7103 
7104               /* When the data dependency stall is longer than the DFA stall,
7105                  and when we have issued exactly issue_rate insns and stalled,
7106                  it could be that after this longer stall the insn will again
7107                  become unavailable to the DFA restrictions.  This looks strange
7108                  but happens e.g. on x86-64.  So recheck DFA on the last
7109                  iteration.  */
7110               if ((after_stall || all_issued)
7111                   && real_insn
7112                   && haifa_cost == 0)
7113                 haifa_cost = estimate_insn_cost (insn, curr_state);
7114             }
7115 
7116 	  haifa_clock += i;
7117           if (sched_verbose >= 2)
7118             sel_print ("haifa clock: %d\n", haifa_clock);
7119 	}
7120       else
7121 	gcc_assert (haifa_cost == 0);
7122 
7123       if (sched_verbose >= 2)
7124 	sel_print ("Haifa cost for insn %d: %d\n", INSN_UID (insn), haifa_cost);
7125 
7126       if (targetm.sched.dfa_new_cycle)
7127 	while (targetm.sched.dfa_new_cycle (sched_dump, sched_verbose, insn,
7128 					    haifa_last_clock, haifa_clock,
7129 					    &sort_p))
7130 	  {
7131 	    advance_state (curr_state);
7132 	    issued_insns = 0;
7133 	    haifa_clock++;
7134 	    if (sched_verbose >= 2)
7135               {
7136                 sel_print ("advance_state (dfa_new_cycle)\n");
7137                 debug_state (curr_state);
7138 		sel_print ("haifa clock: %d\n", haifa_clock + 1);
7139               }
7140           }
7141 
7142       if (real_insn)
7143 	{
7144 	  static state_t temp = NULL;
7145 
7146 	  if (!temp)
7147 	    temp = xmalloc (dfa_state_size);
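	  /* Snapshot the DFA state so we can tell below whether
	     state_transition actually issued the insn.  */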
7148 	  memcpy (temp, curr_state, dfa_state_size);
7149 
7150 	  cost = state_transition (curr_state, insn);
7151 	  if (memcmp (temp, curr_state, dfa_state_size))
7152 	    issued_insns++;
7153 
7154           if (sched_verbose >= 2)
7155 	    {
7156 	      sel_print ("scheduled insn %d, clock %d\n", INSN_UID (insn),
7157 			 haifa_clock + 1);
7158               debug_state (curr_state);
7159 	    }
7160 	  gcc_assert (cost < 0);
7161 	}
7162 
7163       if (targetm.sched.variable_issue)
7164 	targetm.sched.variable_issue (sched_dump, sched_verbose, insn, 0);
7165 
7166       INSN_SCHED_CYCLE (insn) = haifa_clock;
7167 
7168       last_clock = clock;
7169       haifa_last_clock = haifa_clock;
7170     }
7171 }
7172 
7173 /* Put TImode markers on insns starting a new issue group.  */
7174 static void
7175 put_TImodes (void)
7176 {
7177   int last_clock = -1;
7178   insn_t insn;
7179 
7180   for (insn = current_sched_info->head; insn != current_sched_info->next_tail;
7181        insn = NEXT_INSN (insn))
7182     {
7183       int cost, clock;
7184 
7185       if (!INSN_P (insn))
7186 	continue;
7187 
7188       clock = INSN_SCHED_CYCLE (insn);
7189       cost = (last_clock == -1) ? 1 : clock - last_clock;
7190 
7191       gcc_assert (cost >= 0);
7192 
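      /* A positive cost means INSN starts on a new cycle; after reload it
	 is then marked with TImode as beginning a new issue group.  */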
7193       if (issue_rate > 1
7194 	  && GET_CODE (PATTERN (insn)) != USE
7195 	  && GET_CODE (PATTERN (insn)) != CLOBBER)
7196 	{
7197 	  if (reload_completed && cost > 0)
7198 	    PUT_MODE (insn, TImode);
7199 
7200 	  last_clock = clock;
7201 	}
7202 
7203       if (sched_verbose >= 2)
7204 	sel_print ("Cost for insn %d is %d\n", INSN_UID (insn), cost);
7205     }
7206 }
7207 
7208 /* Perform MD_FINISH on EBBs comprising current region.  When
7209    RESET_SCHED_CYCLES_P is true, run a pass emulating the scheduler
7210    to produce correct sched cycles on insns.  */
7211 static void
7212 sel_region_target_finish (bool reset_sched_cycles_p)
7213 {
7214   int i;
7215   bitmap scheduled_blocks = BITMAP_ALLOC (NULL);
7216 
7217   for (i = 0; i < current_nr_blocks; i++)
7218     {
7219       if (bitmap_bit_p (scheduled_blocks, i))
7220 	continue;
7221 
7222       /* While pipelining outer loops, skip bundling for loop
7223 	 preheaders.  Those will be rescheduled in the outer loop.  */
7224       if (sel_is_loop_preheader_p (EBB_FIRST_BB (i)))
7225 	continue;
7226 
7227       find_ebb_boundaries (EBB_FIRST_BB (i), scheduled_blocks);
7228 
7229       if (no_real_insns_p (current_sched_info->head, current_sched_info->tail))
7230 	continue;
7231 
7232       if (reset_sched_cycles_p)
7233 	reset_sched_cycles_in_current_ebb ();
7234 
7235       if (targetm.sched.init)
7236 	targetm.sched.init (sched_dump, sched_verbose, -1);
7237 
7238       put_TImodes ();
7239 
7240       if (targetm.sched.finish)
7241 	{
7242 	  targetm.sched.finish (sched_dump, sched_verbose);
7243 
7244 	  /* Extend luids so that insns generated by the target will
7245 	     get zero luid.  */
7246 	  sched_extend_luids ();
7247 	}
7248     }
7249 
7250   BITMAP_FREE (scheduled_blocks);
7251 }
7252 
7253 /* Free the scheduling data for the current region.  When RESET_SCHED_CYCLES_P
7254    is true, make an additional pass emulating scheduler to get correct insn
7255    cycles for md_finish calls.  */
7256 static void
7257 sel_region_finish (bool reset_sched_cycles_p)
7258 {
7259   simplify_changed_insns ();
7260   sched_finish_ready_list ();
7261   free_nop_pool ();
7262 
7263   /* Free the vectors.  */
7264   vec_av_set.release ();
7265   BITMAP_FREE (current_copies);
7266   BITMAP_FREE (current_originators);
7267   BITMAP_FREE (code_motion_visited_blocks);
7268   vinsn_vec_free (vec_bookkeeping_blocked_vinsns);
7269   vinsn_vec_free (vec_target_unavailable_vinsns);
7270 
7271   /* If LV_SET of the region head should be updated, do it now because
7272      there will be no other chance.  */
7273   {
7274     succ_iterator si;
7275     insn_t insn;
7276 
7277     FOR_EACH_SUCC_1 (insn, si, bb_note (EBB_FIRST_BB (0)),
7278                      SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
7279       {
7280 	basic_block bb = BLOCK_FOR_INSN (insn);
7281 
7282 	if (!BB_LV_SET_VALID_P (bb))
7283 	  compute_live (insn);
7284       }
7285   }
7286 
7287   /* Emulate the Haifa scheduler for bundling.  */
7288   if (reload_completed)
7289     sel_region_target_finish (reset_sched_cycles_p);
7290 
7291   sel_finish_global_and_expr ();
7292 
7293   BITMAP_FREE (forced_ebb_heads);
7294 
7295   free_nop_vinsn ();
7296 
7297   finish_deps_global ();
7298   sched_finish_luids ();
7299   h_d_i_d.release ();
7300 
7301   sel_finish_bbs ();
7302   BITMAP_FREE (blocks_to_reschedule);
7303 
7304   sel_unregister_cfg_hooks ();
7305 
7306   max_issue_size = 0;
7307 }
7308 
7309 
7310 /* Functions that implement the scheduler driver.  */
7311 
7312 /* Schedule a parallel instruction group on each of FENCES.  MAX_SEQNO
7313    is the current maximum seqno.  SCHEDULED_INSNS_TAILPP is the list
7314    of scheduled insns -- these will be postprocessed later.  */
7315 static void
7316 schedule_on_fences (flist_t fences, int max_seqno,
7317                     ilist_t **scheduled_insns_tailpp)
7318 {
7319   flist_t old_fences = fences;
7320 
7321   if (sched_verbose >= 1)
7322     {
7323       sel_print ("\nScheduling on fences: ");
7324       dump_flist (fences);
7325       sel_print ("\n");
7326     }
7327 
7328   scheduled_something_on_previous_fence = false;
7329   for (; fences; fences = FLIST_NEXT (fences))
7330     {
7331       fence_t fence = NULL;
7332       int seqno = 0;
7333       flist_t fences2;
7334       bool first_p = true;
7335 
7336       /* Choose the next fence group to schedule.
7337          The fact that an insn can be scheduled only once
7338          on a cycle is guaranteed by two properties:
7339          1. seqnos of parallel groups decrease with each iteration.
7340          2. If is_ineligible_successor () sees the larger seqno, it
7341          checks if candidate insn is_in_current_fence_p ().  */
7342       for (fences2 = old_fences; fences2; fences2 = FLIST_NEXT (fences2))
7343         {
7344           fence_t f = FLIST_FENCE (fences2);
7345 
7346           if (!FENCE_PROCESSED_P (f))
7347             {
7348               int i = INSN_SEQNO (FENCE_INSN (f));
7349 
7350               if (first_p || i > seqno)
7351                 {
7352                   seqno = i;
7353                   fence = f;
7354                   first_p = false;
7355                 }
7356               else
7357                 /* ??? Seqnos of different groups should be different.  */
7358                 gcc_assert (1 || i != seqno);
7359             }
7360         }
7361 
7362       gcc_assert (fence);
7363 
7364       /* As FENCE is nonnull, SEQNO is initialized.  */
7365       seqno -= max_seqno + 1;
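      /* SEQNO is now negative: insns scheduled at this fence keep negative
	 seqnos (asserted in update_seqnos_and_stage) until they are
	 renumbered above the highest seqno in use.  */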
7366       fill_insns (fence, seqno, scheduled_insns_tailpp);
7367       FENCE_PROCESSED_P (fence) = true;
7368     }
7369 
7370   /* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we
7371      don't need to keep bookkeeping-invalidated and target-unavailable
7372      vinsns any more.  */
7373   vinsn_vec_clear (&vec_bookkeeping_blocked_vinsns);
7374   vinsn_vec_clear (&vec_target_unavailable_vinsns);
7375 }
7376 
7377 /* Calculate MIN_SEQNO and MAX_SEQNO.  */
7378 static void
7379 find_min_max_seqno (flist_t fences, int *min_seqno, int *max_seqno)
7380 {
7381   *min_seqno = *max_seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
7382 
7383   /* The first element is already processed.  */
7384   while ((fences = FLIST_NEXT (fences)))
7385     {
7386       int seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
7387 
7388       if (*min_seqno > seqno)
7389         *min_seqno = seqno;
7390       else if (*max_seqno < seqno)
7391         *max_seqno = seqno;
7392     }
7393 }
7394 
7395 /* Calculate new fences from FENCES.  Write the current time to PTIME.  */
7396 static flist_t
7397 calculate_new_fences (flist_t fences, int orig_max_seqno, int *ptime)
7398 {
7399   flist_t old_fences = fences;
7400   struct flist_tail_def _new_fences, *new_fences = &_new_fences;
7401   int max_time = 0;
7402 
7403   flist_tail_init (new_fences);
7404   for (; fences; fences = FLIST_NEXT (fences))
7405     {
7406       fence_t fence = FLIST_FENCE (fences);
7407       insn_t insn;
7408 
7409       if (!FENCE_BNDS (fence))
7410         {
7411           /* This fence doesn't have any successors.  */
7412           if (!FENCE_SCHEDULED_P (fence))
7413             {
7414               /* Nothing was scheduled on this fence.  */
7415               int seqno;
7416 
7417               insn = FENCE_INSN (fence);
7418               seqno = INSN_SEQNO (insn);
7419               gcc_assert (seqno > 0 && seqno <= orig_max_seqno);
7420 
7421               if (sched_verbose >= 1)
7422                 sel_print ("Fence %d[%d] has not changed\n",
7423                            INSN_UID (insn),
7424                            BLOCK_NUM (insn));
7425               move_fence_to_fences (fences, new_fences);
7426             }
7427         }
7428       else
7429         extract_new_fences_from (fences, new_fences, orig_max_seqno);
7430       max_time = MAX (max_time, FENCE_CYCLE (fence));
7431     }
7432 
7433   flist_clear (&old_fences);
7434   *ptime = max_time;
7435   return FLIST_TAIL_HEAD (new_fences);
7436 }
7437 
7438 /* Update seqnos of insns given by PSCHEDULED_INSNS.  MIN_SEQNO and MAX_SEQNO
7439    are the minimum and maximum seqnos of the group, HIGHEST_SEQNO_IN_USE is
7440    the highest seqno used in a region.  Return the updated highest seqno.  */
7441 static int
7442 update_seqnos_and_stage (int min_seqno, int max_seqno,
7443                          int highest_seqno_in_use,
7444                          ilist_t *pscheduled_insns)
7445 {
7446   int new_hs;
7447   ilist_iterator ii;
7448   insn_t insn;
7449 
7450   /* Actually, new_hs is the seqno of the instruction that was
7451      scheduled first (i.e. it is the first one in SCHEDULED_INSNS).  */
7452   if (*pscheduled_insns)
7453     {
7454       new_hs = (INSN_SEQNO (ILIST_INSN (*pscheduled_insns))
7455                 + highest_seqno_in_use + max_seqno - min_seqno + 2);
7456       gcc_assert (new_hs > highest_seqno_in_use);
7457     }
7458   else
7459     new_hs = highest_seqno_in_use;
7460 
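  /* Insns scheduled on this stage carry negative seqnos in the range
     [MIN_SEQNO - MAX_SEQNO - 1, -1]; adding HIGHEST_SEQNO_IN_USE
     + MAX_SEQNO - MIN_SEQNO + 2 remaps them into a fresh range that
     starts just above HIGHEST_SEQNO_IN_USE.  */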
  FOR_EACH_INSN (insn, ii, *pscheduled_insns)
    {
      gcc_assert (INSN_SEQNO (insn) < 0);
      INSN_SEQNO (insn) += highest_seqno_in_use + max_seqno - min_seqno + 2;
      gcc_assert (INSN_SEQNO (insn) <= new_hs);

      /* When not pipelining, purge unneeded insn info on the scheduled insns.
         For example, keeping the reg_last array of INSN_DEPS_CONTEXT in
         memory may require more than 1GB, e.g. on limit-fnargs.c.  */
      if (! pipelining_p)
        free_data_for_scheduled_insn (insn);
    }

  ilist_clear (pscheduled_insns);
  global_level++;

  return new_hs;
}

/* The main driver for scheduling a region.  This function is responsible
   for correct propagation of fences (i.e. scheduling points) and creating
   a group of parallel insns at each of them.  It also supports
   pipelining.  ORIG_MAX_SEQNO is the maximal seqno before this pass
   of scheduling.  */
static void
sel_sched_region_2 (int orig_max_seqno)
{
  int highest_seqno_in_use = orig_max_seqno;
  int max_time = 0;

  stat_bookkeeping_copies = 0;
  stat_insns_needed_bookkeeping = 0;
  stat_renamed_scheduled = 0;
  stat_substitutions_total = 0;
  num_insns_scheduled = 0;

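  /* Each iteration of this loop finalizes one parallel group: insns are
     gathered on all active fences, the fences are advanced past the newly
     scheduled code, and the seqnos of the scheduled insns are remapped
     above the ones still in use.  */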
  while (fences)
    {
      int min_seqno, max_seqno;
      ilist_t scheduled_insns = NULL;
      ilist_t *scheduled_insns_tailp = &scheduled_insns;

      find_min_max_seqno (fences, &min_seqno, &max_seqno);
      schedule_on_fences (fences, max_seqno, &scheduled_insns_tailp);
      fences = calculate_new_fences (fences, orig_max_seqno, &max_time);
      highest_seqno_in_use = update_seqnos_and_stage (min_seqno, max_seqno,
                                                      highest_seqno_in_use,
                                                      &scheduled_insns);
    }

  if (sched_verbose >= 1)
    {
      sel_print ("Total scheduling time: %d cycles\n", max_time);
      sel_print ("Scheduled %d bookkeeping copies, %d insns needed "
                 "bookkeeping, %d insns renamed, %d insns substituted\n",
                 stat_bookkeeping_copies,
                 stat_insns_needed_bookkeeping,
                 stat_renamed_scheduled,
                 stat_substitutions_total);
    }
}

/* Schedule a region.  When pipelining, search for possibly never scheduled
   bookkeeping code and schedule it.  Reschedule pipelined code without
   pipelining afterwards.  */
static void
sel_sched_region_1 (void)
{
  int orig_max_seqno;

  /* Remove empty blocks that might be in the region from the beginning.  */
  purge_empty_blocks ();

  orig_max_seqno = init_seqno (NULL, NULL);
  gcc_assert (orig_max_seqno >= 1);

  /* When pipelining outer loops, create fences on the loop header,
     not preheader.  */
  fences = NULL;
  if (current_loop_nest)
    init_fences (BB_END (EBB_FIRST_BB (0)));
  else
    init_fences (bb_note (EBB_FIRST_BB (0)));
  global_level = 1;

  sel_sched_region_2 (orig_max_seqno);

  gcc_assert (fences == NULL);

  if (pipelining_p)
    {
      int i;
      basic_block bb;
      struct flist_tail_def _new_fences;
      flist_tail_t new_fences = &_new_fences;
      bool do_p = true;

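      /* Reschedule the leftover (bookkeeping) code in a conservative
         mode: no pipelining, a reduced maximal window size, no new
         bookkeeping copies and no scheduling of expressions as
         right-hand sides, so that no new unscheduled code can be
         generated.  */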
      pipelining_p = false;
      max_ws = MIN (max_ws, issue_rate * 3 / 2);
      bookkeeping_p = false;
      enable_schedule_as_rhs_p = false;

      /* Schedule newly created code that has not been scheduled yet.  */
      while (do_p)
        {
          do_p = false;

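          /* First pass: collect the blocks that need rescheduling.
             A reschedule request is propagated to the next block of the
             same ebb, empty blocks are skipped, and blocks whose heads
             were never scheduled (as well as targets of speculation
             check jumps) are picked up too.  */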
          for (i = 0; i < current_nr_blocks; i++)
            {
              basic_block bb = EBB_FIRST_BB (i);

              if (bitmap_bit_p (blocks_to_reschedule, bb->index))
                {
                  if (! bb_ends_ebb_p (bb))
                    bitmap_set_bit (blocks_to_reschedule, bb_next_bb (bb)->index);
                  if (sel_bb_empty_p (bb))
                    {
                      bitmap_clear_bit (blocks_to_reschedule, bb->index);
                      continue;
                    }
                  clear_outdated_rtx_info (bb);
                  if (sel_insn_is_speculation_check (BB_END (bb))
                      && JUMP_P (BB_END (bb)))
                    bitmap_set_bit (blocks_to_reschedule,
                                    BRANCH_EDGE (bb)->dest->index);
                }
              else if (! sel_bb_empty_p (bb)
                       && INSN_SCHED_TIMES (sel_bb_head (bb)) <= 0)
                bitmap_set_bit (blocks_to_reschedule, bb->index);
            }

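          /* Second pass: reschedule the first marked block and restart
             from the beginning, since rescheduling it may mark further
             blocks.  */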
          for (i = 0; i < current_nr_blocks; i++)
            {
              bb = EBB_FIRST_BB (i);

              /* While pipelining outer loops, skip bundling for loop
                 preheaders.  Those will be rescheduled in the outer
                 loop.  */
              if (sel_is_loop_preheader_p (bb))
                {
                  clear_outdated_rtx_info (bb);
                  continue;
                }

              if (bitmap_bit_p (blocks_to_reschedule, bb->index))
                {
                  flist_tail_init (new_fences);

                  orig_max_seqno = init_seqno (blocks_to_reschedule, bb);

                  /* Mark BB as head of the new ebb.  */
                  bitmap_set_bit (forced_ebb_heads, bb->index);

                  gcc_assert (fences == NULL);

                  init_fences (bb_note (bb));

                  sel_sched_region_2 (orig_max_seqno);

                  do_p = true;
                  break;
                }
            }
        }
    }
}

/* Schedule the RGN region.  */
void
sel_sched_region (int rgn)
{
  bool schedule_p;
  bool reset_sched_cycles_p;

  if (sel_region_init (rgn))
    return;

  if (sched_verbose >= 1)
    sel_print ("Scheduling region %d\n", rgn);

  schedule_p = (!sched_is_disabled_for_current_region_p ()
                && dbg_cnt (sel_sched_region_cnt));
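  /* When pipelining was used, the insn cycles recorded during scheduling
     may no longer be consistent, so tell sel_region_finish to reset
     them.  */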
  reset_sched_cycles_p = pipelining_p;
  if (schedule_p)
    sel_sched_region_1 ();
  else
    {
      /* Schedule always selecting the next insn in order to produce the
         correct data for bundling and other later passes.  */
      pipelining_p = false;
      reset_sched_cycles_p = false;
      force_next_insn = 1;
      sel_sched_region_1 ();
      force_next_insn = 0;
    }
  sel_region_finish (reset_sched_cycles_p);
}

/* Perform global init for the scheduler.  */
static void
sel_global_init (void)
{
  /* Remove empty blocks: their presence can break assumptions elsewhere,
     e.g. the logic to invoke update_liveness_on_insn in sel_region_init.  */
  cleanup_cfg (0);

  calculate_dominance_info (CDI_DOMINATORS);
  alloc_sched_pools ();

  /* Set up the infos for sched_init.  */
  sel_setup_sched_infos ();
  setup_sched_dump ();

  sched_rgn_init (false);
  sched_init ();

  sched_init_bbs ();
  /* Reset AFTER_RECOVERY if it has been set by the 1st scheduler pass.  */
  after_recovery = 0;
  can_issue_more = issue_rate;

  sched_extend_target ();
  sched_deps_init (true);
  setup_nop_and_exit_insns ();
  sel_extend_global_bb_info ();
  init_lv_sets ();
  init_hard_regs_data ();
}

/* Free the global data of the scheduler.  */
static void
sel_global_finish (void)
{
  free_bb_note_pool ();
  free_lv_sets ();
  sel_finish_global_bb_info ();

  free_regset_pool ();
  free_nop_and_exit_insns ();

  sched_rgn_finish ();
  sched_deps_finish ();
  sched_finish ();

  if (current_loops)
    sel_finish_pipelining ();

  free_sched_pools ();
  free_dominance_info (CDI_DOMINATORS);
}

/* Return true when we need to skip selective scheduling.  Used for
   debugging.  */
bool
maybe_skip_selective_scheduling (void)
{
  return ! dbg_cnt (sel_sched_cnt);
}

/* The entry point.  */
void
run_selective_scheduling (void)
{
  int rgn;

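  /* The function is empty if it contains nothing but the fixed entry
     and exit blocks.  */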
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  sel_global_init ();

  for (rgn = 0; rgn < nr_regions; rgn++)
    sel_sched_region (rgn);

  sel_global_finish ();
}

#endif