/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "rtl.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"
#include "target.h"
#include "sched-int.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */

#ifdef INSN_SCHEDULING
#include "regset.h"
#include "cfgloop.h"
#include "sel-sched-ir.h"
/* We don't have to use it except for sel_print_insn.  */
#include "sel-sched-dump.h"

/* A vector holding bb info for the whole scheduling pass.  */
vec<sel_global_bb_info_def> sel_global_bb_info;

/* A vector holding bb info for the current region.  */
vec<sel_region_bb_info_def> sel_region_bb_info;

/* A pool for allocating all lists.  */
object_allocator<_list_node> sched_lists_pool ("sel-sched-lists");

/* This contains information about successors for compute_av_set.  */
struct succs_info current_succs;

/* Data structure to describe interaction with the generic scheduler utils.  */
static struct common_sched_info_def sel_common_sched_info;

/* The loop nest being pipelined.  */
class loop *current_loop_nest;

/* LOOP_NESTS is a vector containing the corresponding loop nest for
   each region.  */
static vec<loop_p> loop_nests;

/* Saves blocks already in loop regions, indexed by bb->index.  */
static sbitmap bbs_in_loop_rgns = NULL;

/* CFG hooks that are saved before changing create_basic_block hook.  */
static struct cfg_hooks orig_cfg_hooks;


/* Array containing reverse topological index of function basic blocks,
   indexed by BB->INDEX.  */
static int *rev_top_order_index = NULL;

/* Length of the above array.  */
static int rev_top_order_index_len = -1;

/* A regset pool structure.  */
static struct
{
  /* The stack to which regsets are returned.  */
  regset *v;

  /* Its stack pointer (the number of regsets currently in V).  */
  int n;

  /* Its size.  */
  int s;

  /* In VV we save all generated regsets so that, when destructing the
     pool, we can compare it with V and check that every regset was returned
     back to the pool.  */
  regset *vv;

  /* The stack pointer of VV.  */
  int nn;

  /* Its size.  */
  int ss;

  /* The difference between allocated and returned regsets.  */
  int diff;
} regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };

/* This represents the nop pool.  */
static struct
{
  /* The vector which holds previously emitted nops.  */
  insn_t *v;

  /* Its stack pointer.  */
  int n;

  /* Its size.  */
  int s;
} nop_pool = { NULL, 0, 0 };

/* The pool for basic block notes.  */
static vec<rtx_note *> bb_note_pool;

/* A NOP pattern used to emit placeholder insns.  */
rtx nop_pattern = NULL_RTX;
/* A special instruction that resides in EXIT_BLOCK.
   EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK.  */
rtx_insn *exit_insn = NULL;

/* TRUE if, while scheduling the current region (which is a loop), its
   preheader was removed.  */
bool preheader_removed = false;


/* Forward static declarations.  */
static void fence_clear (fence_t);

static void deps_init_id (idata_t, insn_t, bool);
static void init_id_from_df (idata_t, insn_t, bool);
static expr_t set_insn_init (expr_t, vinsn_t, int);

static void cfg_preds (basic_block, insn_t **, int *);
static void prepare_insn_expr (insn_t, int);
static void free_history_vect (vec<expr_history_def> &);

static void move_bb_info (basic_block, basic_block);
static void remove_empty_bb (basic_block, bool);
static void sel_merge_blocks (basic_block, basic_block);
static void sel_remove_loop_preheader (void);
static bool bb_has_removable_jump_to_p (basic_block, basic_block);

static bool insn_is_the_only_one_in_bb_p (insn_t);
static void create_initial_data_sets (basic_block);

static void free_av_set (basic_block);
static void invalidate_av_set (basic_block);
static void extend_insn_data (void);
static void sel_init_new_insn (insn_t, int, int = -1);
static void finish_insns (void);
/* Various list functions.  */

/* Copy an instruction list L.  */
ilist_t
ilist_copy (ilist_t l)
{
  ilist_t head = NULL, *tailp = &head;

  while (l)
    {
      ilist_add (tailp, ILIST_INSN (l));
      tailp = &ILIST_NEXT (*tailp);
      l = ILIST_NEXT (l);
    }

  return head;
}

/* Invert an instruction list L.  */
ilist_t
ilist_invert (ilist_t l)
{
  ilist_t res = NULL;

  while (l)
    {
      ilist_add (&res, ILIST_INSN (l));
      l = ILIST_NEXT (l);
    }

  return res;
}
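
/* Illustrative only: a minimal sketch of the list protocol used
   throughout this file -- push with ilist_add, duplicate with
   ilist_copy, and release with ilist_clear.  INSN is hypothetical.  */
static void ATTRIBUTE_UNUSED
sel_ilist_usage_example (insn_t insn)
{
  ilist_t l = NULL;

  /* Push INSN on the front of L, then duplicate the whole list.  */
  ilist_add (&l, insn);
  ilist_t copy = ilist_copy (l);

  /* Both lists come from sched_lists_pool and must be cleared.  */
  ilist_clear (&l);
  ilist_clear (&copy);
}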

/* Add a new boundary to the LP list with parameters TO, PTR, and DC.  */
void
blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
{
  bnd_t bnd;

  _list_add (lp);
  bnd = BLIST_BND (*lp);

  BND_TO (bnd) = to;
  BND_PTR (bnd) = ptr;
  BND_AV (bnd) = NULL;
  BND_AV1 (bnd) = NULL;
  BND_DC (bnd) = dc;
}

/* Remove the list node pointed to by LP.  */
void
blist_remove (blist_t *lp)
{
  bnd_t b = BLIST_BND (*lp);

  av_set_clear (&BND_AV (b));
  av_set_clear (&BND_AV1 (b));
  ilist_clear (&BND_PTR (b));

  _list_remove (lp);
}

/* Init a fence tail L.  */
void
flist_tail_init (flist_tail_t l)
{
  FLIST_TAIL_HEAD (l) = NULL;
  FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
}

/* Try to find a fence corresponding to INSN in L.  */
fence_t
flist_lookup (flist_t l, insn_t insn)
{
  while (l)
    {
      if (FENCE_INSN (FLIST_FENCE (l)) == insn)
	return FLIST_FENCE (l);

      l = FLIST_NEXT (l);
    }

  return NULL;
}

/* Init the fields of F before running fill_insns.  */
static void
init_fence_for_scheduling (fence_t f)
{
  FENCE_BNDS (f) = NULL;
  FENCE_PROCESSED_P (f) = false;
  FENCE_SCHEDULED_P (f) = false;
}

/* Add a new fence consisting of INSN and STATE to the list pointed to by LP.  */
static void
flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
           insn_t last_scheduled_insn, vec<rtx_insn *, va_gc> *executing_insns,
           int *ready_ticks, int ready_ticks_size, insn_t sched_next,
           int cycle, int cycle_issued_insns, int issue_more,
           bool starts_cycle_p, bool after_stall_p)
{
  fence_t f;

  _list_add (lp);
  f = FLIST_FENCE (*lp);

  FENCE_INSN (f) = insn;

  gcc_assert (state != NULL);
  FENCE_STATE (f) = state;

  FENCE_CYCLE (f) = cycle;
  FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
  FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
  FENCE_AFTER_STALL_P (f) = after_stall_p;

  gcc_assert (dc != NULL);
  FENCE_DC (f) = dc;

  gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
  FENCE_TC (f) = tc;

  FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
  FENCE_ISSUE_MORE (f) = issue_more;
  FENCE_EXECUTING_INSNS (f) = executing_insns;
  FENCE_READY_TICKS (f) = ready_ticks;
  FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
  FENCE_SCHED_NEXT (f) = sched_next;

  init_fence_for_scheduling (f);
}

/* Remove the head node of the list pointed to by LP.  */
static void
flist_remove (flist_t *lp)
{
  if (FENCE_INSN (FLIST_FENCE (*lp)))
    fence_clear (FLIST_FENCE (*lp));
  _list_remove (lp);
}

/* Clear the fence list pointed to by LP.  */
void
flist_clear (flist_t *lp)
{
  while (*lp)
    flist_remove (lp);
}

/* Add ORIGINAL_INSN to the def list DL, honoring CROSSED_CALL_ABIS.  */
void
def_list_add (def_list_t *dl, insn_t original_insn,
	      unsigned int crossed_call_abis)
{
  def_t d;

  _list_add (dl);
  d = DEF_LIST_DEF (*dl);

  d->orig_insn = original_insn;
  d->crossed_call_abis = crossed_call_abis;
}


/* Functions to work with target contexts.  */

/* Bulk target context.  It is convenient for debugging purposes to ensure
   that there are no uninitialized (null) target contexts.  */
static tc_t bulk_tc = (tc_t) 1;

/* Target hooks wrappers.  In the future we can provide some default
   implementations for them.  */

/* Allocate a store for the target context.  */
static tc_t
alloc_target_context (void)
{
  return (targetm.sched.alloc_sched_context
	  ? targetm.sched.alloc_sched_context () : bulk_tc);
}

/* Init target context TC.
   If CLEAN_P is true, then make TC as it would be at the beginning of the
   scheduler.  Otherwise, copy the current backend context to TC.  */
static void
init_target_context (tc_t tc, bool clean_p)
{
  if (targetm.sched.init_sched_context)
    targetm.sched.init_sched_context (tc, clean_p);
}

/* Allocate and initialize a target context.  Meaning of CLEAN_P is the same
   as in init_target_context ().  */
tc_t
create_target_context (bool clean_p)
{
  tc_t tc = alloc_target_context ();

  init_target_context (tc, clean_p);
  return tc;
}

/* Copy TC to the current backend context.  */
void
set_target_context (tc_t tc)
{
  if (targetm.sched.set_sched_context)
    targetm.sched.set_sched_context (tc);
}

/* TC is about to be destroyed.  Free any internal data.  */
static void
clear_target_context (tc_t tc)
{
  if (targetm.sched.clear_sched_context)
    targetm.sched.clear_sched_context (tc);
}

/* Clear and free TC.  */
static void
delete_target_context (tc_t tc)
{
  clear_target_context (tc);

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (tc);
}

/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook.  */
static void
copy_target_context (tc_t to, tc_t from)
{
  tc_t tmp = create_target_context (false);

  /* Use the backend itself as the transfer medium: load FROM into the
     backend, snapshot the backend into TO, then restore the context
     saved in TMP.  */
  set_target_context (from);
  init_target_context (to, false);

  set_target_context (tmp);
  delete_target_context (tmp);
}

/* Create a copy of TC.  */
static tc_t
create_copy_of_target_context (tc_t tc)
{
  tc_t copy = alloc_target_context ();

  copy_target_context (copy, tc);

  return copy;
}

/* Clear TC and initialize it according to CLEAN_P.  The meaning of CLEAN_P
   is the same as in init_target_context ().  */
void
reset_target_context (tc_t tc, bool clean_p)
{
  clear_target_context (tc);
  init_target_context (tc, clean_p);
}
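
/* Illustrative only: a minimal sketch of the target context lifecycle
   that the fence code below relies on -- create a clean context, clone
   it, and delete both when done.  */
static void ATTRIBUTE_UNUSED
sel_target_context_usage_example (void)
{
  tc_t tc = create_target_context (true);
  tc_t copy = create_copy_of_target_context (tc);

  /* ... use TC for scheduling, keeping COPY as a checkpoint ... */

  delete_target_context (copy);
  delete_target_context (tc);
}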

/* Functions to work with dependence contexts.
   DC (aka deps context, aka deps_t, aka class deps_desc *) is short for
   dependence context.  It accumulates information about processed insns
   to decide if the current insn is dependent on the processed ones.  */

/* Make a copy of FROM in TO.  */
static void
copy_deps_context (deps_t to, deps_t from)
{
  init_deps (to, false);
  deps_join (to, from);
}

/* Allocate store for a dep context.  */
static deps_t
alloc_deps_context (void)
{
  return XNEW (class deps_desc);
}

/* Allocate and initialize a dep context.  */
static deps_t
create_deps_context (void)
{
  deps_t dc = alloc_deps_context ();

  init_deps (dc, false);
  return dc;
}

/* Create a copy of FROM.  */
static deps_t
create_copy_of_deps_context (deps_t from)
{
  deps_t to = alloc_deps_context ();

  copy_deps_context (to, from);
  return to;
}

/* Clean up internal data of DC.  */
static void
clear_deps_context (deps_t dc)
{
  free_deps (dc);
}

/* Clear and free DC.  */
static void
delete_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  free (dc);
}

/* Clear and init DC.  */
static void
reset_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  init_deps (dc, false);
}

/* This structure describes the dependence analysis hooks for advancing
   dependence context.  */
static struct sched_deps_info_def advance_deps_context_sched_deps_info =
  {
    NULL,

    NULL, /* start_insn */
    NULL, /* finish_insn */
    NULL, /* start_lhs */
    NULL, /* finish_lhs */
    NULL, /* start_rhs */
    NULL, /* finish_rhs */
    haifa_note_reg_set,
    haifa_note_reg_clobber,
    haifa_note_reg_use,
    NULL, /* note_mem_dep */
    NULL, /* note_dep */

    0, 0, 0
  };

/* Process INSN and add its impact on DC.  */
void
advance_deps_context (deps_t dc, insn_t insn)
{
  sched_deps_info = &advance_deps_context_sched_deps_info;
  deps_analyze_insn (dc, insn);
}
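
/* Illustrative only: a dependence context is typically created once and
   then advanced over each processed insn, so that later insns can be
   tested against the accumulated state.  INSN is hypothetical.  */
static void ATTRIBUTE_UNUSED
sel_deps_context_usage_example (insn_t insn)
{
  deps_t dc = create_deps_context ();

  /* Record the effects of INSN in DC.  */
  advance_deps_context (dc, insn);

  delete_deps_context (dc);
}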


/* Functions to work with DFA states.  */

/* Allocate store for a DFA state.  */
static state_t
state_alloc (void)
{
  return xmalloc (dfa_state_size);
}

/* Allocate and initialize DFA state.  */
static state_t
state_create (void)
{
  state_t state = state_alloc ();

  state_reset (state);
  advance_state (state);
  return state;
}

/* Free DFA state.  */
static void
state_free (state_t state)
{
  free (state);
}

/* Make a copy of FROM in TO.  */
static void
state_copy (state_t to, state_t from)
{
  memcpy (to, from, dfa_state_size);
}

/* Create a copy of FROM.  */
static state_t
state_create_copy (state_t from)
{
  state_t to = state_alloc ();

  state_copy (to, from);
  return to;
}
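
/* Illustrative only: DFA states are plain memory blobs of size
   dfa_state_size, which makes checkpointing cheap -- copy the state
   before a tentative step and restore it if the step is abandoned.  */
static void ATTRIBUTE_UNUSED
sel_dfa_state_usage_example (void)
{
  state_t st = state_create ();
  state_t snapshot = state_create_copy (st);

  /* ... advance ST; on failure, roll back to the snapshot ... */
  state_copy (st, snapshot);

  state_free (snapshot);
  state_free (st);
}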


/* Functions to work with fences.  */

/* Clear the fence.  */
static void
fence_clear (fence_t f)
{
  state_t s = FENCE_STATE (f);
  deps_t dc = FENCE_DC (f);
  void *tc = FENCE_TC (f);

  ilist_clear (&FENCE_BNDS (f));

  gcc_assert ((s != NULL && dc != NULL && tc != NULL)
	      || (s == NULL && dc == NULL && tc == NULL));

  free (s);

  if (dc != NULL)
    delete_deps_context (dc);

  if (tc != NULL)
    delete_target_context (tc);
  vec_free (FENCE_EXECUTING_INSNS (f));
  free (FENCE_READY_TICKS (f));
  FENCE_READY_TICKS (f) = NULL;
}

/* Init a list of fences with successors of OLD_FENCE.  */
void
init_fences (insn_t old_fence)
{
  insn_t succ;
  succ_iterator si;
  bool first = true;
  int ready_ticks_size = get_max_uid () + 1;

  FOR_EACH_SUCC_1 (succ, si, old_fence,
                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      if (first)
        first = false;
      else
        gcc_assert (flag_sel_sched_pipelining_outer_loops);

      flist_add (&fences, succ,
		 state_create (),
		 create_deps_context () /* dc */,
		 create_target_context (true) /* tc */,
		 NULL /* last_scheduled_insn */,
                 NULL, /* executing_insns */
                 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
                 ready_ticks_size,
                 NULL /* sched_next */,
		 1 /* cycle */, 0 /* cycle_issued_insns */,
		 issue_rate, /* issue_more */
		 1 /* starts_cycle_p */, 0 /* after_stall_p */);
    }
}

/* Merges two fences (filling fields of fence F with resulting values) by
   the following rules: 1) state, target context and last scheduled insn are
   propagated from the fallthrough edge if it is available;
   2) deps context and cycle are propagated from the more probable edge;
   3) all other fields are set to corresponding constant values.

   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
   READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
   and AFTER_STALL_P are the corresponding fields of the second fence.  */
static void
merge_fences (fence_t f, insn_t insn,
	      state_t state, deps_t dc, void *tc,
              rtx_insn *last_scheduled_insn,
	      vec<rtx_insn *, va_gc> *executing_insns,
              int *ready_ticks, int ready_ticks_size,
	      rtx sched_next, int cycle, int issue_more, bool after_stall_p)
{
  insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);

  gcc_assert (sel_bb_head_p (FENCE_INSN (f))
              && !sched_next && !FENCE_SCHED_NEXT (f));

  /* Check if we can decide from which path the fences came.
     If we can't (or don't want to), reset all.  */
  if (last_scheduled_insn == NULL
      || last_scheduled_insn_old == NULL
      /* This is a case when INSN is reachable on several paths from
         one insn (this can happen when pipelining of outer loops is on and
         there are two edges: one going around the inner loop and the other
         right through it; in such a case just reset everything).  */
      || last_scheduled_insn == last_scheduled_insn_old)
    {
      state_reset (FENCE_STATE (f));
      state_free (state);

      reset_deps_context (FENCE_DC (f));
      delete_deps_context (dc);

      reset_target_context (FENCE_TC (f), true);
      delete_target_context (tc);

      if (cycle > FENCE_CYCLE (f))
        FENCE_CYCLE (f) = cycle;

      FENCE_LAST_SCHEDULED_INSN (f) = NULL;
      FENCE_ISSUE_MORE (f) = issue_rate;
      vec_free (executing_insns);
      free (ready_ticks);
      if (FENCE_EXECUTING_INSNS (f))
        FENCE_EXECUTING_INSNS (f)->block_remove (0,
					  FENCE_EXECUTING_INSNS (f)->length ());
      if (FENCE_READY_TICKS (f))
        memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
    }
  else
    {
      edge edge_old = NULL, edge_new = NULL;
      edge candidate;
      succ_iterator si;
      insn_t succ;

      /* Find fallthrough edge.  */
      gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
      candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);

      if (!candidate
          || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
              && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
        {
          /* No fallthrough edge leading to basic block of INSN.  */
          state_reset (FENCE_STATE (f));
          state_free (state);

          reset_target_context (FENCE_TC (f), true);
          delete_target_context (tc);

          FENCE_LAST_SCHEDULED_INSN (f) = NULL;
	  FENCE_ISSUE_MORE (f) = issue_rate;
        }
      else
        if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
          {
            state_free (FENCE_STATE (f));
            FENCE_STATE (f) = state;

            delete_target_context (FENCE_TC (f));
            FENCE_TC (f) = tc;

            FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
	    FENCE_ISSUE_MORE (f) = issue_more;
          }
        else
          {
            /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched.  */
            state_free (state);
            delete_target_context (tc);

            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn));
          }

      /* Find edge of first predecessor (last_scheduled_insn_old->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
		       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
	{
	  if (succ == insn)
	    {
	      /* No same successor allowed from several edges.  */
	      gcc_assert (!edge_old);
	      edge_old = si.e1;
	    }
	}
      /* Find edge of second predecessor (last_scheduled_insn->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
		       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
	{
	  if (succ == insn)
	    {
	      /* No same successor allowed from several edges.  */
	      gcc_assert (!edge_new);
	      edge_new = si.e1;
	    }
	}

      /* Check if we can choose the most probable predecessor.  */
      if (edge_old == NULL || edge_new == NULL)
	{
	  reset_deps_context (FENCE_DC (f));
	  delete_deps_context (dc);
	  vec_free (executing_insns);
	  free (ready_ticks);

	  FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
	  if (FENCE_EXECUTING_INSNS (f))
	    FENCE_EXECUTING_INSNS (f)->block_remove (0,
			      FENCE_EXECUTING_INSNS (f)->length ());
	  if (FENCE_READY_TICKS (f))
	    memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
	}
      else
	if (edge_new->probability > edge_old->probability)
	  {
	    delete_deps_context (FENCE_DC (f));
	    FENCE_DC (f) = dc;
	    vec_free (FENCE_EXECUTING_INSNS (f));
	    FENCE_EXECUTING_INSNS (f) = executing_insns;
	    free (FENCE_READY_TICKS (f));
	    FENCE_READY_TICKS (f) = ready_ticks;
	    FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
	    FENCE_CYCLE (f) = cycle;
	  }
	else
	  {
	    /* Leave DC and CYCLE untouched.  */
	    delete_deps_context (dc);
	    vec_free (executing_insns);
	    free (ready_ticks);
	  }
    }

  /* Fill remaining invariant fields.  */
  if (after_stall_p)
    FENCE_AFTER_STALL_P (f) = 1;

  FENCE_ISSUED_INSNS (f) = 0;
  FENCE_STARTS_CYCLE_P (f) = 1;
  FENCE_SCHED_NEXT (f) = NULL;
}

/* Add a new fence to NEW_FENCES list, initializing it from all
   other parameters.  */
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
               state_t state, deps_t dc, void *tc,
	       rtx_insn *last_scheduled_insn,
               vec<rtx_insn *, va_gc> *executing_insns, int *ready_ticks,
               int ready_ticks_size, rtx_insn *sched_next, int cycle,
               int cycle_issued_insns, int issue_rate,
	       bool starts_cycle_p, bool after_stall_p)
{
  fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);

  if (! f)
    {
      flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
		 last_scheduled_insn, executing_insns, ready_ticks,
                 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
		 issue_rate, starts_cycle_p, after_stall_p);

      FLIST_TAIL_TAILP (new_fences)
	= &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
    }
  else
    {
      merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
                    executing_insns, ready_ticks, ready_ticks_size,
                    sched_next, cycle, issue_rate, after_stall_p);
    }
}

/* Move the first fence in the OLD_FENCES list to NEW_FENCES.  */
void
move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
{
  fence_t f, old;
  flist_t *tailp = FLIST_TAIL_TAILP (new_fences);

  old = FLIST_FENCE (old_fences);
  f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
                    FENCE_INSN (FLIST_FENCE (old_fences)));
  if (f)
    {
      merge_fences (f, old->insn, old->state, old->dc, old->tc,
                    old->last_scheduled_insn, old->executing_insns,
                    old->ready_ticks, old->ready_ticks_size,
                    old->sched_next, old->cycle, old->issue_more,
                    old->after_stall_p);
    }
  else
    {
      _list_add (tailp);
      FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
      *FLIST_FENCE (*tailp) = *old;
      init_fence_for_scheduling (FLIST_FENCE (*tailp));
    }
  FENCE_INSN (old) = NULL;
}

/* Add a new fence to NEW_FENCES list and initialize most of its data
   as a clean one.  */
void
add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int ready_ticks_size = get_max_uid () + 1;

  add_to_fences (new_fences,
                 succ, state_create (), create_deps_context (),
                 create_target_context (true),
                 NULL, NULL,
                 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
                 NULL, FENCE_CYCLE (fence) + 1,
                 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
}

/* Add a new fence to NEW_FENCES list and initialize all of its data
   from FENCE and SUCC.  */
void
add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int * new_ready_ticks
    = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));

  memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
          FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
  add_to_fences (new_fences,
                 succ, state_create_copy (FENCE_STATE (fence)),
                 create_copy_of_deps_context (FENCE_DC (fence)),
                 create_copy_of_target_context (FENCE_TC (fence)),
                 FENCE_LAST_SCHEDULED_INSN (fence),
		 vec_safe_copy (FENCE_EXECUTING_INSNS (fence)),
                 new_ready_ticks,
                 FENCE_READY_TICKS_SIZE (fence),
                 FENCE_SCHED_NEXT (fence),
                 FENCE_CYCLE (fence),
                 FENCE_ISSUED_INSNS (fence),
		 FENCE_ISSUE_MORE (fence),
                 FENCE_STARTS_CYCLE_P (fence),
                 FENCE_AFTER_STALL_P (fence));
}


/* Functions to work with regset and nop pools.  */

/* Return a new regset from the pool.  It might have some of the bits set
   from the previous usage.  */
regset
get_regset_from_pool (void)
{
  regset rs;

  if (regset_pool.n != 0)
    rs = regset_pool.v[--regset_pool.n];
  else
    /* We need to create the regset.  */
    {
      rs = ALLOC_REG_SET (&reg_obstack);

      if (regset_pool.nn == regset_pool.ss)
	regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
                                     (regset_pool.ss = 2 * regset_pool.ss + 1));
      regset_pool.vv[regset_pool.nn++] = rs;
    }

  regset_pool.diff++;

  return rs;
}

/* Same as above, but returns the empty regset.  */
regset
get_clear_regset_from_pool (void)
{
  regset rs = get_regset_from_pool ();

  CLEAR_REG_SET (rs);
  return rs;
}

/* Return regset RS to the pool for future use.  */
void
return_regset_to_pool (regset rs)
{
  gcc_assert (rs);
  regset_pool.diff--;

  if (regset_pool.n == regset_pool.s)
    regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
                                (regset_pool.s = 2 * regset_pool.s + 1));
  regset_pool.v[regset_pool.n++] = rs;
}
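
/* Illustrative only: the pool discipline the checking code below
   enforces -- every regset taken from the pool must be returned before
   free_regset_pool () runs, or the leak counter will trip.  */
static void ATTRIBUTE_UNUSED
sel_regset_pool_usage_example (basic_block bb)
{
  regset tmp = get_clear_regset_from_pool ();

  /* Use TMP as scratch space, e.g. to snapshot BB's live-in set.  */
  COPY_REG_SET (tmp, df_get_live_in (bb));

  return_regset_to_pool (tmp);
}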

/* This is used as a qsort callback for sorting regset pool stacks.
   X and XX are addresses of two regsets.  They are never equal.  */
static int
cmp_v_in_regset_pool (const void *x, const void *xx)
{
  uintptr_t r1 = (uintptr_t) *((const regset *) x);
  uintptr_t r2 = (uintptr_t) *((const regset *) xx);
  if (r1 > r2)
    return 1;
  else if (r1 < r2)
    return -1;
  gcc_unreachable ();
}

/* Free the regset pool, possibly checking for memory leaks.  */
void
free_regset_pool (void)
{
  if (flag_checking)
    {
      regset *v = regset_pool.v;
      int i = 0;
      int n = regset_pool.n;

      regset *vv = regset_pool.vv;
      int ii = 0;
      int nn = regset_pool.nn;

      int diff = 0;

      gcc_assert (n <= nn);

      /* Sort both vectors so it will be possible to compare them.  */
      qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
      qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);

      while (ii < nn)
	{
	  if (v[i] == vv[ii])
	    i++;
	  else
	    /* VV[II] was lost.  */
	    diff++;

	  ii++;
	}

      gcc_assert (diff == regset_pool.diff);
    }

  /* If not true, we have a memory leak.  */
  gcc_assert (regset_pool.diff == 0);

  while (regset_pool.n)
    {
      --regset_pool.n;
      FREE_REG_SET (regset_pool.v[regset_pool.n]);
    }

  free (regset_pool.v);
  regset_pool.v = NULL;
  regset_pool.s = 0;

  free (regset_pool.vv);
  regset_pool.vv = NULL;
  regset_pool.nn = 0;
  regset_pool.ss = 0;

  regset_pool.diff = 0;
}


/* Functions to work with nop pools.  NOP insns are used as temporary
   placeholders of the insns being scheduled to allow correct update of
   the data sets.  When update is finished, NOPs are deleted.  */

/* A vinsn that is used to represent a nop.  This vinsn is shared among all
   nops sel-sched generates.  */
static vinsn_t nop_vinsn = NULL;

/* Emit a nop before INSN, taking it from the pool.  */
insn_t
get_nop_from_pool (insn_t insn)
{
  rtx nop_pat;
  insn_t nop;
  bool old_p = nop_pool.n != 0;
  int flags;

  if (old_p)
    nop_pat = nop_pool.v[--nop_pool.n];
  else
    nop_pat = nop_pattern;

  nop = emit_insn_before (nop_pat, insn);

  if (old_p)
    flags = INSN_INIT_TODO_SSID;
  else
    flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;

  set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
  sel_init_new_insn (nop, flags);

  return nop;
}

/* Remove NOP from the instruction stream and return it to the pool.  */
void
return_nop_to_pool (insn_t nop, bool full_tidying)
{
  gcc_assert (INSN_IN_STREAM_P (nop));
  sel_remove_insn (nop, false, full_tidying);

  /* We'll recycle this nop.  */
  nop->set_undeleted ();

  if (nop_pool.n == nop_pool.s)
    nop_pool.v = XRESIZEVEC (rtx_insn *, nop_pool.v,
                             (nop_pool.s = 2 * nop_pool.s + 1));
  nop_pool.v[nop_pool.n++] = nop;
}

/* Free the nop pool.  */
void
free_nop_pool (void)
{
  nop_pool.n = 0;
  nop_pool.s = 0;
  free (nop_pool.v);
  nop_pool.v = NULL;
}
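
/* Illustrative only: the intended nop pool protocol.  A nop emitted
   before INSN acts as a placeholder while the data sets are updated,
   and is recycled afterwards.  INSN is hypothetical.  */
static void ATTRIBUTE_UNUSED
sel_nop_pool_usage_example (insn_t insn)
{
  insn_t nop = get_nop_from_pool (insn);

  /* ... update data sets while NOP holds INSN's place ... */

  return_nop_to_pool (nop, true);
}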


/* Skip unspec to support ia64 speculation.  Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY in case one of them needs to be skipped.  */
static int
skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx *ny)
{
  const_rtx x = *xx;
  const_rtx y = *yy;

  if (GET_CODE (x) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (x)))
    {
      *nx = XVECEXP (x, 0, 0);
      *ny = CONST_CAST_RTX (y);
      return 1;
    }

  if (GET_CODE (y) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (y)))
    {
      *nx = CONST_CAST_RTX (x);
      *ny = XVECEXP (y, 0, 0);
      return 1;
    }

  return 0;
}

/* Callback, called from hash_rtx_cb.  Helps to hash UNSPEC rtx X in a correct
   way to support ia64 speculation.  When changes are needed, a new rtx NX and
   a new mode NMODE are written, and the callback returns true.  */
static int
hash_with_unspec_callback (const_rtx x, machine_mode mode ATTRIBUTE_UNUSED,
                           rtx *nx, machine_mode *nmode)
{
  if (GET_CODE (x) == UNSPEC
      && targetm.sched.skip_rtx_p
      && targetm.sched.skip_rtx_p (x))
    {
      *nx = XVECEXP (x, 0, 0);
      *nmode = VOIDmode;
      return 1;
    }

  return 0;
}
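
/* For example, on ia64 a speculative load is represented as an unspec
   wrapping the original memory access; skipping the unspec in both
   comparison and hashing lets the speculative and non-speculative forms
   of the same insn be treated as equal.  */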

/* Return true if LHS and RHS are OK to be scheduled separately.  */
static bool
lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
{
  if (lhs == NULL || rhs == NULL)
    return false;

  /* Do not schedule constants as rhs: there is no point in using a reg
     if a const can be used.  Moreover, scheduling a const as rhs may
     lead to a mode mismatch, because consts don't have modes but they
     could be merged from branches where the same const is used in
     different modes.  */
  if (CONSTANT_P (rhs))
    return false;

  /* ??? Do not rename predicate registers to avoid ICEs in bundling.  */
  if (COMPARISON_P (rhs))
    return false;

  /* Do not allow single REG to be an rhs.  */
  if (REG_P (rhs))
    return false;

  /* See comment at find_used_regs_1 (*1) for explanation of this
     restriction.  */
  /* FIXME: remove this later.  */
  if (MEM_P (lhs))
    return false;

  /* This will filter all tricky things like ZERO_EXTRACT etc.
     For now we don't handle it.  */
  if (!REG_P (lhs) && !MEM_P (lhs))
    return false;

  return true;
}
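
/* For instance, (set (reg A) (plus (reg B) (reg C))) is separable: the
   rhs can be computed early into another register and copied into A
   later.  By the checks above, (set (reg A) (const_int 0)) and
   (set (mem X) (reg B)) are not.  */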

/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When
   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This is
   used e.g. for insns from recovery blocks.  */
static void
vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
{
  hash_rtx_callback_function hrcf;
  int insn_class;

  VINSN_INSN_RTX (vi) = insn;
  VINSN_COUNT (vi) = 0;
  vi->cost = -1;

  if (INSN_NOP_P (insn))
    return;

  if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
    init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
  else
    deps_init_id (VINSN_ID (vi), insn, force_unique_p);

  /* Hash vinsn depending on whether it is separable or not.  */
  hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
  if (VINSN_SEPARABLE_P (vi))
    {
      rtx rhs = VINSN_RHS (vi);

      VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
                                         VOIDmode, NULL, NULL,
                                         false, hrcf);
    }
  else
    {
      VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
    }

  insn_class = haifa_classify_insn (insn);
  if (insn_class >= 2
      && (!targetm.sched.get_insn_spec_ds
          || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
              == 0)))
    VINSN_MAY_TRAP_P (vi) = true;
  else
    VINSN_MAY_TRAP_P (vi) = false;
}

/* Indicate that VI has become the part of an rtx object.  */
void
vinsn_attach (vinsn_t vi)
{
  /* Assert that VI is not pending for deletion.  */
  gcc_assert (VINSN_INSN_RTX (vi));

  VINSN_COUNT (vi)++;
}

/* Create and init VI from the INSN.  Use FORCE_UNIQUE_P for determining the
   correct VINSN_TYPE (VI).  */
static vinsn_t
vinsn_create (insn_t insn, bool force_unique_p)
{
  vinsn_t vi = XCNEW (struct vinsn_def);

  vinsn_init (vi, insn, force_unique_p);
  return vi;
}

/* Return a copy of VI.  When REATTACH_P is true, detach VI and attach
   the copy.  */
vinsn_t
vinsn_copy (vinsn_t vi, bool reattach_p)
{
  rtx_insn *copy;
  bool unique = VINSN_UNIQUE_P (vi);
  vinsn_t new_vi;

  copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
  new_vi = create_vinsn_from_insn_rtx (copy, unique);
  if (reattach_p)
    {
      vinsn_detach (vi);
      vinsn_attach (new_vi);
    }

  return new_vi;
}

/* Delete the VI vinsn and free its data.  */
static void
vinsn_delete (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) == 0);

  if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
    {
      return_regset_to_pool (VINSN_REG_SETS (vi));
      return_regset_to_pool (VINSN_REG_USES (vi));
      return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
    }

  free (vi);
}

/* Indicate that VI is no longer a part of some rtx object.
   Remove VI if it is no longer needed.  */
void
vinsn_detach (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) > 0);

  if (--VINSN_COUNT (vi) == 0)
    vinsn_delete (vi);
}

/* Returns TRUE if VI is a branch.  */
bool
vinsn_cond_branch_p (vinsn_t vi)
{
  insn_t insn;

  if (!VINSN_UNIQUE_P (vi))
    return false;

  insn = VINSN_INSN_RTX (vi);
  if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
    return false;

  return control_flow_insn_p (insn);
}

/* Return latency of INSN.  */
static int
sel_insn_rtx_cost (rtx_insn *insn)
{
  int cost;

  /* A USE insn, or something else we don't need to
     understand.  We can't pass these directly to
     result_ready_cost or insn_default_latency because it will
     trigger a fatal error for unrecognizable insns.  */
  if (recog_memoized (insn) < 0)
    cost = 0;
  else
    {
      cost = insn_default_latency (insn);

      if (cost < 0)
	cost = 0;
    }

  return cost;
}

/* Return the cost of the VI.
   !!! FIXME: Unify with haifa-sched.c: insn_sched_cost ().  */
int
sel_vinsn_cost (vinsn_t vi)
{
  int cost = vi->cost;

  if (cost < 0)
    {
      cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
      vi->cost = cost;
    }

  return cost;
}


/* Functions for insn emitting.  */

/* Emit new insn after AFTER based on PATTERN and initialize its data from
   EXPR and SEQNO.  */
insn_t
sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
{
  insn_t new_insn;

  gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);

  new_insn = emit_insn_after (pattern, after);
  set_insn_init (expr, NULL, seqno);
  sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);

  return new_insn;
}

/* Force newly generated vinsns to be unique.  */
static bool init_insn_force_unique_p = false;

/* Emit new speculation recovery insn after AFTER based on PATTERN and
   initialize its data from EXPR and SEQNO.  */
insn_t
sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
				      insn_t after)
{
  insn_t insn;

  gcc_assert (!init_insn_force_unique_p);

  init_insn_force_unique_p = true;
  insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
  CANT_MOVE (insn) = 1;
  init_insn_force_unique_p = false;

  return insn;
}

/* Emit a new insn after AFTER based on EXPR and SEQNO.  If VINSN is not NULL,
   take it as a new vinsn instead of EXPR's vinsn.
   We simplify insns later, after scheduling the region, in
   simplify_changed_insns.  */
insn_t
sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
                              insn_t after)
{
  expr_t emit_expr;
  insn_t insn;
  int flags;

  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
                             seqno);
  insn = EXPR_INSN_RTX (emit_expr);

  /* The insn may come from the transformation cache, which may hold already
     deleted insns, so mark it as not deleted.  */
  insn->set_undeleted ();

  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));

  flags = INSN_INIT_TODO_SSID;
  if (INSN_LUID (insn) == 0)
    flags |= INSN_INIT_TODO_LUID;
  sel_init_new_insn (insn, flags);

  return insn;
}

/* Move insn from EXPR after AFTER.  */
insn_t
sel_move_insn (expr_t expr, int seqno, insn_t after)
{
  insn_t insn = EXPR_INSN_RTX (expr);
  basic_block bb = BLOCK_FOR_INSN (after);
  insn_t next = NEXT_INSN (after);

  /* Assert that in move_op we disconnected this insn properly.  */
  gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
  SET_PREV_INSN (insn) = after;
  SET_NEXT_INSN (insn) = next;

  SET_NEXT_INSN (after) = insn;
  SET_PREV_INSN (next) = insn;

  /* Update links from insn to bb and vice versa.  */
  df_insn_change_bb (insn, bb);
  if (BB_END (bb) == after)
    BB_END (bb) = insn;

  prepare_insn_expr (insn, seqno);
  return insn;
}


/* Functions to work with right-hand sides.  */

/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
   VECT and return true when found.  Use NEW_VINSN for comparison only when
   COMPARE_VINSNS is true.  Write to INDP the index on which
   the search has stopped, such that inserting the new element at INDP will
   retain VECT's sort order.  */
static bool
find_in_history_vect_1 (vec<expr_history_def> vect,
                        unsigned uid, vinsn_t new_vinsn,
                        bool compare_vinsns, int *indp)
{
  expr_history_def *arr;
  int i, j, len = vect.length ();

  if (len == 0)
    {
      *indp = 0;
      return false;
    }

  arr = vect.address ();
  i = 0, j = len - 1;

  while (i <= j)
    {
      unsigned auid = arr[i].uid;
      vinsn_t avinsn = arr[i].new_expr_vinsn;

      if (auid == uid
          /* When undoing transformation on a bookkeeping copy, the new vinsn
             may not be exactly equal to the one that is saved in the vector.
             This is because the insn whose copy we're checking was possibly
             substituted itself.  */
          && (! compare_vinsns
              || vinsn_equal_p (avinsn, new_vinsn)))
        {
          *indp = i;
          return true;
        }
      else if (auid > uid)
        break;
      i++;
    }

  *indp = i;
  return false;
}

/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT.  Return
   the position found or -1, if no such value is in the vector.
   Search also for UIDs of insn's originators, if ORIGINATORS_P is true.  */
int
find_in_history_vect (vec<expr_history_def> vect, rtx insn,
                      vinsn_t new_vinsn, bool originators_p)
{
  int ind;

  if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
                              false, &ind))
    return ind;

  if (INSN_ORIGINATORS (insn) && originators_p)
    {
      unsigned uid;
      bitmap_iterator bi;

      EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
        if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
          return ind;
    }

  return -1;
}

/* Insert new element in a sorted history vector pointed to by PVECT,
   if it is not there already.  The element is searched using
   UID/NEW_EXPR_VINSN pair.  TYPE, OLD_EXPR_VINSN and SPEC_DS save
   the history of a transformation.  */
void
insert_in_history_vect (vec<expr_history_def> *pvect,
                        unsigned uid, enum local_trans_type type,
                        vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
                        ds_t spec_ds)
{
  vec<expr_history_def> vect = *pvect;
  expr_history_def temp;
  bool res;
  int ind;

  res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);

  if (res)
    {
      expr_history_def *phist = &vect[ind];

      /* It is possible that speculation types of expressions that were
         propagated through different paths will be different here.  In this
         case, merge the status to get the correct check later.  */
      if (phist->spec_ds != spec_ds)
        phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
      return;
    }

  temp.uid = uid;
  temp.old_expr_vinsn = old_expr_vinsn;
  temp.new_expr_vinsn = new_expr_vinsn;
  temp.spec_ds = spec_ds;
  temp.type = type;

  vinsn_attach (old_expr_vinsn);
  vinsn_attach (new_expr_vinsn);
  vect.safe_insert (ind, temp);
  *pvect = vect;
}

/* Free history vector PVECT.  */
static void
free_history_vect (vec<expr_history_def> &pvect)
{
  unsigned i;
  expr_history_def *phist;

  if (! pvect.exists ())
    return;

  for (i = 0; pvect.iterate (i, &phist); i++)
    {
      vinsn_detach (phist->old_expr_vinsn);
      vinsn_detach (phist->new_expr_vinsn);
    }

  pvect.release ();
}

/* Merge vector FROM to PVECT.  */
static void
merge_history_vect (vec<expr_history_def> *pvect,
		    vec<expr_history_def> from)
{
  expr_history_def *phist;
  int i;

  /* We keep this vector sorted.  */
  for (i = 0; from.iterate (i, &phist); i++)
    insert_in_history_vect (pvect, phist->uid, phist->type,
                            phist->old_expr_vinsn, phist->new_expr_vinsn,
                            phist->spec_ds);
}

/* Compare two vinsns as rhses if possible and as vinsns otherwise.  */
bool
vinsn_equal_p (vinsn_t x, vinsn_t y)
{
  rtx_equal_p_callback_function repcf;

  if (x == y)
    return true;

  if (VINSN_TYPE (x) != VINSN_TYPE (y))
    return false;

  if (VINSN_HASH (x) != VINSN_HASH (y))
    return false;

  repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
  if (VINSN_SEPARABLE_P (x))
    {
      /* Compare RHSes of VINSNs.  */
      gcc_assert (VINSN_RHS (x));
      gcc_assert (VINSN_RHS (y));

      return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
    }

  return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
}


/* Functions for working with expressions.  */

/* Initialize EXPR.  */
static void
init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
	   int sched_times, int orig_bb_index, ds_t spec_done_ds,
	   ds_t spec_to_check_ds, int orig_sched_cycle,
	   vec<expr_history_def> history,
	   signed char target_available,
           bool was_substituted, bool was_renamed, bool needs_spec_check_p,
           bool cant_move)
{
  vinsn_attach (vi);

  EXPR_VINSN (expr) = vi;
  EXPR_SPEC (expr) = spec;
  EXPR_USEFULNESS (expr) = use;
  EXPR_PRIORITY (expr) = priority;
  EXPR_PRIORITY_ADJ (expr) = 0;
  EXPR_SCHED_TIMES (expr) = sched_times;
  EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
  EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
  EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
  EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;

  if (history.exists ())
    EXPR_HISTORY_OF_CHANGES (expr) = history;
  else
    EXPR_HISTORY_OF_CHANGES (expr).create (0);

  EXPR_TARGET_AVAILABLE (expr) = target_available;
  EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
  EXPR_WAS_RENAMED (expr) = was_renamed;
  EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
  EXPR_CANT_MOVE (expr) = cant_move;
}

/* Make a copy of the expr FROM into the expr TO.  */
void
copy_expr (expr_t to, expr_t from)
{
  vec<expr_history_def> temp = vNULL;

  if (EXPR_HISTORY_OF_CHANGES (from).exists ())
    {
      unsigned i;
      expr_history_def *phist;

      temp = EXPR_HISTORY_OF_CHANGES (from).copy ();
      for (i = 0;
           temp.iterate (i, &phist);
           i++)
        {
          vinsn_attach (phist->old_expr_vinsn);
          vinsn_attach (phist->new_expr_vinsn);
        }
    }

  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
             EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
	     EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
	     EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
	     EXPR_ORIG_SCHED_CYCLE (from), temp,
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}

/* Same, but the final expr will not ever be in av sets, so don't copy
   "uninteresting" data such as bitmap cache.  */
void
copy_expr_onside (expr_t to, expr_t from)
{
  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
	     EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
	     EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0,
	     vNULL,
	     EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
	     EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}

/* Prepare the expr of INSN for scheduling.  Used when moving insn and when
   initializing new insns.  */
static void
prepare_insn_expr (insn_t insn, int seqno)
{
  expr_t expr = INSN_EXPR (insn);
  ds_t ds;

  INSN_SEQNO (insn) = seqno;
  EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
  EXPR_SPEC (expr) = 0;
  EXPR_ORIG_SCHED_CYCLE (expr) = 0;
  EXPR_WAS_SUBSTITUTED (expr) = 0;
  EXPR_WAS_RENAMED (expr) = 0;
  EXPR_TARGET_AVAILABLE (expr) = 1;
  INSN_LIVE_VALID_P (insn) = false;

  /* ??? If this expression is speculative, make its dependence
     as weak as possible.  We can filter this expression later
     in process_spec_exprs, because we do not distinguish
     between the status we got during compute_av_set and the
     existing status.  To be fixed.  */
  ds = EXPR_SPEC_DONE_DS (expr);
  if (ds)
    EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);

  free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
}
1722 
1723 /* Update target_available bits when merging exprs TO and FROM.  SPLIT_POINT
1724    is non-null when expressions are merged from different successors at
1725    a split point.  */
1726 static void
1727 update_target_availability (expr_t to, expr_t from, insn_t split_point)
1728 {
1729   if (EXPR_TARGET_AVAILABLE (to) < 0
1730       || EXPR_TARGET_AVAILABLE (from) < 0)
1731     EXPR_TARGET_AVAILABLE (to) = -1;
1732   else
1733     {
1734       /* We try to detect the case when one of the expressions
1735          can only be reached through another one.  In this case,
1736          we can do better.  */
1737       if (split_point == NULL)
1738         {
1739           int toind, fromind;
1740 
1741           toind = EXPR_ORIG_BB_INDEX (to);
1742           fromind = EXPR_ORIG_BB_INDEX (from);
1743 
1744           if (toind && toind == fromind)
1745             /* Do nothing -- everything is done in
1746                merge_with_other_exprs.  */
1747             ;
1748           else
1749             EXPR_TARGET_AVAILABLE (to) = -1;
1750         }
1751       else if (EXPR_TARGET_AVAILABLE (from) == 0
1752 	       && EXPR_LHS (from)
1753 	       && REG_P (EXPR_LHS (from))
1754 	       && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from)))
1755 	EXPR_TARGET_AVAILABLE (to) = -1;
1756       else
1757         EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
1758     }
1759 }
1760 
1761 /* Update speculation bits when merging exprs TO and FROM.  SPLIT_POINT
1762    is non-null when expressions are merged from different successors at
1763    a split point.  */
1764 static void
1765 update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
1766 {
1767   ds_t old_to_ds, old_from_ds;
1768 
1769   old_to_ds = EXPR_SPEC_DONE_DS (to);
1770   old_from_ds = EXPR_SPEC_DONE_DS (from);
1771 
1772   EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
1773   EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
1774   EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
1775 
1776   /* When merging e.g. control & data speculative exprs, or a control
1777      speculative with a control&data speculative one, we really have
1778      to change vinsn too.  Also, when speculative status is changed,
1779      we also need to record this as a transformation in expr's history.  */
1780   if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
1781     {
1782       old_to_ds = ds_get_speculation_types (old_to_ds);
1783       old_from_ds = ds_get_speculation_types (old_from_ds);
1784 
1785       if (old_to_ds != old_from_ds)
1786         {
1787           ds_t record_ds;
1788 
1789           /* When both expressions are speculative, we need to change
1790              the vinsn first.  */
1791           if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
1792             {
1793               int res;
1794 
1795               res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
1796               gcc_assert (res >= 0);
1797             }
1798 
1799           if (split_point != NULL)
1800             {
1801               /* Record the change with proper status.  */
1802               record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
1803               record_ds &= ~(old_to_ds & SPECULATIVE);
1804               record_ds &= ~(old_from_ds & SPECULATIVE);
1805 
1806               insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1807                                       INSN_UID (split_point), TRANS_SPECULATION,
1808                                       EXPR_VINSN (from), EXPR_VINSN (to),
1809                                       record_ds);
1810             }
1811         }
1812     }
1813 }
1814 
1815 
1816 /* Merge bits of FROM expr to TO expr.  When SPLIT_POINT is not NULL,
1817    this is done along different paths.  */
1818 void
1819 merge_expr_data (expr_t to, expr_t from, insn_t split_point)
1820 {
1821   /* Choose the maximum of the specs of merged exprs.  This is required
1822      for correctness of bookkeeping.  */
1823   if (EXPR_SPEC (to) < EXPR_SPEC (from))
1824     EXPR_SPEC (to) = EXPR_SPEC (from);
1825 
1826   if (split_point)
1827     EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
1828   else
1829     EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
1830                                 EXPR_USEFULNESS (from));
1831 
1832   if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
1833     EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
1834 
1835   /* We merge sched-times half-way to the larger value to avoid the endless
1836      pipelining of unneeded insns.  The average seems to be a good compromise
1837      between pipelining opportunities and avoiding extra work.  */
1838   if (EXPR_SCHED_TIMES (to) != EXPR_SCHED_TIMES (from))
1839     EXPR_SCHED_TIMES (to) = ((EXPR_SCHED_TIMES (from) + EXPR_SCHED_TIMES (to)
1840                              + 1) / 2);
1841 
1842   if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
1843     EXPR_ORIG_BB_INDEX (to) = 0;
1844 
1845   EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
1846                                     EXPR_ORIG_SCHED_CYCLE (from));
1847 
1848   EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
1849   EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
1850   EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
1851 
1852   merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1853 		      EXPR_HISTORY_OF_CHANGES (from));
1854   update_target_availability (to, from, split_point);
1855   update_speculative_bits (to, from, split_point);
1856 }
1857 
1858 /* Merge bits of FROM expr to TO expr.  Vinsns in the exprs should be equal
1859    in terms of vinsn_equal_p.  SPLIT_POINT is non-null when expressions
1860    are merged from different successors at a split point.  */
1861 void
1862 merge_expr (expr_t to, expr_t from, insn_t split_point)
1863 {
1864   vinsn_t to_vi = EXPR_VINSN (to);
1865   vinsn_t from_vi = EXPR_VINSN (from);
1866 
1867   gcc_assert (vinsn_equal_p (to_vi, from_vi));
1868 
1869   /* Make sure that speculative pattern is propagated into exprs that
1870      have non-speculative one.  This will provide us with consistent
1871      speculative bits and speculative patterns inside expr.  */
1872   if (EXPR_SPEC_DONE_DS (to) == 0
1873       && (EXPR_SPEC_DONE_DS (from) != 0
1874 	  /* Do likewise for volatile insns, so that we always retain
1875 	     the may_trap_p bit on the resulting expression.  However,
1876 	     avoid propagating the trapping bit into the instructions
1877 	     already speculated.  This would result in replacing the
1878 	     speculative pattern with the non-speculative one and breaking
1879 	     the speculation support.  */
1880 	  || (!VINSN_MAY_TRAP_P (EXPR_VINSN (to))
1881 	      && VINSN_MAY_TRAP_P (EXPR_VINSN (from)))))
1882     change_vinsn_in_expr (to, EXPR_VINSN (from));
1883 
1884   merge_expr_data (to, from, split_point);
1885   gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
1886 }
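
/* An illustrative sketch (not compiled in) of the typical merge pattern:
   when an expression equivalent to EXPR2 is found in the av set of the
   other successor, the two are merged into a single element.  The example
   function name is hypothetical.  */
#if 0
static void
example_merge_at_split (av_set_t set1, expr_t expr2, insn_t split_point)
{
  expr_t expr1 = av_set_lookup (set1, EXPR_VINSN (expr2));

  /* merge_expr requires vinsn_equal_p on the two vinsns, which
     av_set_lookup has already established.  */
  if (expr1 != NULL)
    merge_expr (expr1, expr2, split_point);
}
#endif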
1887 
1888 /* Clear the information of this EXPR.  */
1889 void
1890 clear_expr (expr_t expr)
1891 {
1892 
1893   vinsn_detach (EXPR_VINSN (expr));
1894   EXPR_VINSN (expr) = NULL;
1895 
1896   free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
1897 }
1898 
1899 /* For a given LV_SET, mark EXPR as having an unavailable target register.  */
1900 static void
1901 set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1902 {
1903   if (EXPR_SEPARABLE_P (expr))
1904     {
1905       if (REG_P (EXPR_LHS (expr))
1906           && register_unavailable_p (lv_set, EXPR_LHS (expr)))
1907 	{
1908 	  /* If it's an insn like r1 = use (r1, ...), and it exists in
1909 	     different forms in each of the av_sets being merged, we can't say
1910 	     whether the original destination register is available or not.
1911 	     However, this still works if the destination register is not used
1912 	     in the original expression: if the branch whose LV_SET we're
1913 	     looking at here is not actually the 'other branch' in the sense
1914 	     that the same expression is available through it (but this can't
1915 	     be determined at the computation stage because of transformations
1916 	     on one of the branches), it still won't affect the availability.
1917 	     Liveness of a register somewhere on a code motion path means
1918 	     it's either read somewhere on a code motion path, live on
1919 	     the 'other' branch, live at the point immediately following
1920 	     the original operation, or read by the original operation.
1921 	     The latter case is filtered out in the condition below.
1922 	     This still doesn't cover the case when a register is defined and
1923 	     used somewhere within the code motion path; in that case we could
1924 	     miss a unifying code motion along both branches using a renamed
1925 	     register, but it won't affect code correctness since upon
1926 	     an actual code motion bookkeeping code would be generated.  */
1927 	  if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1928 				      EXPR_LHS (expr)))
1929 	    EXPR_TARGET_AVAILABLE (expr) = -1;
1930 	  else
1931 	    EXPR_TARGET_AVAILABLE (expr) = false;
1932 	}
1933     }
1934   else
1935     {
1936       unsigned regno;
1937       reg_set_iterator rsi;
1938 
1939       EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
1940                                  0, regno, rsi)
1941         if (bitmap_bit_p (lv_set, regno))
1942           {
1943             EXPR_TARGET_AVAILABLE (expr) = false;
1944             break;
1945           }
1946 
1947       EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1948                                  0, regno, rsi)
1949         if (bitmap_bit_p (lv_set, regno))
1950           {
1951             EXPR_TARGET_AVAILABLE (expr) = false;
1952             break;
1953           }
1954     }
1955 }
1956 
1957 /* Try to make EXPR speculative.  Return 1 when EXPR's pattern
1958    or dependence status has changed, 2 when additionally the target register
1959    became unavailable, 0 if nothing had to be changed, and -1 on failure.  */
1960 int
1961 speculate_expr (expr_t expr, ds_t ds)
1962 {
1963   int res;
1964   rtx_insn *orig_insn_rtx;
1965   rtx spec_pat;
1966   ds_t target_ds, current_ds;
1967 
1968   /* Obtain the status we need to put on EXPR.   */
1969   target_ds = (ds & SPECULATIVE);
1970   current_ds = EXPR_SPEC_DONE_DS (expr);
1971   ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1972 
1973   orig_insn_rtx = EXPR_INSN_RTX (expr);
1974 
1975   res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1976 
1977   switch (res)
1978     {
1979     case 0:
1980       EXPR_SPEC_DONE_DS (expr) = ds;
1981       return current_ds != ds ? 1 : 0;
1982 
1983     case 1:
1984       {
1985 	rtx_insn *spec_insn_rtx =
1986 	  create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
1987 	vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
1988 
1989 	change_vinsn_in_expr (expr, spec_vinsn);
1990 	EXPR_SPEC_DONE_DS (expr) = ds;
1991         EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
1992 
1993         /* Do not allow clobbering the address register of speculative
1994            insns.  */
1995         if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1996 				    expr_dest_reg (expr)))
1997           {
1998             EXPR_TARGET_AVAILABLE (expr) = false;
1999             return 2;
2000           }
2001 
2002 	return 1;
2003       }
2004 
2005     case -1:
2006       return -1;
2007 
2008     default:
2009       gcc_unreachable ();
2010       return -1;
2011     }
2012 }
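
/* A sketch (not compiled in) of how a caller would interpret the return
   values documented above; the example wrapper itself is hypothetical.  */
#if 0
static void
example_speculate (expr_t expr, ds_t ds)
{
  switch (speculate_expr (expr, ds))
    {
    case 0:	/* Nothing had to be changed.  */
      break;
    case 1:	/* Pattern or dependence status changed.  */
      break;
    case 2:	/* Additionally, the target register became unavailable.  */
      break;
    case -1:	/* Speculation failed.  */
      break;
    }
}
#endif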
2013 
2014 /* Return a destination register, if any, of EXPR.  */
2015 rtx
2016 expr_dest_reg (expr_t expr)
2017 {
2018   rtx dest = VINSN_LHS (EXPR_VINSN (expr));
2019 
2020   if (dest != NULL_RTX && REG_P (dest))
2021     return dest;
2022 
2023   return NULL_RTX;
2024 }
2025 
2026 /* Returns the REGNO of EXPR's destination.  */
2027 unsigned
2028 expr_dest_regno (expr_t expr)
2029 {
2030   rtx dest = expr_dest_reg (expr);
2031 
2032   gcc_assert (dest != NULL_RTX);
2033   return REGNO (dest);
2034 }
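
/* A small sketch (not compiled in): expr_dest_regno asserts a register
   destination, so guard it with expr_dest_reg first.  The helper name is
   hypothetical.  */
#if 0
static int
example_dest_regno (expr_t expr)
{
  rtx dest = expr_dest_reg (expr);

  if (dest == NULL_RTX)
    return -1;				/* No register destination.  */
  return (int) expr_dest_regno (expr);	/* Safe: DEST is a reg.  */
}
#endif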
2035 
2036 /* For a given LV_SET, mark all expressions in JOIN_SET that are not present
2037    in AV_SET as having an unavailable target register.  */
2038 void
2039 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2040 {
2041   expr_t expr;
2042   av_set_iterator avi;
2043 
2044   FOR_EACH_EXPR (expr, avi, join_set)
2045     if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2046       set_unavailable_target_for_expr (expr, lv_set);
2047 }
2048 
2049 
2050 /* Returns true if REG (at least partially) is present in REGS.  */
2051 bool
2052 register_unavailable_p (regset regs, rtx reg)
2053 {
2054   unsigned regno, end_regno;
2055 
2056   regno = REGNO (reg);
2057   if (bitmap_bit_p (regs, regno))
2058     return true;
2059 
2060   end_regno = END_REGNO (reg);
2061 
2062   while (++regno < end_regno)
2063     if (bitmap_bit_p (regs, regno))
2064       return true;
2065 
2066   return false;
2067 }
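
/* A usage sketch (not compiled in): REG is considered unavailable as soon
   as any of the hard registers it spans is set in REGS, which the loop
   over REGNO..END_REGNO-1 above implements.  The example function is an
   assumption of this sketch.  */
#if 0
static bool
example_reg_busy_p (rtx reg)
{
  regset regs = get_clear_regset_from_pool ();
  bool busy;

  SET_REGNO_REG_SET (regs, REGNO (reg));	/* Mark only the first hard reg.  */
  busy = register_unavailable_p (regs, reg);	/* True even on partial overlap.  */
  return_regset_to_pool (regs);
  return busy;
}
#endif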
2068 
2069 /* Av set functions.  */
2070 
2071 /* Add a new element to av set SETP.
2072    Return the element added.  */
2073 static av_set_t
2074 av_set_add_element (av_set_t *setp)
2075 {
2076   /* Insert at the beginning of the list.  */
2077   _list_add (setp);
2078   return *setp;
2079 }
2080 
2081 /* Add EXPR to SETP.  */
2082 void
2083 av_set_add (av_set_t *setp, expr_t expr)
2084 {
2085   av_set_t elem;
2086 
2087   gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2088   elem = av_set_add_element (setp);
2089   copy_expr (_AV_SET_EXPR (elem), expr);
2090 }
2091 
2092 /* Same, but do not copy EXPR.  */
2093 static void
2094 av_set_add_nocopy (av_set_t *setp, expr_t expr)
2095 {
2096   av_set_t elem;
2097 
2098   elem = av_set_add_element (setp);
2099   *_AV_SET_EXPR (elem) = *expr;
2100 }
2101 
2102 /* Remove expr pointed to by IP from the av_set.  */
2103 void
2104 av_set_iter_remove (av_set_iterator *ip)
2105 {
2106   clear_expr (_AV_SET_EXPR (*ip->lp));
2107   _list_iter_remove (ip);
2108 }
2109 
2110 /* Search for an expr in SET such that it's equivalent to SOUGHT_VINSN in the
2111    sense of the vinsn_equal_p function.  Return NULL if no such expr is
2112    found in SET.  */
2113 expr_t
2114 av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2115 {
2116   expr_t expr;
2117   av_set_iterator i;
2118 
2119   FOR_EACH_EXPR (expr, i, set)
2120     if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2121       return expr;
2122   return NULL;
2123 }
2124 
2125 /* Same, but also remove the EXPR found.   */
2126 static expr_t
2127 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2128 {
2129   expr_t expr;
2130   av_set_iterator i;
2131 
2132   FOR_EACH_EXPR_1 (expr, i, setp)
2133     if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2134       {
2135         _list_iter_remove_nofree (&i);
2136         return expr;
2137       }
2138   return NULL;
2139 }
2140 
2141 /* Search for an expr in SET such that it's equivalent to EXPR in the
2142    sense of the vinsn_equal_p function on their vinsns, but is not EXPR
2143    itself.  Returns NULL if no such expr is found in SET.  */
2144 static expr_t
2145 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2146 {
2147   expr_t cur_expr;
2148   av_set_iterator i;
2149 
2150   FOR_EACH_EXPR (cur_expr, i, set)
2151     {
2152       if (cur_expr == expr)
2153         continue;
2154       if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2155         return cur_expr;
2156     }
2157 
2158   return NULL;
2159 }
2160 
2161 /* If an equivalent expression is already in *AVP, merge them and remove one.  */
2162 expr_t
2163 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2164 {
2165   expr_t expr2;
2166 
2167   expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2168   if (expr2 != NULL)
2169     {
2170       /* Reset target availability on merge, since taking it only from one
2171 	 of the exprs would be controversial for different code.  */
2172       EXPR_TARGET_AVAILABLE (expr2) = -1;
2173       EXPR_USEFULNESS (expr2) = 0;
2174 
2175       merge_expr (expr2, expr, NULL);
2176 
2177       /* Fix usefulness as it should be now REG_BR_PROB_BASE.  */
2178       EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2179 
2180       av_set_iter_remove (ip);
2181       return expr2;
2182     }
2183 
2184   return expr;
2185 }
2186 
2187 /* Return true if there is an expr that correlates to VI in SET.  */
2188 bool
2189 av_set_is_in_p (av_set_t set, vinsn_t vi)
2190 {
2191   return av_set_lookup (set, vi) != NULL;
2192 }
2193 
2194 /* Return a copy of SET.  */
2195 av_set_t
2196 av_set_copy (av_set_t set)
2197 {
2198   expr_t expr;
2199   av_set_iterator i;
2200   av_set_t res = NULL;
2201 
2202   FOR_EACH_EXPR (expr, i, set)
2203     av_set_add (&res, expr);
2204 
2205   return res;
2206 }
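
/* A lifecycle sketch (not compiled in) for the basic av set operations
   defined above.  EXPR must not be a nop insn, per the assert in
   av_set_add; the example function is hypothetical.  */
#if 0
static void
example_av_set_lifecycle (expr_t expr)
{
  av_set_t set = NULL, copy;

  av_set_add (&set, expr);	/* Deep-copies EXPR into the set.  */
  gcc_assert (av_set_is_in_p (set, EXPR_VINSN (expr)));

  copy = av_set_copy (set);	/* Element-wise copy via av_set_add.  */

  av_set_clear (&copy);		/* Both sets must be cleared separately.  */
  av_set_clear (&set);
}
#endif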
2207 
2208 /* Join two av sets that do not have common elements by attaching the second
2209    set (pointed to by FROMP) to the end of the first set (TO_TAILP must point
2210    to _AV_SET_NEXT of the first set's last element).  */
2211 static void
2212 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2213 {
2214   gcc_assert (*to_tailp == NULL);
2215   *to_tailp = *fromp;
2216   *fromp = NULL;
2217 }
2218 
2219 /* Make the set pointed to by TOP the union of TOP and FROMP.  Clear the
2220    av_set pointed to by FROMP afterwards.  */
2221 void
2222 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2223 {
2224   expr_t expr1;
2225   av_set_iterator i;
2226 
2227   /* Delete from TOP all exprs that are present in FROMP.  */
2228   FOR_EACH_EXPR_1 (expr1, i, top)
2229     {
2230       expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2231 
2232       if (expr2)
2233 	{
2234           merge_expr (expr2, expr1, insn);
2235 	  av_set_iter_remove (&i);
2236 	}
2237     }
2238 
2239   join_distinct_sets (i.lp, fromp);
2240 }
2241 
2242 /* Same as above, but also update the availability of the target register in
2243    TOP based on TO_LV_SET and FROM_LV_SET.  */
2244 void
2245 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2246                        regset from_lv_set, insn_t insn)
2247 {
2248   expr_t expr1;
2249   av_set_iterator i;
2250   av_set_t *to_tailp, in_both_set = NULL;
2251 
2252   /* Delete from TOP all exprs that are present in FROMP.  */
2253   FOR_EACH_EXPR_1 (expr1, i, top)
2254     {
2255       expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2256 
2257       if (expr2)
2258 	{
2259           /* It may be that the expressions have different destination
2260              registers, in which case we need to check liveness here.  */
2261           if (EXPR_SEPARABLE_P (expr1))
2262             {
2263               int regno1 = (REG_P (EXPR_LHS (expr1))
2264                             ? (int) expr_dest_regno (expr1) : -1);
2265               int regno2 = (REG_P (EXPR_LHS (expr2))
2266                             ? (int) expr_dest_regno (expr2) : -1);
2267 
2268               /* ??? We don't have a way to check restrictions for the
2269                  *other* register on the current path; we did it only
2270                  for the current target register.  Give up.  */
2271               if (regno1 != regno2)
2272                 EXPR_TARGET_AVAILABLE (expr2) = -1;
2273             }
2274           else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2275             EXPR_TARGET_AVAILABLE (expr2) = -1;
2276 
2277           merge_expr (expr2, expr1, insn);
2278           av_set_add_nocopy (&in_both_set, expr2);
2279 	  av_set_iter_remove (&i);
2280 	}
2281       else
2282         /* EXPR1 is present in TOP, but not in FROMP.  Check it on
2283            FROM_LV_SET.  */
2284         set_unavailable_target_for_expr (expr1, from_lv_set);
2285     }
2286   to_tailp = i.lp;
2287 
2288   /* These expressions are not present in TOP.  Check liveness
2289      restrictions on TO_LV_SET.  */
2290   FOR_EACH_EXPR (expr1, i, *fromp)
2291     set_unavailable_target_for_expr (expr1, to_lv_set);
2292 
2293   join_distinct_sets (i.lp, &in_both_set);
2294   join_distinct_sets (to_tailp, fromp);
2295 }
2296 
2297 /* Clear av_set pointed to by SETP.  */
2298 void
2299 av_set_clear (av_set_t *setp)
2300 {
2301   expr_t expr;
2302   av_set_iterator i;
2303 
2304   FOR_EACH_EXPR_1 (expr, i, setp)
2305     av_set_iter_remove (&i);
2306 
2307   gcc_assert (*setp == NULL);
2308 }
2309 
2310 /* Leave only one non-speculative element in the set pointed to by SETP.  */
2311 void
2312 av_set_leave_one_nonspec (av_set_t *setp)
2313 {
2314   expr_t expr;
2315   av_set_iterator i;
2316   bool has_one_nonspec = false;
2317 
2318   /* Keep all speculative exprs, and leave one non-speculative
2319      (the first one).  */
2320   FOR_EACH_EXPR_1 (expr, i, setp)
2321     {
2322       if (!EXPR_SPEC_DONE_DS (expr))
2323 	{
2324   	  if (has_one_nonspec)
2325 	    av_set_iter_remove (&i);
2326 	  else
2327 	    has_one_nonspec = true;
2328 	}
2329     }
2330 }
2331 
2332 /* Return the N'th element of the SET.  */
2333 expr_t
2334 av_set_element (av_set_t set, int n)
2335 {
2336   expr_t expr;
2337   av_set_iterator i;
2338 
2339   FOR_EACH_EXPR (expr, i, set)
2340     if (n-- == 0)
2341       return expr;
2342 
2343   gcc_unreachable ();
2344   return NULL;
2345 }
2346 
2347 /* Deletes all expressions from AVP that are conditional branches (IFs).  */
2348 void
2349 av_set_substract_cond_branches (av_set_t *avp)
2350 {
2351   av_set_iterator i;
2352   expr_t expr;
2353 
2354   FOR_EACH_EXPR_1 (expr, i, avp)
2355     if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2356       av_set_iter_remove (&i);
2357 }
2358 
2359 /* Multiply the usefulness attribute of each member of av-set AV by
2360    the value PROB / ALL_PROB.  */
2361 void
2362 av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2363 {
2364   av_set_iterator i;
2365   expr_t expr;
2366 
2367   FOR_EACH_EXPR (expr, i, av)
2368     EXPR_USEFULNESS (expr) = (all_prob
2369                               ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2370                               : 0);
2371 }
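
/* A worked example (assuming the usual REG_BR_PROB_BASE of 10000): when an
   av set is propagated through an edge taken with probability 1/4, each
   usefulness value is scaled by 2500/10000.  Not compiled in.  */
#if 0
  /* With EXPR_USEFULNESS (expr) == 10000 for some member of AV ...  */
  av_set_split_usefulness (av, 2500, 10000);
  /* ... that member's usefulness is now (10000 * 2500) / 10000 == 2500.  */
#endif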
2372 
2373 /* Leave in AVP only those expressions which are present in AV,
2374    merging their histories of changes.  */
2375 void
2376 av_set_code_motion_filter (av_set_t *avp, av_set_t av)
2377 {
2378   av_set_iterator i;
2379   expr_t expr, expr2;
2380 
2381   FOR_EACH_EXPR_1 (expr, i, avp)
2382     if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL)
2383       av_set_iter_remove (&i);
2384     else
2385       /* When updating av sets in bookkeeping blocks, we can add more insns
2386 	 there which will be transformed but the upper av sets will not
2387 	 reflect those transformations.  We then fail to undo those
2388 	 when searching for such insns.  So merge the history saved
2389 	 in the av set of the block we are processing.  */
2390       merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2391 			  EXPR_HISTORY_OF_CHANGES (expr2));
2392 }
2393 
2394 
2395 
2396 /* Dependence hooks to initialize insn data.  */
2397 
2398 /* This is used in hooks callable from dependence analysis when initializing
2399    an instruction's data.  */
2400 static struct
2401 {
2402   /* Where the dependence was found (lhs/rhs).  */
2403   deps_where_t where;
2404 
2405   /* The actual data object to initialize.  */
2406   idata_t id;
2407 
2408   /* True when the insn should not be made clonable.  */
2409   bool force_unique_p;
2410 
2411   /* True when insn should be treated as of type USE, i.e. never renamed.  */
2412   bool force_use_p;
2413 } deps_init_id_data;
2414 
2415 
2416 /* Setup ID for INSN.  FORCE_UNIQUE_P is true when INSN should not be
2417    clonable.  */
2418 static void
2419 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2420 {
2421   int type;
2422 
2423   /* Determine whether INSN could be cloned and return the appropriate vinsn
2424      type.  Clonable insns which can be separated into lhs and rhs have type
2425      SET; other clonable insns have type USE.  */
2426   type = GET_CODE (insn);
2427 
2428   /* Only regular insns could be cloned.  */
2429   if (type == INSN && !force_unique_p)
2430     type = SET;
2431   else if (type == JUMP_INSN && simplejump_p (insn))
2432     type = PC;
2433   else if (type == DEBUG_INSN)
2434     type = !force_unique_p ? USE : INSN;
2435 
2436   IDATA_TYPE (id) = type;
2437   IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2438   IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2439   IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2440 }
2441 
2442 /* Start initializing insn data.  */
2443 static void
2444 deps_init_id_start_insn (insn_t insn)
2445 {
2446   gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2447 
2448   setup_id_for_insn (deps_init_id_data.id, insn,
2449                      deps_init_id_data.force_unique_p);
2450   deps_init_id_data.where = DEPS_IN_INSN;
2451 }
2452 
2453 /* Start initializing lhs data.  */
2454 static void
2455 deps_init_id_start_lhs (rtx lhs)
2456 {
2457   gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2458   gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2459 
2460   if (IDATA_TYPE (deps_init_id_data.id) == SET)
2461     {
2462       IDATA_LHS (deps_init_id_data.id) = lhs;
2463       deps_init_id_data.where = DEPS_IN_LHS;
2464     }
2465 }
2466 
2467 /* Finish initializing lhs data.  */
2468 static void
2469 deps_init_id_finish_lhs (void)
2470 {
2471   deps_init_id_data.where = DEPS_IN_INSN;
2472 }
2473 
2474 /* Note a set of REGNO.  */
2475 static void
2476 deps_init_id_note_reg_set (int regno)
2477 {
2478   haifa_note_reg_set (regno);
2479 
2480   if (deps_init_id_data.where == DEPS_IN_RHS)
2481     deps_init_id_data.force_use_p = true;
2482 
2483   if (IDATA_TYPE (deps_init_id_data.id) != PC)
2484     SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2485 
2486 #ifdef STACK_REGS
2487   /* Make instructions that set stack registers to be ineligible for
2488      renaming to avoid issues with find_used_regs.  */
2489   if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2490     deps_init_id_data.force_use_p = true;
2491 #endif
2492 }
2493 
2494 /* Note a clobber of REGNO.  */
2495 static void
2496 deps_init_id_note_reg_clobber (int regno)
2497 {
2498   haifa_note_reg_clobber (regno);
2499 
2500   if (deps_init_id_data.where == DEPS_IN_RHS)
2501     deps_init_id_data.force_use_p = true;
2502 
2503   if (IDATA_TYPE (deps_init_id_data.id) != PC)
2504     SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2505 }
2506 
2507 /* Note a use of REGNO.  */
2508 static void
2509 deps_init_id_note_reg_use (int regno)
2510 {
2511   haifa_note_reg_use (regno);
2512 
2513   if (IDATA_TYPE (deps_init_id_data.id) != PC)
2514     SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2515 }
2516 
2517 /* Start initializing rhs data.  */
2518 static void
2519 deps_init_id_start_rhs (rtx rhs)
2520 {
2521   gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2522 
2523   /* And there was no sel_deps_reset_to_insn ().  */
2524   if (IDATA_LHS (deps_init_id_data.id) != NULL)
2525     {
2526       IDATA_RHS (deps_init_id_data.id) = rhs;
2527       deps_init_id_data.where = DEPS_IN_RHS;
2528     }
2529 }
2530 
2531 /* Finish initializing rhs data.  */
2532 static void
2533 deps_init_id_finish_rhs (void)
2534 {
2535   gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2536 	      || deps_init_id_data.where == DEPS_IN_INSN);
2537   deps_init_id_data.where = DEPS_IN_INSN;
2538 }
2539 
2540 /* Finish initializing insn data.  */
2541 static void
2542 deps_init_id_finish_insn (void)
2543 {
2544   gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2545 
2546   if (IDATA_TYPE (deps_init_id_data.id) == SET)
2547     {
2548       rtx lhs = IDATA_LHS (deps_init_id_data.id);
2549       rtx rhs = IDATA_RHS (deps_init_id_data.id);
2550 
2551       if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2552 	  || deps_init_id_data.force_use_p)
2553 	{
2554           /* This should be a USE, as we don't want to schedule its RHS
2555              separately.  However, we still want to have them recorded
2556              for the purposes of substitution.  That's why we don't
2557              simply call downgrade_to_use () here.  */
2558 	  gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2559 	  gcc_assert (!lhs == !rhs);
2560 
2561 	  IDATA_TYPE (deps_init_id_data.id) = USE;
2562 	}
2563     }
2564 
2565   deps_init_id_data.where = DEPS_IN_NOWHERE;
2566 }
2567 
2568 /* This is dependence info used for initializing insn's data.  */
2569 static struct sched_deps_info_def deps_init_id_sched_deps_info;
2570 
2571 /* This initializes most of the static part of the above structure.  */
2572 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2573   {
2574     NULL,
2575 
2576     deps_init_id_start_insn,
2577     deps_init_id_finish_insn,
2578     deps_init_id_start_lhs,
2579     deps_init_id_finish_lhs,
2580     deps_init_id_start_rhs,
2581     deps_init_id_finish_rhs,
2582     deps_init_id_note_reg_set,
2583     deps_init_id_note_reg_clobber,
2584     deps_init_id_note_reg_use,
2585     NULL, /* note_mem_dep */
2586     NULL, /* note_dep */
2587 
2588     0, /* use_cselib */
2589     0, /* use_deps_list */
2590     0 /* generate_spec_deps */
2591   };
2592 
2593 /* Initialize INSN's lhs and rhs in ID.  When FORCE_UNIQUE_P is true,
2594    we don't actually need information about lhs and rhs.  */
2595 static void
2596 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2597 {
2598   rtx pat = PATTERN (insn);
2599 
2600   if (NONJUMP_INSN_P (insn)
2601       && GET_CODE (pat) == SET
2602       && !force_unique_p)
2603     {
2604       IDATA_RHS (id) = SET_SRC (pat);
2605       IDATA_LHS (id) = SET_DEST (pat);
2606     }
2607   else
2608     IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2609 }
2610 
2611 /* Possibly downgrade INSN to USE.  */
2612 static void
2613 maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2614 {
2615   bool must_be_use = false;
2616   df_ref def;
2617   rtx lhs = IDATA_LHS (id);
2618   rtx rhs = IDATA_RHS (id);
2619 
2620   /* We downgrade only SETs.  */
2621   if (IDATA_TYPE (id) != SET)
2622     return;
2623 
2624   if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2625     {
2626       IDATA_TYPE (id) = USE;
2627       return;
2628     }
2629 
2630   FOR_EACH_INSN_DEF (def, insn)
2631     {
2632       if (DF_REF_INSN (def)
2633           && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2634           && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2635         {
2636           must_be_use = true;
2637           break;
2638         }
2639 
2640 #ifdef STACK_REGS
2641       /* Make instructions that set stack registers to be ineligible for
2642 	 renaming to avoid issues with find_used_regs.  */
2643       if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2644 	{
2645 	  must_be_use = true;
2646 	  break;
2647 	}
2648 #endif
2649     }
2650 
2651   if (must_be_use)
2652     IDATA_TYPE (id) = USE;
2653 }
2654 
2655 /* Setup implicit register clobbers calculated by sched-deps for INSN
2656    before reload and save them in ID.  */
2657 static void
2658 setup_id_implicit_regs (idata_t id, insn_t insn)
2659 {
2660   if (reload_completed)
2661     return;
2662 
2663   HARD_REG_SET temp;
2664 
2665   get_implicit_reg_pending_clobbers (&temp, insn);
2666   IOR_REG_SET_HRS (IDATA_REG_SETS (id), temp);
2667 }
2668 
2669 /* Setup register sets describing INSN in ID.  */
2670 static void
2671 setup_id_reg_sets (idata_t id, insn_t insn)
2672 {
2673   struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2674   df_ref def, use;
2675   regset tmp = get_clear_regset_from_pool ();
2676 
2677   FOR_EACH_INSN_INFO_DEF (def, insn_info)
2678     {
2679       unsigned int regno = DF_REF_REGNO (def);
2680 
2681       /* Post modifies are treated like clobbers by sched-deps.c.  */
2682       if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2683                                      | DF_REF_PRE_POST_MODIFY)))
2684         SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2685       else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2686         {
2687 	  SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2688 
2689 #ifdef STACK_REGS
2690 	  /* For stack registers, treat writes to them as writes
2691 	     to the first one to be consistent with sched-deps.c.  */
2692 	  if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2693 	    SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2694 #endif
2695 	}
2696       /* Mark special refs that generate read/write def pair.  */
2697       if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2698           || regno == STACK_POINTER_REGNUM)
2699         bitmap_set_bit (tmp, regno);
2700     }
2701 
2702   FOR_EACH_INSN_INFO_USE (use, insn_info)
2703     {
2704       unsigned int regno = DF_REF_REGNO (use);
2705 
2706       /* When these refs are met for the first time, skip them, as
2707          these uses are just counterparts of some defs.  */
2708       if (bitmap_bit_p (tmp, regno))
2709         bitmap_clear_bit (tmp, regno);
2710       else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2711 	{
2712 	  SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2713 
2714 #ifdef STACK_REGS
2715 	  /* For stack registers, treat reads from them as reads from
2716 	     the first one to be consistent with sched-deps.c.  */
2717 	  if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2718 	    SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2719 #endif
2720 	}
2721     }
2722 
2723   /* Also get implicit reg clobbers from sched-deps.  */
2724   setup_id_implicit_regs (id, insn);
2725 
2726   return_regset_to_pool (tmp);
2727 }
2728 
2729 /* Initialize instruction data for INSN in ID using DF's data.  */
2730 static void
2731 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2732 {
2733   gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2734 
2735   setup_id_for_insn (id, insn, force_unique_p);
2736   setup_id_lhs_rhs (id, insn, force_unique_p);
2737 
2738   if (INSN_NOP_P (insn))
2739     return;
2740 
2741   maybe_downgrade_id_to_use (id, insn);
2742   setup_id_reg_sets (id, insn);
2743 }
2744 
2745 /* Initialize instruction data for INSN in ID.  */
2746 static void
2747 deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2748 {
2749   class deps_desc _dc, *dc = &_dc;
2750 
2751   deps_init_id_data.where = DEPS_IN_NOWHERE;
2752   deps_init_id_data.id = id;
2753   deps_init_id_data.force_unique_p = force_unique_p;
2754   deps_init_id_data.force_use_p = false;
2755 
2756   init_deps (dc, false);
2757   memcpy (&deps_init_id_sched_deps_info,
2758 	  &const_deps_init_id_sched_deps_info,
2759 	  sizeof (deps_init_id_sched_deps_info));
2760   if (spec_info != NULL)
2761     deps_init_id_sched_deps_info.generate_spec_deps = 1;
2762   sched_deps_info = &deps_init_id_sched_deps_info;
2763 
2764   deps_analyze_insn (dc, insn);
2765   /* Implicit reg clobbers are received from sched-deps separately.  */
2766   setup_id_implicit_regs (id, insn);
2767 
2768   free_deps (dc);
2769   deps_init_id_data.id = NULL;
2770 }
2771 
2772 
2773 struct sched_scan_info_def
2774 {
2775   /* This hook notifies the scheduler frontend to extend its internal per
2776      basic block data structures.  This hook should be called once before
2777      a series of calls to bb_init ().  */
2778   void (*extend_bb) (void);
2779 
2780   /* This hook makes the scheduler frontend initialize its internal data
2781      structures for the passed basic block.  */
2782   void (*init_bb) (basic_block);
2783 
2784   /* This hook notifies the scheduler frontend to extend its internal per
2785      insn data structures.  This hook should be called once before a series
2786      of calls to insn_init ().  */
2787   void (*extend_insn) (void);
2788 
2789   /* This hook makes the scheduler frontend initialize its internal data
2790      structures for the passed insn.  */
2791   void (*init_insn) (insn_t);
2792 };
2793 
2794 /* A driver function to add a set of basic blocks (BBS) to the
2795    scheduling region.  */
2796 static void
2797 sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs)
2798 {
2799   unsigned i;
2800   basic_block bb;
2801 
2802   if (ssi->extend_bb)
2803     ssi->extend_bb ();
2804 
2805   if (ssi->init_bb)
2806     FOR_EACH_VEC_ELT (bbs, i, bb)
2807       ssi->init_bb (bb);
2808 
2809   if (ssi->extend_insn)
2810     ssi->extend_insn ();
2811 
2812   if (ssi->init_insn)
2813     FOR_EACH_VEC_ELT (bbs, i, bb)
2814       {
2815 	rtx_insn *insn;
2816 
2817 	FOR_BB_INSNS (bb, insn)
2818 	  ssi->init_insn (insn);
2819       }
2820 }
2821 
2822 /* Implement hooks for collecting fundamental insn properties, such as
2823    whether the insn is an ASM or is within a SCHED_GROUP.  */
2824 
2825 /* True when the "one-time init" data for INSN was not yet initialized.  */
2826 static bool
2827 first_time_insn_init (insn_t insn)
2828 {
2829   return INSN_LIVE (insn) == NULL;
2830 }
2831 
2832 /* Hash an entry in a transformed_insns hashtable.  */
2833 static hashval_t
2834 hash_transformed_insns (const void *p)
2835 {
2836   return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2837 }
2838 
2839 /* Compare the entries in a transformed_insns hashtable.  */
2840 static int
2841 eq_transformed_insns (const void *p, const void *q)
2842 {
2843   rtx_insn *i1 =
2844     VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2845   rtx_insn *i2 =
2846     VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2847 
2848   if (INSN_UID (i1) == INSN_UID (i2))
2849     return 1;
2850   return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2851 }
2852 
2853 /* Free an entry in a transformed_insns hashtable.  */
2854 static void
2855 free_transformed_insns (void *p)
2856 {
2857   struct transformed_insns *pti = (struct transformed_insns *) p;
2858 
2859   vinsn_detach (pti->vinsn_old);
2860   vinsn_detach (pti->vinsn_new);
2861   free (pti);
2862 }
2863 
2864 /* Init the s_i_d data for INSN which should be inited just once, when
2865    we first see the insn.  */
2866 static void
2867 init_first_time_insn_data (insn_t insn)
2868 {
2869   /* This should not be set if this is the first time we init data for
2870      insn.  */
2871   gcc_assert (first_time_insn_init (insn));
2872 
2873   /* These are needed for nops too.  */
2874   INSN_LIVE (insn) = get_regset_from_pool ();
2875   INSN_LIVE_VALID_P (insn) = false;
2876 
2877   if (!INSN_NOP_P (insn))
2878     {
2879       INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2880       INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2881       INSN_TRANSFORMED_INSNS (insn)
2882         = htab_create (16, hash_transformed_insns,
2883                        eq_transformed_insns, free_transformed_insns);
2884       init_deps (&INSN_DEPS_CONTEXT (insn), true);
2885     }
2886 }
2887 
2888 /* Free almost all of the above data for INSN, which has already been
2889    scheduled.  Used for extra-large basic blocks.  */
2890 void
2891 free_data_for_scheduled_insn (insn_t insn)
2892 {
2893   gcc_assert (! first_time_insn_init (insn));
2894 
2895   if (! INSN_ANALYZED_DEPS (insn))
2896     return;
2897 
2898   BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2899   BITMAP_FREE (INSN_FOUND_DEPS (insn));
2900   htab_delete (INSN_TRANSFORMED_INSNS (insn));
2901 
2902   /* This is allocated only for bookkeeping insns.  */
2903   if (INSN_ORIGINATORS (insn))
2904     BITMAP_FREE (INSN_ORIGINATORS (insn));
2905   free_deps (&INSN_DEPS_CONTEXT (insn));
2906 
2907   INSN_ANALYZED_DEPS (insn) = NULL;
2908 
2909   /* Clear the readonly flag so that we ICE when trying to recalculate
2910      the deps context (as we believe that should not happen).  */
2911   (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2912 }
2913 
2914 /* Free the same data as above for INSN.  */
2915 static void
2916 free_first_time_insn_data (insn_t insn)
2917 {
2918   gcc_assert (! first_time_insn_init (insn));
2919 
2920   free_data_for_scheduled_insn (insn);
2921   return_regset_to_pool (INSN_LIVE (insn));
2922   INSN_LIVE (insn) = NULL;
2923   INSN_LIVE_VALID_P (insn) = false;
2924 }
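
/* A pairing sketch (not compiled in): the init/free routines above are
   guarded by first_time_insn_init, so the pattern is one-shot init,
   arbitrary use, then a single free.  The wrapper is hypothetical.  */
#if 0
static void
example_insn_data_lifecycle (insn_t insn)
{
  if (first_time_insn_init (insn))
    init_first_time_insn_data (insn);

  /* ... analyze or schedule INSN ...  */

  free_first_time_insn_data (insn);	/* Releases INSN_LIVE and deps data.  */
}
#endif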
2925 
2926 /* Initialize region-scope data structures for basic blocks.  */
2927 static void
2928 init_global_and_expr_for_bb (basic_block bb)
2929 {
2930   if (sel_bb_empty_p (bb))
2931     return;
2932 
2933   invalidate_av_set (bb);
2934 }
2935 
2936 /* Data for global dependency analysis (to initialize CANT_MOVE and
2937    SCHED_GROUP_P).  */
2938 static struct
2939 {
2940   /* Previous insn.  */
2941   insn_t prev_insn;
2942 } init_global_data;
2943 
2944 /* Determine if INSN is in a sched_group, is an asm, or should not be
2945    cloned.  After that, initialize its expr.  */
2946 static void
2947 init_global_and_expr_for_insn (insn_t insn)
2948 {
2949   if (LABEL_P (insn))
2950     return;
2951 
2952   if (NOTE_INSN_BASIC_BLOCK_P (insn))
2953     {
2954       init_global_data.prev_insn = NULL;
2955       return;
2956     }
2957 
2958   gcc_assert (INSN_P (insn));
2959 
2960   if (SCHED_GROUP_P (insn))
2961     /* Setup a sched_group.  */
2962     {
2963       insn_t prev_insn = init_global_data.prev_insn;
2964 
2965       if (prev_insn)
2966 	INSN_SCHED_NEXT (prev_insn) = insn;
2967 
2968       init_global_data.prev_insn = insn;
2969     }
2970   else
2971     init_global_data.prev_insn = NULL;
2972 
2973   if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2974       || asm_noperands (PATTERN (insn)) >= 0)
2975     /* Mark INSN as an asm.  */
2976     INSN_ASM_P (insn) = true;
2977 
2978   {
2979     bool force_unique_p;
2980     ds_t spec_done_ds;
2981 
2982     /* Certain instructions cannot be cloned, and frame related insns and
2983        the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of
2984        their block.  */
2985     if (prologue_epilogue_contains (insn))
2986       {
2987         if (RTX_FRAME_RELATED_P (insn))
2988           CANT_MOVE (insn) = 1;
2989         else
2990           {
2991             rtx note;
2992             for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2993               if (REG_NOTE_KIND (note) == REG_SAVE_NOTE
2994                   && ((enum insn_note) INTVAL (XEXP (note, 0))
2995                       == NOTE_INSN_EPILOGUE_BEG))
2996                 {
2997                   CANT_MOVE (insn) = 1;
2998                   break;
2999                 }
3000           }
3001         force_unique_p = true;
3002       }
3003     else
3004       if (CANT_MOVE (insn)
3005           || INSN_ASM_P (insn)
3006           || SCHED_GROUP_P (insn)
3007 	  || CALL_P (insn)
3008           /* Exception handling insns are always unique.  */
3009           || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
3010           /* TRAP_IF, though it has an INSN code, is control_flow_insn_p ().  */
3011           || control_flow_insn_p (insn)
3012           || volatile_insn_p (PATTERN (insn))
3013           || (targetm.cannot_copy_insn_p
3014               && targetm.cannot_copy_insn_p (insn)))
3015         force_unique_p = true;
3016       else
3017         force_unique_p = false;
3018 
3019     if (targetm.sched.get_insn_spec_ds)
3020       {
3021 	spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
3022 	spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
3023       }
3024     else
3025       spec_done_ds = 0;
3026 
3027     /* Initialize INSN's expr.  */
3028     init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
3029 	       REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
3030 	       spec_done_ds, 0, 0, vNULL, true,
3031 	       false, false, false, CANT_MOVE (insn));
3032   }
3033 
3034   init_first_time_insn_data (insn);
3035 }
3036 
3037 /* Scan the region and initialize instruction data for basic blocks BBS.  */
3038 void
3039 sel_init_global_and_expr (bb_vec_t bbs)
3040 {
3041   /* ??? It would be nice to implement push / pop scheme for sched_infos.  */
3042   const struct sched_scan_info_def ssi =
3043     {
3044       NULL, /* extend_bb */
3045       init_global_and_expr_for_bb, /* init_bb */
3046       extend_insn_data, /* extend_insn */
3047       init_global_and_expr_for_insn /* init_insn */
3048     };
3049 
3050   sched_scan (&ssi, bbs);
3051 }
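
/* A region lifecycle sketch (not compiled in): the init above pairs with
   sel_finish_global_and_expr defined later in this file.  */
#if 0
  sel_init_global_and_expr (bbs);	/* BBS: the region's blocks (bb_vec_t).  */
  /* ... schedule the region ...  */
  sel_finish_global_and_expr ();
#endif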
3052 
3053 /* Finalize region-scope data structures for basic blocks.  */
3054 static void
3055 finish_global_and_expr_for_bb (basic_block bb)
3056 {
3057   av_set_clear (&BB_AV_SET (bb));
3058   BB_AV_LEVEL (bb) = 0;
3059 }
3060 
3061 /* Finalize INSN's data.  */
3062 static void
3063 finish_global_and_expr_insn (insn_t insn)
3064 {
3065   if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
3066     return;
3067 
3068   gcc_assert (INSN_P (insn));
3069 
3070   if (INSN_LUID (insn) > 0)
3071     {
3072       free_first_time_insn_data (insn);
3073       INSN_WS_LEVEL (insn) = 0;
3074       CANT_MOVE (insn) = 0;
3075 
3076       /* We can no longer assert this, as vinsns of this insn could
3077          easily be live in other insns' caches.  This should be changed to
3078          a counter-like approach among all vinsns.  */
3079       gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
3080       clear_expr (INSN_EXPR (insn));
3081     }
3082 }
3083 
3084 /* Finalize per instruction data for the whole region.  */
3085 void
3086 sel_finish_global_and_expr (void)
3087 {
3088   {
3089     bb_vec_t bbs;
3090     int i;
3091 
3092     bbs.create (current_nr_blocks);
3093 
3094     for (i = 0; i < current_nr_blocks; i++)
3095       bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)));
3096 
3097     /* Clear AV_SETs and INSN_EXPRs.  */
3098     {
3099       const struct sched_scan_info_def ssi =
3100 	{
3101 	  NULL, /* extend_bb */
3102 	  finish_global_and_expr_for_bb, /* init_bb */
3103 	  NULL, /* extend_insn */
3104 	  finish_global_and_expr_insn /* init_insn */
3105 	};
3106 
3107       sched_scan (&ssi, bbs);
3108     }
3109 
3110     bbs.release ();
3111   }
3112 
3113   finish_insns ();
3114 }
3115 
3116 
3117 /* In the below hooks, we merely calculate whether or not a dependence
3118    exists, and in what part of the insn.  However, we will need more data
3119    when we start caching dependence requests.  */
3120 
3121 /* Container to hold information for dependency analysis.  */
3122 static struct
3123 {
3124   deps_t dc;
3125 
3126   /* A variable to track which part of rtx we are scanning in
3127      sched-deps.c: sched_analyze_insn ().  */
3128   deps_where_t where;
3129 
3130   /* Current producer.  */
3131   insn_t pro;
3132 
3133   /* Current consumer.  */
3134   vinsn_t con;
3135 
3136   /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
3137      X is from { INSN, LHS, RHS }.  */
3138   ds_t has_dep_p[DEPS_IN_NOWHERE];
3139 } has_dependence_data;
3140 
3141 /* Start analyzing dependencies of INSN.  */
3142 static void
3143 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3144 {
3145   gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3146 
3147   has_dependence_data.where = DEPS_IN_INSN;
3148 }
3149 
3150 /* Finish analyzing dependencies of an insn.  */
3151 static void
3152 has_dependence_finish_insn (void)
3153 {
3154   gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3155 
3156   has_dependence_data.where = DEPS_IN_NOWHERE;
3157 }
3158 
3159 /* Start analyzing dependencies of LHS.  */
3160 static void
3161 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3162 {
3163   gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3164 
3165   if (VINSN_LHS (has_dependence_data.con) != NULL)
3166     has_dependence_data.where = DEPS_IN_LHS;
3167 }
3168 
3169 /* Finish analyzing dependencies of an lhs.  */
3170 static void
3171 has_dependence_finish_lhs (void)
3172 {
3173   has_dependence_data.where = DEPS_IN_INSN;
3174 }
3175 
3176 /* Start analyzing dependencies of RHS.  */
3177 static void
3178 has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3179 {
3180   gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3181 
3182   if (VINSN_RHS (has_dependence_data.con) != NULL)
3183     has_dependence_data.where = DEPS_IN_RHS;
3184 }
3185 
3186 /* Finish analyzing dependencies of an rhs.  */
3187 static void
3188 has_dependence_finish_rhs (void)
3189 {
3190   gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3191 	      || has_dependence_data.where == DEPS_IN_INSN);
3192 
3193   has_dependence_data.where = DEPS_IN_INSN;
3194 }
3195 
3196 /* Note a set of REGNO.  */
3197 static void
3198 has_dependence_note_reg_set (int regno)
3199 {
3200   struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3201 
3202   if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3203 				       VINSN_INSN_RTX
3204 				       (has_dependence_data.con)))
3205     {
3206       ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3207 
3208       if (reg_last->sets != NULL
3209 	  || reg_last->clobbers != NULL)
3210 	*dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3211 
3212       if (reg_last->uses || reg_last->implicit_sets)
3213 	*dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3214     }
3215 }
3216 
3217 /* Note a clobber of REGNO.  */
3218 static void
3219 has_dependence_note_reg_clobber (int regno)
3220 {
3221   struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3222 
3223   if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3224 				       VINSN_INSN_RTX
3225 				       (has_dependence_data.con)))
3226     {
3227       ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3228 
3229       if (reg_last->sets)
3230 	*dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3231 
3232       if (reg_last->uses || reg_last->implicit_sets)
3233 	*dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3234     }
3235 }
3236 
3237 /* Note a use of REGNO.  */
3238 static void
3239 has_dependence_note_reg_use (int regno)
3240 {
3241   struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3242 
3243   if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3244 				       VINSN_INSN_RTX
3245 				       (has_dependence_data.con)))
3246     {
3247       ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3248 
3249       if (reg_last->sets)
3250 	*dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3251 
3252       if (reg_last->clobbers || reg_last->implicit_sets)
3253 	*dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3254 
3255       /* Merge BE_IN_SPEC bits into *DSP when the dependency producer
3256 	 is actually a check insn.  We need to do this for any register
3257 	 read-read dependency with the check unless we track properly
3258 	 all registers written by BE_IN_SPEC-speculated insns, as
3259 	 we don't have explicit dependence lists.  See PR 53975.  */
3260       if (reg_last->uses)
3261 	{
3262 	  ds_t pro_spec_checked_ds;
3263 
3264 	  pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3265 	  pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3266 
3267 	  if (pro_spec_checked_ds != 0)
3268 	    *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3269 				  NULL_RTX, NULL_RTX);
3270 	}
3271     }
3272 }
3273 
3274 /* Note a memory dependence.  */
3275 static void
3276 has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3277 			     rtx pending_mem ATTRIBUTE_UNUSED,
3278 			     insn_t pending_insn ATTRIBUTE_UNUSED,
3279 			     ds_t ds ATTRIBUTE_UNUSED)
3280 {
3281   if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3282 				       VINSN_INSN_RTX (has_dependence_data.con)))
3283     {
3284       ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3285 
3286       *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3287     }
3288 }
3289 
3290 /* Note a dependence.  */
3291 static void
3292 has_dependence_note_dep (insn_t pro, ds_t ds ATTRIBUTE_UNUSED)
3293 {
3294   insn_t real_pro = has_dependence_data.pro;
3295   insn_t real_con = VINSN_INSN_RTX (has_dependence_data.con);
3296 
3297   /* We do not allow for debug insns to move through others unless they
3298      are at the start of bb.  This movement may create bookkeeping copies
3299      that later would not be able to move up, violating the invariant
3300      that a bookkeeping copy should be movable as the original insn.
3301      Detect that here and allow that movement if we allowed it before
3302      in the first place.  */
3303   if (DEBUG_INSN_P (real_con) && !DEBUG_INSN_P (real_pro)
3304       && INSN_UID (NEXT_INSN (pro)) == INSN_UID (real_con))
3305     return;
3306 
3307   if (!sched_insns_conditions_mutex_p (real_pro, real_con))
3308     {
3309       ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3310 
3311       *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3312     }
3313 }
3314 
3315 /* Mark the insn as having a hard dependence that prevents speculation.  */
3316 void
3317 sel_mark_hard_insn (rtx insn)
3318 {
3319   int i;
3320 
3321   /* Only work when we're in has_dependence_p mode.
3322      ??? This is a hack, this should actually be a hook.  */
3323   if (!has_dependence_data.dc || !has_dependence_data.pro)
3324     return;
3325 
3326   gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3327   gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3328 
3329   for (i = 0; i < DEPS_IN_NOWHERE; i++)
3330     has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3331 }
3332 
3333 /* This structure holds the hooks for the dependency analysis used when
3334    actually processing dependencies in the scheduler.  */
3335 static struct sched_deps_info_def has_dependence_sched_deps_info;
3336 
3337 /* This initializes most of the fields of the above structure.  */
3338 static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3339   {
3340     NULL,
3341 
3342     has_dependence_start_insn,
3343     has_dependence_finish_insn,
3344     has_dependence_start_lhs,
3345     has_dependence_finish_lhs,
3346     has_dependence_start_rhs,
3347     has_dependence_finish_rhs,
3348     has_dependence_note_reg_set,
3349     has_dependence_note_reg_clobber,
3350     has_dependence_note_reg_use,
3351     has_dependence_note_mem_dep,
3352     has_dependence_note_dep,
3353 
3354     0, /* use_cselib */
3355     0, /* use_deps_list */
3356     0 /* generate_spec_deps */
3357   };
3358 
3359 /* Initialize has_dependence_sched_deps_info with extra spec field.  */
3360 static void
setup_has_dependence_sched_deps_info(void)3361 setup_has_dependence_sched_deps_info (void)
3362 {
3363   memcpy (&has_dependence_sched_deps_info,
3364 	  &const_has_dependence_sched_deps_info,
3365 	  sizeof (has_dependence_sched_deps_info));
3366 
3367   if (spec_info != NULL)
3368     has_dependence_sched_deps_info.generate_spec_deps = 1;
3369 
3370   sched_deps_info = &has_dependence_sched_deps_info;
3371 }
3372 
3373 /* Remove all dependences found and recorded in has_dependence_data array.  */
3374 void
sel_clear_has_dependence(void)3375 sel_clear_has_dependence (void)
3376 {
3377   int i;
3378 
3379   for (i = 0; i < DEPS_IN_NOWHERE; i++)
3380     has_dependence_data.has_dep_p[i] = 0;
3381 }
3382 
3383 /* Return nonzero if EXPR has is dependent upon PRED.  Return the pointer
3384    to the dependence information array in HAS_DEP_PP.  */
3385 ds_t
has_dependence_p(expr_t expr,insn_t pred,ds_t ** has_dep_pp)3386 has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3387 {
3388   int i;
3389   ds_t ds;
3390   class deps_desc *dc;
3391 
3392   if (INSN_SIMPLEJUMP_P (pred))
3393     /* Unconditional jump is just a transfer of control flow.
3394        Ignore it.  */
3395     return false;
3396 
3397   dc = &INSN_DEPS_CONTEXT (pred);
3398 
3399   /* We init this field lazily.  */
3400   if (dc->reg_last == NULL)
3401     init_deps_reg_last (dc);
3402 
3403   if (!dc->readonly)
3404     {
3405       has_dependence_data.pro = NULL;
3406       /* Initialize empty dep context with information about PRED.  */
3407       advance_deps_context (dc, pred);
3408       dc->readonly = 1;
3409     }
3410 
3411   has_dependence_data.where = DEPS_IN_NOWHERE;
3412   has_dependence_data.pro = pred;
3413   has_dependence_data.con = EXPR_VINSN (expr);
3414   has_dependence_data.dc = dc;
3415 
3416   sel_clear_has_dependence ();
3417 
3418   /* Now catch all dependencies that would be generated between PRED and
3419      INSN.  */
3420   setup_has_dependence_sched_deps_info ();
3421   deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3422   has_dependence_data.dc = NULL;
3423 
3424   /* When a barrier was found, set DEPS_IN_INSN bits.  */
3425   if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3426     has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3427   else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3428     has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3429 
3430   /* Do not allow stores to memory to move through checks.  Currently
3431      we don't move this to sched-deps.c as the check doesn't have
3432      obvious places to which this dependence can be attached.
3433      FIMXE: this should go to a hook.  */
3434   if (EXPR_LHS (expr)
3435       && MEM_P (EXPR_LHS (expr))
3436       && sel_insn_is_speculation_check (pred))
3437     has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3438 
3439   *has_dep_pp = has_dependence_data.has_dep_p;
3440   ds = 0;
3441   for (i = 0; i < DEPS_IN_NOWHERE; i++)
3442     ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3443 			NULL_RTX, NULL_RTX);
3444 
3445   return ds;
3446 }
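
/* A usage sketch for has_dependence_p (illustrative only, not code from
   this file):

     ds_t *has_dep_p;
     ds_t ds = has_dependence_p (expr, pred, &has_dep_p);

   A zero DS means no dependence was found, so EXPR may be moved up
   through PRED; a nonzero DS with SPECULATIVE bits set means EXPR may
   still be moved as a speculative insn; and a nonzero
   has_dep_p[DEPS_IN_INSN] pinpoints a conflict on the insn as a whole.  */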


/* Dependence hooks implementation that checks dependence latency constraints
   on the insns being scheduled.  The entry point for these routines is
   tick_check_p predicate.  */

static struct
{
  /* An expr we are currently checking.  */
  expr_t expr;

  /* A minimal cycle for its scheduling.  */
  int cycle;

  /* Whether we have seen a true dependence while checking.  */
  bool seen_true_dep_p;
} tick_check_data;

/* Update the minimal scheduling cycle for the expr being checked, given
   that it depends on PRO_INSN with status DS and weight DW.  */
static void
tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
{
  expr_t con_expr = tick_check_data.expr;
  insn_t con_insn = EXPR_INSN_RTX (con_expr);

  if (con_insn != pro_insn)
    {
      enum reg_note dt;
      int tick;

      if (/* PROducer was removed from above due to pipelining.  */
	  !INSN_IN_STREAM_P (pro_insn)
	  /* Or PROducer was originally on the next iteration regarding the
	     CONsumer.  */
	  || (INSN_SCHED_TIMES (pro_insn)
	      - EXPR_SCHED_TIMES (con_expr)) > 1)
	/* Don't count this dependence.  */
        return;

      dt = ds_to_dt (ds);
      if (dt == REG_DEP_TRUE)
        tick_check_data.seen_true_dep_p = true;

      gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);

      {
	dep_def _dep, *dep = &_dep;

	init_dep (dep, pro_insn, con_insn, dt);

	tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
      }

      /* When there are several kinds of dependencies between pro and con,
         only REG_DEP_TRUE should be taken into account.  */
      if (tick > tick_check_data.cycle
	  && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
	tick_check_data.cycle = tick;
    }
}

/* An implementation of note_dep hook.  */
static void
tick_check_note_dep (insn_t pro, ds_t ds)
{
  tick_check_dep_with_dw (pro, ds, 0);
}

/* An implementation of note_mem_dep hook.  */
static void
tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
{
  dw_t dw;

  dw = (ds_to_dt (ds) == REG_DEP_TRUE
        ? estimate_dep_weak (mem1, mem2)
        : 0);

  tick_check_dep_with_dw (pro, ds, dw);
}

/* This structure contains hooks for dependence analysis used when determining
   whether an insn is ready for scheduling.  */
static struct sched_deps_info_def tick_check_sched_deps_info =
  {
    NULL,

    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    haifa_note_reg_set,
    haifa_note_reg_clobber,
    haifa_note_reg_use,
    tick_check_note_mem_dep,
    tick_check_note_dep,

    0, 0, 0
  };

/* Estimate number of cycles from the current cycle of FENCE until EXPR can be
   scheduled.  Return 0 if all data from producers in DC is ready.  */
int
tick_check_p (expr_t expr, deps_t dc, fence_t fence)
{
  int cycles_left;
  /* Initialize variables.  */
  tick_check_data.expr = expr;
  tick_check_data.cycle = 0;
  tick_check_data.seen_true_dep_p = false;
  sched_deps_info = &tick_check_sched_deps_info;

  gcc_assert (!dc->readonly);
  dc->readonly = 1;
  deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
  dc->readonly = 0;

  cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);

  return cycles_left >= 0 ? cycles_left : 0;
}
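
/* Usage sketch (illustrative): the returned value is the number of stall
   cycles to wait before EXPR's producers complete, so readiness at the
   current fence cycle can be tested as

     int stalls = tick_check_p (expr, dc, fence);
     if (stalls == 0)
       ...   (every producer result is ready on FENCE_CYCLE (fence))
     else
       ...   (delay EXPR for STALLS more cycles)  */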


/* Functions to work with insns.  */

/* Returns true if LHS of INSN is the same as DEST of an insn
   being moved.  */
bool
lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
{
  rtx lhs = INSN_LHS (insn);

  if (lhs == NULL || dest == NULL)
    return false;

  return rtx_equal_p (lhs, dest);
}

/* Return s_i_d entry of INSN.  Callable from debugger.  */
sel_insn_data_def
insn_sid (insn_t insn)
{
  return *SID (insn);
}

/* True when INSN is a speculative check.  We can tell this by looking
   at the data structures of the selective scheduler, not by examining
   the pattern.  */
bool
sel_insn_is_speculation_check (rtx insn)
{
  return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn);
}

/* Extracts machine mode MODE and destination location DST_LOC
   for given INSN.  */
void
get_dest_and_mode (rtx insn, rtx *dst_loc, machine_mode *mode)
{
  rtx pat = PATTERN (insn);

  gcc_assert (dst_loc);
  gcc_assert (GET_CODE (pat) == SET);

  *dst_loc = SET_DEST (pat);

  gcc_assert (*dst_loc);
  gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));

  if (mode)
    *mode = GET_MODE (*dst_loc);
}

/* Returns true when moving through JUMP will result in bookkeeping
   creation.  */
bool
bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
{
  insn_t succ;
  succ_iterator si;

  FOR_EACH_SUCC (succ, si, jump)
    if (sel_num_cfg_preds_gt_1 (succ))
      return true;

  return false;
}

/* Return 'true' if INSN is the only one in its basic block.  */
static bool
insn_is_the_only_one_in_bb_p (insn_t insn)
{
  return sel_bb_head_p (insn) && sel_bb_end_p (insn);
}

/* Check that the region we're scheduling still has at most one
   backedge.  */
static void
verify_backedges (void)
{
  if (pipelining_p)
    {
      int i, n = 0;
      edge e;
      edge_iterator ei;

      for (i = 0; i < current_nr_blocks; i++)
        FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))->succs)
          if (in_current_region_p (e->dest)
              && BLOCK_TO_BB (e->dest->index) < i)
            n++;

      gcc_assert (n <= 1);
    }
}


/* Functions to work with control flow.  */

/* Recompute BLOCK_TO_BB and BB_FOR_BLOCK for current region so that blocks
   are sorted in topological order (it might have been invalidated by
   redirecting an edge).  */
static void
sel_recompute_toporder (void)
{
  int i, n, rgn;
  int *postorder, n_blocks;

  postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun));
  n_blocks = post_order_compute (postorder, false, false);

  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  for (n = 0, i = n_blocks - 1; i >= 0; i--)
    if (CONTAINING_RGN (postorder[i]) == rgn)
      {
	BLOCK_TO_BB (postorder[i]) = n;
	BB_TO_BLOCK (n) = postorder[i];
	n++;
      }

  /* Assert that we updated info for all blocks.  We may miss some blocks if
     this function is called when redirecting an edge made a block
     unreachable, but that block is not deleted yet.  */
  gcc_assert (n == RGN_NR_BLOCKS (rgn));
}

/* Tidy the possibly empty block BB.  */
static bool
maybe_tidy_empty_bb (basic_block bb)
{
  basic_block succ_bb, pred_bb, note_bb;
  vec<basic_block> dom_bbs;
  edge e;
  edge_iterator ei;
  bool rescan_p;

  /* Keep empty bb only if this block immediately precedes EXIT and
     has incoming non-fallthrough edge, or it has no predecessors or
     successors.  Otherwise remove it.  */
  if (!sel_bb_empty_p (bb)
      || (single_succ_p (bb)
	  && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
          && (!single_pred_p (bb)
              || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
      || EDGE_COUNT (bb->preds) == 0
      || EDGE_COUNT (bb->succs) == 0)
    return false;

  /* Do not attempt to redirect complex edges.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_COMPLEX)
      return false;
    else if (e->flags & EDGE_FALLTHRU)
      {
	rtx note;
	/* If prev bb ends with asm goto, see if any of the
	   ASM_OPERANDS_LABELs don't point to the fallthru
	   label.  Do not attempt to redirect it in that case.  */
	if (JUMP_P (BB_END (e->src))
	    && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
	  {
	    int i, n = ASM_OPERANDS_LABEL_LENGTH (note);

	    for (i = 0; i < n; ++i)
	      if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb))
		return false;
	  }
      }

  free_data_sets (bb);

  /* Do not delete BB if it has more than one successor.
     That can occur when we are moving a jump.  */
  if (!single_succ_p (bb))
    {
      gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
      sel_merge_blocks (bb->prev_bb, bb);
      return true;
    }

  succ_bb = single_succ (bb);
  rescan_p = true;
  pred_bb = NULL;
  dom_bbs.create (0);

  /* Save a pred/succ from the current region to attach the notes to.  */
  note_bb = NULL;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (in_current_region_p (e->src))
      {
	note_bb = e->src;
	break;
      }
  if (note_bb == NULL)
    note_bb = succ_bb;

  /* Redirect all non-fallthru edges to the next bb.  */
  while (rescan_p)
    {
      rescan_p = false;

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          pred_bb = e->src;

          if (!(e->flags & EDGE_FALLTHRU))
            {
	      /* We cannot invalidate the computed topological order by moving
	         the edge destination block (E->DEST) along a fallthru edge.

		 We will update dominators here only when we'll get
		 an unreachable block when redirecting, otherwise
		 sel_redirect_edge_and_branch will take care of it.  */
	      if (e->dest != bb
		  && single_pred_p (e->dest))
		dom_bbs.safe_push (e->dest);
              sel_redirect_edge_and_branch (e, succ_bb);
              rescan_p = true;
              break;
            }
	  /* If the edge is fallthru, but PRED_BB ends in a conditional jump
	     to BB (so there is no non-fallthru edge from PRED_BB to BB), we
	     still have to adjust it.  */
	  else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb)))
	    {
	      /* If possible, try to remove the unneeded conditional jump.  */
	      if (onlyjump_p (BB_END (pred_bb))
		  && INSN_SCHED_TIMES (BB_END (pred_bb)) == 0
		  && !IN_CURRENT_FENCE_P (BB_END (pred_bb)))
		{
		  if (!sel_remove_insn (BB_END (pred_bb), false, false))
		    tidy_fallthru_edge (e);
		}
	      else
		sel_redirect_edge_and_branch (e, succ_bb);
	      rescan_p = true;
	      break;
	    }
        }
    }

  if (can_merge_blocks_p (bb->prev_bb, bb))
    sel_merge_blocks (bb->prev_bb, bb);
  else
    {
      /* This is a block without fallthru predecessor.  Just delete it.  */
      gcc_assert (note_bb);
      move_bb_info (note_bb, bb);
      remove_empty_bb (bb, true);
    }

  if (!dom_bbs.is_empty ())
    {
      dom_bbs.safe_push (succ_bb);
      iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
      dom_bbs.release ();
    }

  return true;
}

/* Tidy the control flow after we have removed original insn from
   XBB.  Return true if we have removed some blocks.  When FULL_TIDYING
   is true, also try to optimize control flow on non-empty blocks.  */
bool
tidy_control_flow (basic_block xbb, bool full_tidying)
{
  bool changed = true;
  insn_t first, last;

  /* First check whether XBB is empty.  */
  changed = maybe_tidy_empty_bb (xbb);
  if (changed || !full_tidying)
    return changed;

  /* Check whether an unnecessary jump is left after the insn.  */
  if (bb_has_removable_jump_to_p (xbb, xbb->next_bb)
      && INSN_SCHED_TIMES (BB_END (xbb)) == 0
      && !IN_CURRENT_FENCE_P (BB_END (xbb)))
    {
      /* We used to call sel_remove_insn here, which can trigger
	 tidy_control_flow before we fix up the fallthru edge.  Correct that
	 ordering by explicitly doing the latter before the former.  */
      clear_expr (INSN_EXPR (BB_END (xbb)));
      tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
      if (tidy_control_flow (xbb, false))
	return true;
    }

  first = sel_bb_head (xbb);
  last = sel_bb_end (xbb);
  if (MAY_HAVE_DEBUG_INSNS)
    {
      if (first != last && DEBUG_INSN_P (first))
	do
	  first = NEXT_INSN (first);
	while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));

      if (first != last && DEBUG_INSN_P (last))
	do
	  last = PREV_INSN (last);
	while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
    }
  /* Check if an unnecessary jump is left in the previous basic block,
     leading to the next basic block, after removing INSN from the stream.
     If so, remove that jump and redirect the edge to the current basic
     block (where INSN was before deletion).  This way, when the NOP is
     deleted several instructions later together with its basic block, we
     will not get a jump to the next instruction, which can be harmful.  */
  if (first == last
      && !sel_bb_empty_p (xbb)
      && INSN_NOP_P (last)
      /* Flow goes fallthru from current block to the next.  */
      && EDGE_COUNT (xbb->succs) == 1
      && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
      /* When successor is an EXIT block, it may not be the next block.  */
      && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* And the unconditional jump in the previous basic block leads to the
         next basic block of XBB and can be safely removed.  */
      && in_current_region_p (xbb->prev_bb)
      && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb)
      && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
      /* Also this jump is not at the scheduling boundary.  */
      && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
    {
      bool recompute_toporder_p;
      /* Clear data structures of jump - jump itself will be removed
         by sel_redirect_edge_and_branch.  */
      clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
      recompute_toporder_p
        = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);

      gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);

      /* We could have skipped some debug insns which did not get removed
	 with the block, and the seqnos could become incorrect.  Fix them up
	 here.  */
      if (MAY_HAVE_DEBUG_INSNS
	  && (sel_bb_head (xbb) != first || sel_bb_end (xbb) != last))
       {
         if (!sel_bb_empty_p (xbb->prev_bb))
           {
             int prev_seqno = INSN_SEQNO (sel_bb_end (xbb->prev_bb));
             if (prev_seqno > INSN_SEQNO (sel_bb_head (xbb)))
               for (insn_t insn = sel_bb_head (xbb); insn != first;
		    insn = NEXT_INSN (insn))
                 INSN_SEQNO (insn) = prev_seqno + 1;
           }
       }

      /* It can turn out that, after removing the unused jump, the basic
         block that contained that jump becomes empty too.  In such a case,
         remove it as well.  */
      if (sel_bb_empty_p (xbb->prev_bb))
        changed = maybe_tidy_empty_bb (xbb->prev_bb);
      if (recompute_toporder_p)
	sel_recompute_toporder ();
    }

  /* TODO: use separate flag for CFG checking.  */
  if (flag_checking)
    {
      verify_backedges ();
      verify_dominators (CDI_DOMINATORS);
    }

  return changed;
}

/* Purge meaningless empty blocks in the middle of a region.  */
void
purge_empty_blocks (void)
{
  int i;

  /* Do not attempt to delete the first basic block in the region.  */
  for (i = 1; i < current_nr_blocks; )
    {
      basic_block b = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));

      if (maybe_tidy_empty_bb (b))
	continue;

      i++;
    }
}

/* Remove INSN from the insn stream.  When ONLY_DISCONNECT is true,
   do not delete insn's data, because it will be later re-emitted.
   Return true if we have removed some blocks afterwards.  */
bool
sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
{
  basic_block bb = BLOCK_FOR_INSN (insn);

  gcc_assert (INSN_IN_STREAM_P (insn));

  if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
    {
      expr_t expr;
      av_set_iterator i;

      /* When we remove a debug insn that is head of a BB, it remains
	 in the AV_SET of the block, but it shouldn't.  */
      FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
	if (EXPR_INSN_RTX (expr) == insn)
	  {
	    av_set_iter_remove (&i);
	    break;
	  }
    }

  if (only_disconnect)
    remove_insn (insn);
  else
    {
      delete_insn (insn);
      clear_expr (INSN_EXPR (insn));
    }

  /* It is necessary to NULL these fields in case we are going to re-insert
     INSN into the insns stream, as will usually happen in the ONLY_DISCONNECT
     case, but also for NOPs that we will return to the nop pool.  */
  SET_PREV_INSN (insn) = NULL_RTX;
  SET_NEXT_INSN (insn) = NULL_RTX;
  set_block_for_insn (insn, NULL);

  return tidy_control_flow (bb, full_tidying);
}
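
/* Illustrative examples of the two modes (the surrounding context is
   hypothetical; the second form is the one used in maybe_tidy_empty_bb
   above):

     sel_remove_insn (insn, true, full_tidying);
	(disconnect INSN that will be re-emitted later; keep its data)

     sel_remove_insn (nop, false, false);
	(delete an insn for good, together with its expression data)  */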

/* Estimate number of the insns in BB.  */
static int
sel_estimate_number_of_insns (basic_block bb)
{
  int res = 0;
  insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));

  for (; insn != next_tail; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      res++;

  return res;
}

/* We don't need separate luids for notes or labels.  */
static int
sel_luid_for_non_insn (rtx x)
{
  gcc_assert (NOTE_P (x) || LABEL_P (x));

  return -1;
}

/* Find the proper seqno for inserting at INSN by successors.
   Return -1 if no successors with positive seqno exist.  */
static int
get_seqno_by_succs (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  rtx_insn *tmp = insn, *end = BB_END (bb);
  int seqno;
  insn_t succ = NULL;
  succ_iterator si;

  while (tmp != end)
    {
      tmp = NEXT_INSN (tmp);
      if (INSN_P (tmp))
        return INSN_SEQNO (tmp);
    }

  seqno = INT_MAX;

  FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL)
    if (INSN_SEQNO (succ) > 0)
      seqno = MIN (seqno, INSN_SEQNO (succ));

  if (seqno == INT_MAX)
    return -1;

  return seqno;
}

/* Compute seqno for INSN by its preds or succs.  Use OLD_SEQNO to compute
   seqno in corner cases.  */
static int
get_seqno_for_a_jump (insn_t insn, int old_seqno)
{
  int seqno;

  gcc_assert (INSN_SIMPLEJUMP_P (insn));

  if (!sel_bb_head_p (insn))
    seqno = INSN_SEQNO (PREV_INSN (insn));
  else
    {
      basic_block bb = BLOCK_FOR_INSN (insn);

      if (single_pred_p (bb)
	  && !in_current_region_p (single_pred (bb)))
	{
          /* We can have preds outside a region when splitting edges
             for pipelining of an outer loop.  Use succ instead.
             There should be only one of them.  */
	  insn_t succ = NULL;
          succ_iterator si;
          bool first = true;

	  gcc_assert (flag_sel_sched_pipelining_outer_loops
		      && current_loop_nest);
          FOR_EACH_SUCC_1 (succ, si, insn,
                           SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
            {
              gcc_assert (first);
              first = false;
            }

	  gcc_assert (succ != NULL);
	  seqno = INSN_SEQNO (succ);
	}
      else
	{
	  insn_t *preds;
	  int n;

	  cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);

	  gcc_assert (n > 0);
	  /* For one predecessor, use simple method.  */
	  if (n == 1)
	    seqno = INSN_SEQNO (preds[0]);
	  else
	    seqno = get_seqno_by_preds (insn);

	  free (preds);
	}
    }

  /* We were unable to find a good seqno among preds.  */
  if (seqno < 0)
    seqno = get_seqno_by_succs (insn);

  if (seqno < 0)
    {
      /* The only way to get here legally is when the only unscheduled insn
	 was a conditional jump that got removed and turned into this
	 unconditional one.  Initialize the seqno from the old seqno of that
	 jump passed down to here.  */
      seqno = old_seqno;
    }

  gcc_assert (seqno >= 0);
  return seqno;
}

/* Find the proper seqno for inserting at INSN.  Returns -1 if no predecessors
   with positive seqno exist.  */
int
get_seqno_by_preds (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  rtx_insn *tmp = insn, *head = BB_HEAD (bb);
  insn_t *preds;
  int n, i, seqno;

  /* Loop backwards from INSN to HEAD including both.  */
  while (1)
    {
      if (INSN_P (tmp))
	return INSN_SEQNO (tmp);
      if (tmp == head)
	break;
      tmp = PREV_INSN (tmp);
    }

  cfg_preds (bb, &preds, &n);
  for (i = 0, seqno = -1; i < n; i++)
    seqno = MAX (seqno, INSN_SEQNO (preds[i]));

  /* PREDS is allocated by cfg_preds; don't leak it.  */
  free (preds);

  return seqno;
}
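
/* A small worked example (hypothetical seqnos): if no insn with a valid
   seqno precedes INSN within its block, and BB has two in-region
   predecessors ending in insns with seqnos 7 and 12, get_seqno_by_preds
   returns MAX (7, 12) == 12; with no predecessors at all it returns -1,
   the "not found" value handled by the callers above.  */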


/* Extend pass-scope data structures for basic blocks.  */
void
sel_extend_global_bb_info (void)
{
  sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
}

/* Extend region-scope data structures for basic blocks.  */
static void
extend_region_bb_info (void)
{
  sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
}

/* Extend all data structures to fit for all basic blocks.  */
static void
extend_bb_info (void)
{
  sel_extend_global_bb_info ();
  extend_region_bb_info ();
}

/* Finalize pass-scope data structures for basic blocks.  */
void
sel_finish_global_bb_info (void)
{
  sel_global_bb_info.release ();
}

/* Finalize region-scope data structures for basic blocks.  */
static void
finish_region_bb_info (void)
{
  sel_region_bb_info.release ();
}


/* Data for each insn in current region.  */
vec<sel_insn_data_def> s_i_d;

/* Extend data structures for insns from current region.  */
static void
extend_insn_data (void)
{
  int reserve;

  sched_extend_target ();
  sched_deps_init (false);

  /* Extend data structures for insns from current region.  */
  reserve = (sched_max_luid + 1 - s_i_d.length ());
  if (reserve > 0 && ! s_i_d.space (reserve))
    {
      int size;

      if (sched_max_luid / 2 > 1024)
        size = sched_max_luid + 1024;
      else
        size = 3 * sched_max_luid / 2;

      s_i_d.safe_grow_cleared (size, true);
    }
}

/* Finalize data structures for insns from current region.  */
static void
finish_insns (void)
{
  unsigned i;

  /* Clear here all dependence contexts that may have been left behind by
     insns that were removed during the scheduling.  */
  for (i = 0; i < s_i_d.length (); i++)
    {
      sel_insn_data_def *sid_entry = &s_i_d[i];

      if (sid_entry->live)
        return_regset_to_pool (sid_entry->live);
      if (sid_entry->analyzed_deps)
	{
	  BITMAP_FREE (sid_entry->analyzed_deps);
	  BITMAP_FREE (sid_entry->found_deps);
          htab_delete (sid_entry->transformed_insns);
	  free_deps (&sid_entry->deps_context);
	}
      if (EXPR_VINSN (&sid_entry->expr))
        {
          clear_expr (&sid_entry->expr);

          /* Also, clear CANT_MOVE bit here, because we really don't want it
             to be passed to the next region.  */
          CANT_MOVE_BY_LUID (i) = 0;
        }
    }

  s_i_d.release ();
}

/* A proxy to pass initialization data to init_insn_data ().  */
static sel_insn_data_def _insn_init_ssid;
static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;

/* If true create a new vinsn.  Otherwise use the one from EXPR.  */
static bool insn_init_create_new_vinsn_p;

/* Set all necessary data for initialization of the new insn[s].  */
static expr_t
set_insn_init (expr_t expr, vinsn_t vi, int seqno)
{
  expr_t x = &insn_init_ssid->expr;

  copy_expr_onside (x, expr);
  if (vi != NULL)
    {
      insn_init_create_new_vinsn_p = false;
      change_vinsn_in_expr (x, vi);
    }
  else
    insn_init_create_new_vinsn_p = true;

  insn_init_ssid->seqno = seqno;
  return x;
}

/* Init data for INSN.  */
static void
init_insn_data (insn_t insn)
{
  expr_t expr;
  sel_insn_data_t ssid = insn_init_ssid;

  /* The fields mentioned below are special and hence are not being
     propagated to the new insns.  */
  gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
	      && !ssid->after_stall_p && ssid->sched_cycle == 0);
  gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);

  expr = INSN_EXPR (insn);
  copy_expr (expr, &ssid->expr);
  prepare_insn_expr (insn, ssid->seqno);

  if (insn_init_create_new_vinsn_p)
    change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));

  if (first_time_insn_init (insn))
    init_first_time_insn_data (insn);
}

/* This is used to initialize spurious jumps generated by
   sel_redirect_edge ().  OLD_SEQNO is used for initializing seqnos
   in corner cases within get_seqno_for_a_jump.  */
static void
init_simplejump_data (insn_t insn, int old_seqno)
{
  init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
	     REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0,
	     vNULL, true, false, false,
	     false, true);
  INSN_SEQNO (insn) = get_seqno_for_a_jump (insn, old_seqno);
  init_first_time_insn_data (insn);
}

/* Perform deferred initialization of insns.  This is used to process
   a new jump that may be created by redirect_edge.  OLD_SEQNO is used
   for initializing simplejumps in init_simplejump_data.  */
static void
sel_init_new_insn (insn_t insn, int flags, int old_seqno)
{
  /* We create data structures for bb when the first insn is emitted in it.  */
  if (INSN_P (insn)
      && INSN_IN_STREAM_P (insn)
      && insn_is_the_only_one_in_bb_p (insn))
    {
      extend_bb_info ();
      create_initial_data_sets (BLOCK_FOR_INSN (insn));
    }

  if (flags & INSN_INIT_TODO_LUID)
    {
      sched_extend_luids ();
      sched_init_insn_luid (insn);
    }

  if (flags & INSN_INIT_TODO_SSID)
    {
      extend_insn_data ();
      init_insn_data (insn);
      clear_expr (&insn_init_ssid->expr);
    }

  if (flags & INSN_INIT_TODO_SIMPLEJUMP)
    {
      extend_insn_data ();
      init_simplejump_data (insn, old_seqno);
    }

  gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
              == CONTAINING_RGN (BB_TO_BLOCK (0)));
}


/* Functions to init/finish work with lv sets.  */

/* Init BB_LV_SET of BB from DF_LR_IN set of BB.  */
static void
init_lv_set (basic_block bb)
{
  gcc_assert (!BB_LV_SET_VALID_P (bb));

  BB_LV_SET (bb) = get_regset_from_pool ();
  COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
  BB_LV_SET_VALID_P (bb) = true;
}

/* Copy liveness information to BB from FROM_BB.  */
static void
copy_lv_set_from (basic_block bb, basic_block from_bb)
{
  gcc_assert (!BB_LV_SET_VALID_P (bb));

  COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
  BB_LV_SET_VALID_P (bb) = true;
}

/* Initialize lv set of all bb headers.  */
void
init_lv_sets (void)
{
  basic_block bb;

  /* Initialize LV sets.  */
  FOR_EACH_BB_FN (bb, cfun)
    init_lv_set (bb);

  /* Don't forget EXIT_BLOCK.  */
  init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
}

/* Release lv set of BB.  */
static void
free_lv_set (basic_block bb)
{
  gcc_assert (BB_LV_SET (bb) != NULL);

  return_regset_to_pool (BB_LV_SET (bb));
  BB_LV_SET (bb) = NULL;
  BB_LV_SET_VALID_P (bb) = false;
}

/* Finalize lv sets of all bb headers.  */
void
free_lv_sets (void)
{
  basic_block bb;

  /* Don't forget EXIT_BLOCK.  */
  free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));

  /* Free LV sets.  */
  FOR_EACH_BB_FN (bb, cfun)
    if (BB_LV_SET (bb))
      free_lv_set (bb);
}

/* Mark AV_SET for BB as invalid, so this set will be updated the next time
   compute_av () processes BB.  This function is called when creating new
   basic blocks, as well as for blocks (either new or existing) where new
   jumps are created when the control flow is being updated.  */
static void
invalidate_av_set (basic_block bb)
{
  BB_AV_LEVEL (bb) = -1;
}

/* Create initial data sets for BB (they will be invalid).  */
static void
create_initial_data_sets (basic_block bb)
{
  if (BB_LV_SET (bb))
    BB_LV_SET_VALID_P (bb) = false;
  else
    BB_LV_SET (bb) = get_regset_from_pool ();
  invalidate_av_set (bb);
}

/* Free av set of BB.  */
static void
free_av_set (basic_block bb)
{
  av_set_clear (&BB_AV_SET (bb));
  BB_AV_LEVEL (bb) = 0;
}

/* Free data sets of BB.  */
void
free_data_sets (basic_block bb)
{
  free_lv_set (bb);
  free_av_set (bb);
}

/* Exchange data sets of TO and FROM.  */
void
exchange_data_sets (basic_block to, basic_block from)
{
  /* Exchange lv sets of TO and FROM.  */
  std::swap (BB_LV_SET (from), BB_LV_SET (to));
  std::swap (BB_LV_SET_VALID_P (from), BB_LV_SET_VALID_P (to));

  /* Exchange av sets of TO and FROM.  */
  std::swap (BB_AV_SET (from), BB_AV_SET (to));
  std::swap (BB_AV_LEVEL (from), BB_AV_LEVEL (to));
}

/* Copy data sets of FROM to TO.  */
void
copy_data_sets (basic_block to, basic_block from)
{
  gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
  gcc_assert (BB_AV_SET (to) == NULL);

  BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
  BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);

  if (BB_AV_SET_VALID_P (from))
    BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
  if (BB_LV_SET_VALID_P (from))
    {
      gcc_assert (BB_LV_SET (to) != NULL);
      COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
    }
}
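
/* Illustrative usage of the two primitives above, e.g. when a block is
   split and the new block should take over the scheduler's view of the
   old one (NEW_BB/OLD_BB are hypothetical):

     exchange_data_sets (new_bb, old_bb);   (O(1) pointer swap)
     copy_data_sets (new_bb, old_bb);       (duplicates the sets)

   copy_data_sets requires TO's sets to be invalid on entry, as asserted
   above, so the two calls are not interchangeable.  */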

/* Return an av set for INSN, if any.  */
av_set_t
get_av_set (insn_t insn)
{
  av_set_t av_set;

  gcc_assert (AV_SET_VALID_P (insn));

  if (sel_bb_head_p (insn))
    av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
  else
    av_set = NULL;

  return av_set;
}

/* Implementation of AV_LEVEL () macro.  Return AV_LEVEL () of INSN.  */
int
get_av_level (insn_t insn)
{
  int av_level;

  gcc_assert (INSN_P (insn));

  if (sel_bb_head_p (insn))
    av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
  else
    av_level = INSN_WS_LEVEL (insn);

  return av_level;
}



/* Variables to work with control-flow graph.  */

/* Basic blocks that have already been processed by sched_data_update (),
   but haven't been passed to sel_add_bb () yet.  */
static vec<basic_block> last_added_blocks;

/* A pool for allocating successor infos.  */
static struct
{
  /* A stack for saving succs_info structures.  */
  struct succs_info *stack;

  /* Its size.  */
  int size;

  /* Top of the stack.  */
  int top;

  /* Maximal value of the top.  */
  int max_top;
}  succs_info_pool;

/* Functions to work with control-flow graph.  */

/* Return the head insn of BB, skipping the basic block note; return NULL
   if BB contains no real insns of its own.  */
rtx_insn *
sel_bb_head (basic_block bb)
{
  rtx_insn *head;

  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      gcc_assert (exit_insn != NULL_RTX);
      head = exit_insn;
    }
  else
    {
      rtx_note *note = bb_note (bb);
      head = next_nonnote_insn (note);

      if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
	head = NULL;
    }

  return head;
}

/* Return true if INSN is a basic block header.  */
bool
sel_bb_head_p (insn_t insn)
{
  return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
}

/* Return last insn of BB.  */
rtx_insn *
sel_bb_end (basic_block bb)
{
  if (sel_bb_empty_p (bb))
    return NULL;

  gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));

  return BB_END (bb);
}

/* Return true if INSN is the last insn in its basic block.  */
bool
sel_bb_end_p (insn_t insn)
{
  return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
}

/* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK.  */
bool
sel_bb_empty_p (basic_block bb)
{
  return sel_bb_head (bb) == NULL;
}
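
/* The predicates above are tied together by a simple invariant (a sketch,
   not code in this file):

     sel_bb_empty_p (bb)
       == (sel_bb_head (bb) == NULL)
       == (sel_bb_end (bb) == NULL)

   and for a non-empty BB, sel_bb_head_p and sel_bb_end_p just compare an
   insn against these two boundary insns.  */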

/* True when BB belongs to the current scheduling region.  */
bool
in_current_region_p (basic_block bb)
{
  if (bb->index < NUM_FIXED_BLOCKS)
    return false;

  return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
}

/* Return the block which is a fallthru bb of a conditional jump JUMP.  */
basic_block
fallthru_bb_of_jump (const rtx_insn *jump)
{
  if (!JUMP_P (jump))
    return NULL;

  if (!any_condjump_p (jump))
    return NULL;

  /* A basic block that ends with a conditional jump may still have one
     successor (and be followed by a barrier); we are not interested in
     that case.  */
  if (single_succ_p (BLOCK_FOR_INSN (jump)))
    return NULL;

  return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
}

/* Remove all notes from BB.  */
static void
init_bb (basic_block bb)
{
  remove_notes (bb_note (bb), BB_END (bb));
  BB_NOTE_LIST (bb) = note_list;
}

void
sel_init_bbs (bb_vec_t bbs)
{
  const struct sched_scan_info_def ssi =
    {
      extend_bb_info, /* extend_bb */
      init_bb, /* init_bb */
      NULL, /* extend_insn */
      NULL /* init_insn */
    };

  sched_scan (&ssi, bbs);
}

/* Restore notes for the whole region.  */
static void
sel_restore_notes (void)
{
  int bb;
  insn_t insn;

  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      basic_block first, last;

      first = EBB_FIRST_BB (bb);
      last = EBB_LAST_BB (bb)->next_bb;

      do
	{
	  note_list = BB_NOTE_LIST (first);
	  restore_other_notes (NULL, first);
	  BB_NOTE_LIST (first) = NULL;

	  FOR_BB_INSNS (first, insn)
	    if (NONDEBUG_INSN_P (insn))
	      reemit_notes (insn);

          first = first->next_bb;
	}
      while (first != last);
    }
}

/* Free per-bb data structures.  */
void
sel_finish_bbs (void)
{
  sel_restore_notes ();

  /* Remove current loop preheader from this loop.  */
  if (current_loop_nest)
    sel_remove_loop_preheader ();

  finish_region_bb_info ();
}

/* Return true if INSN has a single successor of type FLAGS.  */
bool
sel_insn_has_single_succ_p (insn_t insn, int flags)
{
  insn_t succ;
  succ_iterator si;
  bool first_p = true;

  FOR_EACH_SUCC_1 (succ, si, insn, flags)
    {
      if (first_p)
	first_p = false;
      else
	return false;
    }

  return true;
}

/* Allocate successor's info.  */
static struct succs_info *
alloc_succs_info (void)
{
  if (succs_info_pool.top == succs_info_pool.max_top)
    {
      int i;

      if (++succs_info_pool.max_top >= succs_info_pool.size)
        gcc_unreachable ();

      i = ++succs_info_pool.top;
      succs_info_pool.stack[i].succs_ok.create (10);
      succs_info_pool.stack[i].succs_other.create (10);
      succs_info_pool.stack[i].probs_ok.create (10);
    }
  else
    succs_info_pool.top++;

  return &succs_info_pool.stack[succs_info_pool.top];
}

/* Free successor's info.  */
void
free_succs_info (struct succs_info * sinfo)
{
  gcc_assert (succs_info_pool.top >= 0
              && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
  succs_info_pool.top--;

  /* Clear stale info.  */
  sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ());
  sinfo->succs_other.block_remove (0, sinfo->succs_other.length ());
  sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ());
  sinfo->all_prob = 0;
  sinfo->succs_ok_n = 0;
  sinfo->all_succs_n = 0;
}

/* Compute successor info for INSN.  FLAGS are the flags passed
   to the FOR_EACH_SUCC_1 iterator.  */
struct succs_info *
compute_succs_info (insn_t insn, short flags)
{
  succ_iterator si;
  insn_t succ;
  struct succs_info *sinfo = alloc_succs_info ();

  /* Traverse *all* successors and decide what to do with each.  */
  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
    {
      /* FIXME: this doesn't work for skipping to loop exits, as we don't
         perform code motion through inner loops.  */
      short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;

      if (current_flags & flags)
        {
          sinfo->succs_ok.safe_push (succ);
          sinfo->probs_ok.safe_push (
		    /* FIXME: Improve calculation when skipping
                       inner loop to exits.  */
                    si.bb_end
		    ? (si.e1->probability.initialized_p ()
                       ? si.e1->probability.to_reg_br_prob_base ()
                       : 0)
		    : REG_BR_PROB_BASE);
          sinfo->succs_ok_n++;
        }
      else
        sinfo->succs_other.safe_push (succ);

      /* Compute all_prob.  */
      if (!si.bb_end)
        sinfo->all_prob = REG_BR_PROB_BASE;
      else if (si.e1->probability.initialized_p ())
        sinfo->all_prob += si.e1->probability.to_reg_br_prob_base ();

      sinfo->all_succs_n++;
    }

  return sinfo;
}
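
/* Usage sketch (illustrative): successor infos live in the small
   stack-like pool above, so compute/free calls must be paired and nested
   in LIFO order:

     struct succs_info *sinfo = compute_succs_info (insn, SUCCS_NORMAL);
     for (int i = 0; i < sinfo->succs_ok_n; i++)
       ...   (process sinfo->succs_ok[i] with weight sinfo->probs_ok[i])
     free_succs_info (sinfo);  */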

/* Return the predecessors of BB in PREDS and their number in N.
   Empty blocks are skipped.  SIZE is used to allocate PREDS.  */
static void
cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
{
  edge e;
  edge_iterator ei;

  gcc_assert (BLOCK_TO_BB (bb->index) != 0);

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      basic_block pred_bb = e->src;
      insn_t bb_end = BB_END (pred_bb);

      if (!in_current_region_p (pred_bb))
	{
	  gcc_assert (flag_sel_sched_pipelining_outer_loops
		      && current_loop_nest);
	  continue;
	}

      if (sel_bb_empty_p (pred_bb))
	cfg_preds_1 (pred_bb, preds, n, size);
      else
	{
	  if (*n == *size)
	    *preds = XRESIZEVEC (insn_t, *preds,
                                 (*size = 2 * *size + 1));
	  (*preds)[(*n)++] = bb_end;
	}
    }

  gcc_assert (*n != 0
	      || (flag_sel_sched_pipelining_outer_loops
		  && current_loop_nest));
}

/* Find all predecessors of BB and record them in PREDS and their number
   in N.  Empty blocks are skipped, and only normal (forward in-region)
   edges are processed.  */
static void
cfg_preds (basic_block bb, insn_t **preds, int *n)
{
  int size = 0;

  *preds = NULL;
  *n = 0;
  cfg_preds_1 (bb, preds, n, &size);
}

/* Returns true if we are moving INSN through a join point.  */
bool
sel_num_cfg_preds_gt_1 (insn_t insn)
{
  basic_block bb;

  if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
    return false;

  bb = BLOCK_FOR_INSN (insn);

  while (1)
    {
      if (EDGE_COUNT (bb->preds) > 1)
	return true;

      gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
      bb = EDGE_PRED (bb, 0)->src;

      if (!sel_bb_empty_p (bb))
	break;
    }

  return false;
}

/* Returns true when BB should be the end of an ebb.  Adapted from the
   code in sched-ebb.c.  */
bool
bb_ends_ebb_p (basic_block bb)
{
  basic_block next_bb = bb_next_bb (bb);
  edge e;

  if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || bitmap_bit_p (forced_ebb_heads, next_bb->index)
      || (LABEL_P (BB_HEAD (next_bb))
	  /* NB: LABEL_NUSES () is not maintained outside of jump.c.
	     Work around that.  */
	  && !single_pred_p (next_bb)))
    return true;

  if (!in_current_region_p (next_bb))
    return true;

  e = find_fallthru_edge (bb->succs);
  if (e)
    {
      gcc_assert (e->dest == next_bb);

      return false;
    }

  return true;
}

/* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
   successor of INSN.  */
bool
in_same_ebb_p (insn_t insn, insn_t succ)
{
  basic_block ptr = BLOCK_FOR_INSN (insn);

  for (;;)
    {
      if (ptr == BLOCK_FOR_INSN (succ))
        return true;

      if (bb_ends_ebb_p (ptr))
        return false;

      ptr = bb_next_bb (ptr);
    }
}

/* Recomputes the reverse topological order for the function and
   saves it in REV_TOP_ORDER_INDEX.  REV_TOP_ORDER_INDEX_LEN is also
   modified appropriately.  */
static void
recompute_rev_top_order (void)
{
  int *postorder;
  int n_blocks, i;

  if (!rev_top_order_index
      || rev_top_order_index_len < last_basic_block_for_fn (cfun))
    {
      rev_top_order_index_len = last_basic_block_for_fn (cfun);
      rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
                                        rev_top_order_index_len);
    }

  postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));

  n_blocks = post_order_compute (postorder, true, false);
  gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks);

  /* Build the reverse function: for each basic block with BB->INDEX == K,
     rev_top_order_index[K] is its reverse topological sort number.  */
  for (i = 0; i < n_blocks; i++)
    {
      gcc_assert (postorder[i] < rev_top_order_index_len);
      rev_top_order_index[postorder[i]] = i;
    }

  free (postorder);
}
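
/* Illustrative example (ignoring the entry and exit blocks): for a simple
   chain BB2 -> BB3 -> BB4, the post-order is {4, 3, 2}, giving
   rev_top_order_index[4] == 0, [3] == 1 and [2] == 2, so a *larger* value
   means an *earlier* position in topological order.  */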
4955 
4956 /* Clear all flags from insns in BB that could spoil its rescheduling.  */
4957 void
clear_outdated_rtx_info(basic_block bb)4958 clear_outdated_rtx_info (basic_block bb)
4959 {
4960   rtx_insn *insn;
4961 
4962   FOR_BB_INSNS (bb, insn)
4963     if (INSN_P (insn))
4964       {
4965 	SCHED_GROUP_P (insn) = 0;
4966 	INSN_AFTER_STALL_P (insn) = 0;
4967 	INSN_SCHED_TIMES (insn) = 0;
4968 	EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4969 
4970         /* We cannot use the changed caches, as previously we could ignore
4971            the LHS dependence due to enabled renaming and transform
4972            the expression, and currently we'll be unable to do this.  */
4973         htab_empty (INSN_TRANSFORMED_INSNS (insn));
4974       }
4975 }
4976 
4977 /* Add BB_NOTE to the pool of available basic block notes.  */
4978 static void
return_bb_to_pool(basic_block bb)4979 return_bb_to_pool (basic_block bb)
4980 {
4981   rtx_note *note = bb_note (bb);
4982 
4983   gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4984 	      && bb->aux == NULL);
4985 
4986   /* It turns out that current cfg infrastructure does not support
4987      reuse of basic blocks.  Don't bother for now.  */
4988   /*bb_note_pool.safe_push (note);*/
4989 }
4990 
4991 /* Get a bb_note from pool or return NULL_RTX if pool is empty.  */
4992 static rtx_note *
get_bb_note_from_pool(void)4993 get_bb_note_from_pool (void)
4994 {
4995   if (bb_note_pool.is_empty ())
4996     return NULL;
4997   else
4998     {
4999       rtx_note *note = bb_note_pool.pop ();
5000 
5001       SET_PREV_INSN (note) = NULL_RTX;
5002       SET_NEXT_INSN (note) = NULL_RTX;
5003 
5004       return note;
5005     }
5006 }
5007 
5008 /* Free bb_note_pool.  */
5009 void
free_bb_note_pool(void)5010 free_bb_note_pool (void)
5011 {
5012   bb_note_pool.release ();
5013 }
5014 
5015 /* Setup scheduler pool and successor structure.  */
5016 void
alloc_sched_pools(void)5017 alloc_sched_pools (void)
5018 {
5019   int succs_size;
5020 
5021   succs_size = MAX_WS + 1;
5022   succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
5023   succs_info_pool.size = succs_size;
5024   succs_info_pool.top = -1;
5025   succs_info_pool.max_top = -1;
5026 }
5027 
5028 /* Free the pools.  */
5029 void
free_sched_pools(void)5030 free_sched_pools (void)
5031 {
5032   int i;
5033 
5034   sched_lists_pool.release ();
5035   gcc_assert (succs_info_pool.top == -1);
5036   for (i = 0; i <= succs_info_pool.max_top; i++)
5037     {
5038       succs_info_pool.stack[i].succs_ok.release ();
5039       succs_info_pool.stack[i].succs_other.release ();
5040       succs_info_pool.stack[i].probs_ok.release ();
5041     }
5042   free (succs_info_pool.stack);
5043 }
5044 
5045 
5046 /* Returns a position in RGN where BB can be inserted retaining
5047    topological order.  */
5048 static int
find_place_to_insert_bb(basic_block bb,int rgn)5049 find_place_to_insert_bb (basic_block bb, int rgn)
5050 {
5051   bool has_preds_outside_rgn = false;
5052   edge e;
5053   edge_iterator ei;
5054 
5055   /* Find whether we have preds outside the region.  */
5056   FOR_EACH_EDGE (e, ei, bb->preds)
5057     if (!in_current_region_p (e->src))
5058       {
5059         has_preds_outside_rgn = true;
5060         break;
5061       }
5062 
5063   /* Recompute the top order -- needed when we have > 1 pred
5064      and in case we don't have preds outside.  */
5065   if (flag_sel_sched_pipelining_outer_loops
5066       && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
5067     {
5068       int i, bbi = bb->index, cur_bbi;
5069 
5070       recompute_rev_top_order ();
5071       for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
5072         {
5073           cur_bbi = BB_TO_BLOCK (i);
5074           if (rev_top_order_index[bbi]
5075               < rev_top_order_index[cur_bbi])
5076             break;
5077         }
5078 
5079       /* We skipped the right block, so we increase i.  We accommodate
5080          it for increasing by step later, so we decrease i.  */
5081       return (i + 1) - 1;
5082     }
5083   else if (has_preds_outside_rgn)
5084     {
5085       /* This is the case when we generate an extra empty block
5086          to serve as region head during pipelining.  */
5087       e = EDGE_SUCC (bb, 0);
5088       gcc_assert (EDGE_COUNT (bb->succs) == 1
5089                   && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
5090                   && (BLOCK_TO_BB (e->dest->index) == 0));
5091       return -1;
5092     }
5093 
5094   /* We don't have preds outside the region.  We should have
5095      a single pred, because the multiple-preds case comes from
5096      the pipelining of outer loops, and that is handled above.
5097      Just take the bbi of this single pred.  */
5098   if (EDGE_COUNT (bb->succs) > 0)
5099     {
5100       int pred_bbi;
5101 
5102       gcc_assert (EDGE_COUNT (bb->preds) == 1);
5103 
5104       pred_bbi = EDGE_PRED (bb, 0)->src->index;
5105       return BLOCK_TO_BB (pred_bbi);
5106     }
5107   else
5108     /* BB has no successors.  It is safe to put it at the end.  */
5109     return current_nr_blocks - 1;
5110 }
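
/* A worked example under assumed conditions (never compiled): with outer
   loop pipelining disabled and a region of blocks with ordinals 0..3, if
   NEW_BB's only predecessor has ordinal 1, the function returns 1 and the
   caller's increment places NEW_BB at ordinal 2, right after its
   predecessor.  NEW_BB and RGN are hypothetical locals.  */
#if 0
  int bbi = find_place_to_insert_bb (new_bb, rgn);	/* == 1 here.  */
  bbi += 1;						/* Insert at 2.  */
#endif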
5111 
5112 /* Deletes an empty basic block freeing its data.  */
5113 static void
5114 delete_and_free_basic_block (basic_block bb)
5115 {
5116   gcc_assert (sel_bb_empty_p (bb));
5117 
5118   if (BB_LV_SET (bb))
5119     free_lv_set (bb);
5120 
5121   bitmap_clear_bit (blocks_to_reschedule, bb->index);
5122 
5123   /* Can't assert av_set properties because we use sel_remove_bb
5124      when removing a loop preheader from the region.  At the point of
5125      removing the preheader we have already deallocated sel_region_bb_info.  */
5126   gcc_assert (BB_LV_SET (bb) == NULL
5127               && !BB_LV_SET_VALID_P (bb)
5128               && BB_AV_LEVEL (bb) == 0
5129               && BB_AV_SET (bb) == NULL);
5130 
5131   delete_basic_block (bb);
5132 }
5133 
5134 /* Add BB to the current region and update the region data.  */
5135 static void
5136 add_block_to_current_region (basic_block bb)
5137 {
5138   int i, pos, bbi = -2, rgn;
5139 
5140   rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5141   bbi = find_place_to_insert_bb (bb, rgn);
5142   bbi += 1;
5143   pos = RGN_BLOCKS (rgn) + bbi;
5144 
5145   gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5146               && ebb_head[bbi] == pos);
5147 
5148   /* Make a place for the new block.  */
5149   extend_regions ();
5150 
5151   for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5152     BLOCK_TO_BB (rgn_bb_table[i])++;
5153 
5154   memmove (rgn_bb_table + pos + 1,
5155            rgn_bb_table + pos,
5156            (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5157 
5158   /* Initialize data for BB.  */
5159   rgn_bb_table[pos] = bb->index;
5160   BLOCK_TO_BB (bb->index) = bbi;
5161   CONTAINING_RGN (bb->index) = rgn;
5162 
5163   RGN_NR_BLOCKS (rgn)++;
5164 
5165   for (i = rgn + 1; i <= nr_regions; i++)
5166     RGN_BLOCKS (i)++;
5167 }
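
/* A toy model of the memmove above (plain C, illustrative values only):
   entries from POS onwards shift right by one to open a slot, which then
   receives the new block's index; 3 stands for the number of used entries,
   as RGN_BLOCKS (nr_regions) does in the real code.  */
#if 0
  int table[5] = { 4, 7, 9, -1, -1 };	/* Region blocks 4, 7, 9.  */
  int pos = 2;				/* Insert just before block 9.  */
  memmove (table + pos + 1, table + pos, (3 - pos) * sizeof (*table));
  table[pos] = 11;			/* Table is now 4, 7, 11, 9.  */
#endif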
5168 
5169 /* Remove BB from the current region and update the region data.  */
5170 static void
5171 remove_bb_from_region (basic_block bb)
5172 {
5173   int i, pos, bbi = -2, rgn;
5174 
5175   rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5176   bbi = BLOCK_TO_BB (bb->index);
5177   pos = RGN_BLOCKS (rgn) + bbi;
5178 
5179   gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5180               && ebb_head[bbi] == pos);
5181 
5182   for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5183     BLOCK_TO_BB (rgn_bb_table[i])--;
5184 
5185   memmove (rgn_bb_table + pos,
5186            rgn_bb_table + pos + 1,
5187            (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5188 
5189   RGN_NR_BLOCKS (rgn)--;
5190   for (i = rgn + 1; i <= nr_regions; i++)
5191     RGN_BLOCKS (i)--;
5192 }
5193 
5194 /* Add BB to the current region and update all data.  If BB is NULL, add all
5195    blocks from last_added_blocks vector.  */
5196 static void
5197 sel_add_bb (basic_block bb)
5198 {
5199   /* Extend luids so that new notes will receive zero luids.  */
5200   sched_extend_luids ();
5201   sched_init_bbs ();
5202   sel_init_bbs (last_added_blocks);
5203 
5204   /* When BB is passed explicitly, the vector should contain
5205      exactly one element, equal to BB; otherwise, the vector
5206      must still exist.  */
5207   gcc_assert (last_added_blocks.exists ());
5208 
5209   if (bb != NULL)
5210     {
5211       gcc_assert (last_added_blocks.length () == 1
5212                   && last_added_blocks[0] == bb);
5213       add_block_to_current_region (bb);
5214 
5215       /* We associate creating/deleting data sets with the first insn
5216          appearing / disappearing in the bb.  */
5217       if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
5218 	create_initial_data_sets (bb);
5219 
5220       last_added_blocks.release ();
5221     }
5222   else
5223     /* BB is NULL - process LAST_ADDED_BLOCKS instead.  */
5224     {
5225       int i;
5226       basic_block temp_bb = NULL;
5227 
5228       for (i = 0;
5229            last_added_blocks.iterate (i, &bb); i++)
5230         {
5231           add_block_to_current_region (bb);
5232           temp_bb = bb;
5233         }
5234 
5235       /* We need to fetch at least one bb so we know the region
5236          to update.  */
5237       gcc_assert (temp_bb != NULL);
5238       bb = temp_bb;
5239 
5240       last_added_blocks.release ();
5241     }
5242 
5243   rgn_setup_region (CONTAINING_RGN (bb->index));
5244 }
5245 
5246 /* Remove BB from the current region and update all data.
5247    If REMOVE_FROM_CFG_P is true, also remove the block from the CFG.  */
5248 static void
5249 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5250 {
5251   unsigned idx = bb->index;
5252 
5253   gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5254 
5255   remove_bb_from_region (bb);
5256   return_bb_to_pool (bb);
5257   bitmap_clear_bit (blocks_to_reschedule, idx);
5258 
5259   if (remove_from_cfg_p)
5260     {
5261       basic_block succ = single_succ (bb);
5262       delete_and_free_basic_block (bb);
5263       set_immediate_dominator (CDI_DOMINATORS, succ,
5264                                recompute_dominator (CDI_DOMINATORS, succ));
5265     }
5266 
5267   rgn_setup_region (CONTAINING_RGN (idx));
5268 }
5269 
5270 /* Concatenate info of EMPTY_BB to info of MERGE_BB.  */
5271 static void
5272 move_bb_info (basic_block merge_bb, basic_block empty_bb)
5273 {
5274   if (in_current_region_p (merge_bb))
5275     concat_note_lists (BB_NOTE_LIST (empty_bb),
5276 		       &BB_NOTE_LIST (merge_bb));
5277   BB_NOTE_LIST (empty_bb) = NULL;
5278 
5279 }
5280 
5281 /* Remove EMPTY_BB.  If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5282    region, but keep it in CFG.  */
5283 static void
5284 remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5285 {
5286   /* The block should contain just a note or a label.
5287      We try to check whether it is unused below.  */
5288   gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5289               || LABEL_P (BB_HEAD (empty_bb)));
5290 
5291   /* If basic block has predecessors or successors, redirect them.  */
5292   if (remove_from_cfg_p
5293       && (EDGE_COUNT (empty_bb->preds) > 0
5294 	  || EDGE_COUNT (empty_bb->succs) > 0))
5295     {
5296       basic_block pred;
5297       basic_block succ;
5298 
5299       /* We need to init PRED and SUCC before redirecting edges.  */
5300       if (EDGE_COUNT (empty_bb->preds) > 0)
5301 	{
5302 	  edge e;
5303 
5304 	  gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5305 
5306 	  e = EDGE_PRED (empty_bb, 0);
5307           gcc_assert (e->src == empty_bb->prev_bb
5308 		      && (e->flags & EDGE_FALLTHRU));
5309 
5310 	  pred = empty_bb->prev_bb;
5311 	}
5312       else
5313 	pred = NULL;
5314 
5315       if (EDGE_COUNT (empty_bb->succs) > 0)
5316 	{
5317           /* We do not check for fallthruness here as above, because
5318              after removing a jump the edge may no longer be a fallthru.  */
5319 	  gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5320 	  succ = EDGE_SUCC (empty_bb, 0)->dest;
5321 	}
5322       else
5323 	succ = NULL;
5324 
5325       if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5326         {
5327           edge e = EDGE_PRED (empty_bb, 0);
5328 
5329           if (e->flags & EDGE_FALLTHRU)
5330             redirect_edge_succ_nodup (e, succ);
5331           else
5332             sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5333         }
5334 
5335       if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5336 	{
5337 	  edge e = EDGE_SUCC (empty_bb, 0);
5338 
5339 	  if (find_edge (pred, e->dest) == NULL)
5340 	    redirect_edge_pred (e, pred);
5341 	}
5342     }
5343 
5344   /* Finish removing.  */
5345   sel_remove_bb (empty_bb, remove_from_cfg_p);
5346 }
5347 
5348 /* An implementation of create_basic_block hook, which additionally updates
5349    per-bb data structures.  */
5350 static basic_block
5351 sel_create_basic_block (void *headp, void *endp, basic_block after)
5352 {
5353   basic_block new_bb;
5354   rtx_note *new_bb_note;
5355 
5356   gcc_assert (flag_sel_sched_pipelining_outer_loops
5357               || !last_added_blocks.exists ());
5358 
5359   new_bb_note = get_bb_note_from_pool ();
5360 
5361   if (new_bb_note == NULL_RTX)
5362     new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5363   else
5364     {
5365       new_bb = create_basic_block_structure ((rtx_insn *) headp,
5366 					     (rtx_insn *) endp,
5367 					     new_bb_note, after);
5368       new_bb->aux = NULL;
5369     }
5370 
5371   last_added_blocks.safe_push (new_bb);
5372 
5373   return new_bb;
5374 }
5375 
5376 /* Implement sched_init_only_bb ().  */
5377 static void
5378 sel_init_only_bb (basic_block bb, basic_block after)
5379 {
5380   gcc_assert (after == NULL);
5381 
5382   extend_regions ();
5383   rgn_make_new_region_out_of_new_block (bb);
5384 }
5385 
5386 /* Update the latch when we've split or merged it from block FROM to block TO.
5387    This should be checked for all outer loops, too.  */
5388 static void
5389 change_loops_latches (basic_block from, basic_block to)
5390 {
5391   gcc_assert (from != to);
5392 
5393   if (current_loop_nest)
5394     {
5395       class loop *loop;
5396 
5397       for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5398         if (considered_for_pipelining_p (loop) && loop->latch == from)
5399           {
5400             gcc_assert (loop == current_loop_nest);
5401             loop->latch = to;
5402             gcc_assert (loop_latch_edge (loop));
5403           }
5404     }
5405 }
5406 
5407 /* Splits BB into two basic blocks, adding it to the region and extending
5408    per-bb data structures.  Returns the newly created bb.  */
5409 static basic_block
5410 sel_split_block (basic_block bb, rtx after)
5411 {
5412   basic_block new_bb;
5413   insn_t insn;
5414 
5415   new_bb = sched_split_block_1 (bb, after);
5416   sel_add_bb (new_bb);
5417 
5418   /* This should be called after sel_add_bb, because this uses
5419      CONTAINING_RGN for the new block, which is not yet initialized.
5420      FIXME: this function may be a no-op now.  */
5421   change_loops_latches (bb, new_bb);
5422 
5423   /* Update ORIG_BB_INDEX for insns moved into the new block.  */
5424   FOR_BB_INSNS (new_bb, insn)
5425    if (INSN_P (insn))
5426      EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5427 
5428   if (sel_bb_empty_p (bb))
5429     {
5430       gcc_assert (!sel_bb_empty_p (new_bb));
5431 
5432       /* NEW_BB has data sets that need to be updated and BB holds
5433 	 data sets that should be removed.  Exchange these data sets
5434 	 so that we won't lose BB's valid data sets.  */
5435       exchange_data_sets (new_bb, bb);
5436       free_data_sets (bb);
5437     }
5438 
5439   if (!sel_bb_empty_p (new_bb)
5440       && bitmap_bit_p (blocks_to_reschedule, bb->index))
5441     bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5442 
5443   return new_bb;
5444 }
5445 
5446 /* If BB ends with a jump insn whose UID is at least PREV_MAX_UID, return it.
5447    Otherwise returns NULL.  */
5448 static rtx_insn *
5449 check_for_new_jump (basic_block bb, int prev_max_uid)
5450 {
5451   rtx_insn *end;
5452 
5453   end = sel_bb_end (bb);
5454   if (end && INSN_UID (end) >= prev_max_uid)
5455     return end;
5456   return NULL;
5457 }
5458 
5459 /* Look for a new jump either in the FROM block or in the newly created
5460    JUMP_BB block.  New means having a UID at least equal to PREV_MAX_UID.  */
5461 static rtx_insn *
5462 find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5463 {
5464   rtx_insn *jump;
5465 
5466   /* Return immediately if no new insns were emitted.  */
5467   if (get_max_uid () == prev_max_uid)
5468     return NULL;
5469 
5470   /* Now check both blocks for new jumps.  There can only ever be one.  */
5471   if ((jump = check_for_new_jump (from, prev_max_uid)))
5472     return jump;
5473 
5474   if (jump_bb != NULL
5475       && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5476     return jump;
5477   return NULL;
5478 }
5479 
5480 /* Splits E and adds the newly created basic block to the current region.
5481    Returns this basic block.  */
5482 basic_block
5483 sel_split_edge (edge e)
5484 {
5485   basic_block new_bb, src, other_bb = NULL;
5486   int prev_max_uid;
5487   rtx_insn *jump;
5488 
5489   src = e->src;
5490   prev_max_uid = get_max_uid ();
5491   new_bb = split_edge (e);
5492 
5493   if (flag_sel_sched_pipelining_outer_loops
5494       && current_loop_nest)
5495     {
5496       int i;
5497       basic_block bb;
5498 
5499       /* Some of the basic blocks might not have been added to the loop.
5500          Add them here, until this is fixed in force_fallthru.  */
5501       for (i = 0;
5502            last_added_blocks.iterate (i, &bb); i++)
5503         if (!bb->loop_father)
5504           {
5505             add_bb_to_loop (bb, e->dest->loop_father);
5506 
5507             gcc_assert (!other_bb && (new_bb->index != bb->index));
5508             other_bb = bb;
5509           }
5510     }
5511 
5512   /* Add all last_added_blocks to the region.  */
5513   sel_add_bb (NULL);
5514 
5515   jump = find_new_jump (src, new_bb, prev_max_uid);
5516   if (jump)
5517     sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5518 
5519   /* Put the correct lv set on this block.  */
5520   if (other_bb && !sel_bb_empty_p (other_bb))
5521     compute_live (sel_bb_head (other_bb));
5522 
5523   return new_bb;
5524 }
5525 
5526 /* Implement sched_create_empty_bb ().  */
5527 static basic_block
5528 sel_create_empty_bb (basic_block after)
5529 {
5530   basic_block new_bb;
5531 
5532   new_bb = sched_create_empty_bb_1 (after);
5533 
5534   /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5535      later.  */
5536   gcc_assert (last_added_blocks.length () == 1
5537 	      && last_added_blocks[0] == new_bb);
5538 
5539   last_added_blocks.release ();
5540   return new_bb;
5541 }
5542 
5543 /* Implement sched_create_recovery_block.  ORIG_INSN is the insn at which
5544    the block will be split to insert a check.  */
5545 basic_block
5546 sel_create_recovery_block (insn_t orig_insn)
5547 {
5548   basic_block first_bb, second_bb, recovery_block;
5549   basic_block before_recovery = NULL;
5550   rtx_insn *jump;
5551 
5552   first_bb = BLOCK_FOR_INSN (orig_insn);
5553   if (sel_bb_end_p (orig_insn))
5554     {
5555       /* Avoid introducing an empty block while splitting.  */
5556       gcc_assert (single_succ_p (first_bb));
5557       second_bb = single_succ (first_bb);
5558     }
5559   else
5560     second_bb = sched_split_block (first_bb, orig_insn);
5561 
5562   recovery_block = sched_create_recovery_block (&before_recovery);
5563   if (before_recovery)
5564     copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR_FOR_FN (cfun));
5565 
5566   gcc_assert (sel_bb_empty_p (recovery_block));
5567   sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5568   if (current_loops != NULL)
5569     add_bb_to_loop (recovery_block, first_bb->loop_father);
5570 
5571   sel_add_bb (recovery_block);
5572 
5573   jump = BB_END (recovery_block);
5574   gcc_assert (sel_bb_head (recovery_block) == jump);
5575   sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5576 
5577   return recovery_block;
5578 }
5579 
5580 /* Merge basic block B into basic block A.  */
5581 static void
5582 sel_merge_blocks (basic_block a, basic_block b)
5583 {
5584   gcc_assert (sel_bb_empty_p (b)
5585               && EDGE_COUNT (b->preds) == 1
5586               && EDGE_PRED (b, 0)->src == b->prev_bb);
5587 
5588   move_bb_info (b->prev_bb, b);
5589   remove_empty_bb (b, false);
5590   merge_blocks (a, b);
5591   change_loops_latches (b, a);
5592 }
5593 
5594 /* A wrapper for redirect_edge_and_branch_force, which also initializes
5595    data structures for possibly created bb and insns.  */
5596 void
5597 sel_redirect_edge_and_branch_force (edge e, basic_block to)
5598 {
5599   basic_block jump_bb, src, orig_dest = e->dest;
5600   int prev_max_uid;
5601   rtx_insn *jump;
5602   int old_seqno = -1;
5603 
5604   /* This function is now used only for bookkeeping code creation, where
5605      ORIG_DEST never has a single predecessor, and thus we will not
5606      hit unreachable blocks when updating dominator info.  */
5607   gcc_assert (!sel_bb_empty_p (e->src)
5608               && !single_pred_p (orig_dest));
5609   src = e->src;
5610   prev_max_uid = get_max_uid ();
5611   /* Compute and pass old_seqno down to sel_init_new_insn only for the case
5612      when the conditional jump being redirected may become unconditional.  */
5613   if (any_condjump_p (BB_END (src))
5614       && INSN_SEQNO (BB_END (src)) >= 0)
5615     old_seqno = INSN_SEQNO (BB_END (src));
5616 
5617   jump_bb = redirect_edge_and_branch_force (e, to);
5618   if (jump_bb != NULL)
5619     sel_add_bb (jump_bb);
5620 
5621   /* So far this function cannot spoil the loop structure, so there is
5622      nothing to update here.  But assert the latch edge just to be sure.  */
5623   if (current_loop_nest
5624       && pipelining_p)
5625     gcc_assert (loop_latch_edge (current_loop_nest));
5626 
5627   jump = find_new_jump (src, jump_bb, prev_max_uid);
5628   if (jump)
5629     sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP,
5630 		       old_seqno);
5631   set_immediate_dominator (CDI_DOMINATORS, to,
5632 			   recompute_dominator (CDI_DOMINATORS, to));
5633   set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5634 			   recompute_dominator (CDI_DOMINATORS, orig_dest));
5635   if (jump && sel_bb_head_p (jump))
5636     compute_live (jump);
5637 }
5638 
5639 /* A wrapper for redirect_edge_and_branch.  Return TRUE if blocks connected by
5640    redirected edge are in reverse topological order.  */
5641 bool
5642 sel_redirect_edge_and_branch (edge e, basic_block to)
5643 {
5644   bool latch_edge_p;
5645   basic_block src, orig_dest = e->dest;
5646   int prev_max_uid;
5647   rtx_insn *jump;
5648   edge redirected;
5649   bool recompute_toporder_p = false;
5650   bool maybe_unreachable = single_pred_p (orig_dest);
5651   int old_seqno = -1;
5652 
5653   latch_edge_p = (pipelining_p
5654                   && current_loop_nest
5655                   && e == loop_latch_edge (current_loop_nest));
5656 
5657   src = e->src;
5658   prev_max_uid = get_max_uid ();
5659 
5660   /* Compute and pass old_seqno down to sel_init_new_insn only for the case
5661      when the conditional jump being redirected may become unconditional.  */
5662   if (any_condjump_p (BB_END (src))
5663       && INSN_SEQNO (BB_END (src)) >= 0)
5664     old_seqno = INSN_SEQNO (BB_END (src));
5665 
5666   redirected = redirect_edge_and_branch (e, to);
5667 
5668   gcc_assert (redirected && !last_added_blocks.exists ());
5669 
5670   /* When we've redirected a latch edge, update the header.  */
5671   if (latch_edge_p)
5672     {
5673       current_loop_nest->header = to;
5674       gcc_assert (loop_latch_edge (current_loop_nest));
5675     }
5676 
5677   /* In rare situations, the topological relation between the blocks connected
5678      by the redirected edge can change (see PR42245 for an example).  Update
5679      block_to_bb/bb_to_block.  */
5680   if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5681       && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5682     recompute_toporder_p = true;
5683 
5684   jump = find_new_jump (src, NULL, prev_max_uid);
5685   if (jump)
5686     sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP, old_seqno);
5687 
5688   /* Only update dominator info when we don't have unreachable blocks.
5689      Otherwise we'll update in maybe_tidy_empty_bb.  */
5690   if (!maybe_unreachable)
5691     {
5692       set_immediate_dominator (CDI_DOMINATORS, to,
5693                                recompute_dominator (CDI_DOMINATORS, to));
5694       set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5695                                recompute_dominator (CDI_DOMINATORS, orig_dest));
5696     }
5697   if (jump && sel_bb_head_p (jump))
5698     compute_live (jump);
5699   return recompute_toporder_p;
5700 }
5701 
5702 /* This variable holds the cfg hooks used by the selective scheduler.  */
5703 static struct cfg_hooks sel_cfg_hooks;
5704 
5705 /* Register sel-sched cfg hooks.  */
5706 void
5707 sel_register_cfg_hooks (void)
5708 {
5709   sched_split_block = sel_split_block;
5710 
5711   orig_cfg_hooks = get_cfg_hooks ();
5712   sel_cfg_hooks = orig_cfg_hooks;
5713 
5714   sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5715 
5716   set_cfg_hooks (sel_cfg_hooks);
5717 
5718   sched_init_only_bb = sel_init_only_bb;
5719   sched_split_block = sel_split_block;
5720   sched_create_empty_bb = sel_create_empty_bb;
5721 }
5722 
5723 /* Unregister sel-sched cfg hooks.  */
5724 void
5725 sel_unregister_cfg_hooks (void)
5726 {
5727   sched_create_empty_bb = NULL;
5728   sched_split_block = NULL;
5729   sched_init_only_bb = NULL;
5730 
5731   set_cfg_hooks (orig_cfg_hooks);
5732 }
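
/* A hedged usage sketch (never compiled): any code that may create or
   split basic blocks is expected to be bracketed by the pair of functions
   above, so that new blocks go through sel_create_basic_block and land in
   last_added_blocks.  */
#if 0
  sel_register_cfg_hooks ();
  /* ... CFG transformations ...  */
  sel_unregister_cfg_hooks ();
#endif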
5733 
5734 
5735 /* Emit an insn rtx based on PATTERN.  If a jump insn is wanted,
5736    LABEL is where this jump should be directed.  */
5737 rtx_insn *
5738 create_insn_rtx_from_pattern (rtx pattern, rtx label)
5739 {
5740   rtx_insn *insn_rtx;
5741 
5742   gcc_assert (!INSN_P (pattern));
5743 
5744   start_sequence ();
5745 
5746   if (label == NULL_RTX)
5747     insn_rtx = emit_insn (pattern);
5748   else if (DEBUG_INSN_P (label))
5749     insn_rtx = emit_debug_insn (pattern);
5750   else
5751     {
5752       insn_rtx = emit_jump_insn (pattern);
5753       JUMP_LABEL (insn_rtx) = label;
5754       ++LABEL_NUSES (label);
5755     }
5756 
5757   end_sequence ();
5758 
5759   sched_extend_luids ();
5760   sched_extend_target ();
5761   sched_deps_init (false);
5762 
5763   /* Initialize INSN_CODE now.  */
5764   recog_memoized (insn_rtx);
5765   return insn_rtx;
5766 }
5767 
5768 /* Create a new vinsn for INSN_RTX.  FORCE_UNIQUE_P is true when the vinsn
5769    must not be clonable.  */
5770 vinsn_t
5771 create_vinsn_from_insn_rtx (rtx_insn *insn_rtx, bool force_unique_p)
5772 {
5773   gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5774 
5775   /* If VINSN_TYPE is not USE, retain its uniqueness.  */
5776   return vinsn_create (insn_rtx, force_unique_p);
5777 }
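
/* Illustrative only (never compiled): a typical pairing of the two helpers
   above, turning a bare pattern into an insn rtx and then into a unique
   vinsn.  PATTERN is a hypothetical local.  */
#if 0
  rtx_insn *insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);
  vinsn_t vi = create_vinsn_from_insn_rtx (insn_rtx, true);
#endif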
5778 
5779 /* Create a copy of INSN_RTX.  */
5780 rtx_insn *
5781 create_copy_of_insn_rtx (rtx insn_rtx)
5782 {
5783   rtx_insn *res;
5784   rtx link;
5785 
5786   if (DEBUG_INSN_P (insn_rtx))
5787     return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5788 					 insn_rtx);
5789 
5790   gcc_assert (NONJUMP_INSN_P (insn_rtx));
5791 
5792   res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5793                                       NULL_RTX);
5794 
5795   /* Locate the end of existing REG_NOTES in RES.  */
5796   rtx *ptail = &REG_NOTES (res);
5797   while (*ptail != NULL_RTX)
5798     ptail = &XEXP (*ptail, 1);
5799 
5800   /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND
5801      since mark_jump_label will make them.  REG_LABEL_TARGETs are created
5802      there too, but are supposed to be sticky, so we copy them.  */
5803   for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1))
5804     if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND
5805 	&& REG_NOTE_KIND (link) != REG_EQUAL
5806 	&& REG_NOTE_KIND (link) != REG_EQUIV)
5807       {
5808 	*ptail = duplicate_reg_note (link);
5809 	ptail = &XEXP (*ptail, 1);
5810       }
5811 
5812   return res;
5813 }
5814 
5815 /* Change vinsn field of EXPR to hold NEW_VINSN.  */
5816 void
5817 change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5818 {
5819   vinsn_detach (EXPR_VINSN (expr));
5820 
5821   EXPR_VINSN (expr) = new_vinsn;
5822   vinsn_attach (new_vinsn);
5823 }
5824 
5825 /* Helpers for global init.  */
5826 /* This structure is used so that we can call the existing bundling
5827    mechanism and calculate insn priorities.  */
5828 static struct haifa_sched_info sched_sel_haifa_sched_info =
5829 {
5830   NULL, /* init_ready_list */
5831   NULL, /* can_schedule_ready_p */
5832   NULL, /* schedule_more_p */
5833   NULL, /* new_ready */
5834   NULL, /* rgn_rank */
5835   sel_print_insn, /* rgn_print_insn */
5836   contributes_to_priority,
5837   NULL, /* insn_finishes_block_p */
5838 
5839   NULL, NULL,
5840   NULL, NULL,
5841   0, 0,
5842 
5843   NULL, /* add_remove_insn */
5844   NULL, /* begin_schedule_ready */
5845   NULL, /* begin_move_insn */
5846   NULL, /* advance_target_bb */
5847 
5848   NULL,
5849   NULL,
5850 
5851   SEL_SCHED | NEW_BBS
5852 };
5853 
5854 /* Setup special insns used in the scheduler.  */
5855 void
5856 setup_nop_and_exit_insns (void)
5857 {
5858   gcc_assert (nop_pattern == NULL_RTX
5859 	      && exit_insn == NULL_RTX);
5860 
5861   nop_pattern = constm1_rtx;
5862 
5863   start_sequence ();
5864   emit_insn (nop_pattern);
5865   exit_insn = get_insns ();
5866   end_sequence ();
5867   set_block_for_insn (exit_insn, EXIT_BLOCK_PTR_FOR_FN (cfun));
5868 }
5869 
5870 /* Free special insns used in the scheduler.  */
5871 void
5872 free_nop_and_exit_insns (void)
5873 {
5874   exit_insn = NULL;
5875   nop_pattern = NULL_RTX;
5876 }
5877 
5878 /* Setup a special vinsn used in new insns initialization.  */
5879 void
5880 setup_nop_vinsn (void)
5881 {
5882   nop_vinsn = vinsn_create (exit_insn, false);
5883   vinsn_attach (nop_vinsn);
5884 }
5885 
5886 /* Free a special vinsn used in new insns initialization.  */
5887 void
5888 free_nop_vinsn (void)
5889 {
5890   gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5891   vinsn_detach (nop_vinsn);
5892   nop_vinsn = NULL;
5893 }
5894 
5895 /* Call a set_sched_flags hook.  */
5896 void
5897 sel_set_sched_flags (void)
5898 {
5899   /* ??? This means that set_sched_flags was called, and we decided to
5900      support speculation.  However, set_sched_flags also modifies flags
5901      on current_sched_info, doing this only at global init.  Since we
5902      sometimes change current_sched_info later, set the correct flags again.  */
5903   if (spec_info && targetm.sched.set_sched_flags)
5904     targetm.sched.set_sched_flags (spec_info);
5905 }
5906 
5907 /* Setup pointers to global sched info structures.  */
5908 void
5909 sel_setup_sched_infos (void)
5910 {
5911   rgn_setup_common_sched_info ();
5912 
5913   memcpy (&sel_common_sched_info, common_sched_info,
5914 	  sizeof (sel_common_sched_info));
5915 
5916   sel_common_sched_info.fix_recovery_cfg = NULL;
5917   sel_common_sched_info.add_block = NULL;
5918   sel_common_sched_info.estimate_number_of_insns
5919     = sel_estimate_number_of_insns;
5920   sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5921   sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5922 
5923   common_sched_info = &sel_common_sched_info;
5924 
5925   current_sched_info = &sched_sel_haifa_sched_info;
5926   current_sched_info->sched_max_insns_priority =
5927     get_rgn_sched_max_insns_priority ();
5928 
5929   sel_set_sched_flags ();
5930 }
5931 
5932 
5933 /* Adds basic block BB to region RGN at position *BB_ORD_INDEX,
5934    incrementing *BB_ORD_INDEX afterwards.  */
5935 static void
5936 sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5937 {
5938   RGN_NR_BLOCKS (rgn) += 1;
5939   RGN_DONT_CALC_DEPS (rgn) = 0;
5940   RGN_HAS_REAL_EBB (rgn) = 0;
5941   CONTAINING_RGN (bb->index) = rgn;
5942   BLOCK_TO_BB (bb->index) = *bb_ord_index;
5943   rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5944   (*bb_ord_index)++;
5945 
5946   /* FIXME: it is true only when not scheduling ebbs.  */
5947   RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5948 }
5949 
5950 /* Functions to support pipelining of outer loops.  */
5951 
5952 /* Creates a new empty region and returns its number.  */
5953 static int
5954 sel_create_new_region (void)
5955 {
5956   int new_rgn_number = nr_regions;
5957 
5958   RGN_NR_BLOCKS (new_rgn_number) = 0;
5959 
5960   /* FIXME: This will work only when EBBs are not created.  */
5961   if (new_rgn_number != 0)
5962     RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5963       RGN_NR_BLOCKS (new_rgn_number - 1);
5964   else
5965     RGN_BLOCKS (new_rgn_number) = 0;
5966 
5967   /* Set the blocks of the next region so the other functions may
5968      calculate the number of blocks in the region.  */
5969   RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5970     RGN_NR_BLOCKS (new_rgn_number);
5971 
5972   nr_regions++;
5973 
5974   return new_rgn_number;
5975 }
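
/* The bookkeeping above keeps RGN_BLOCKS acting as a prefix sum over
   region sizes.  A sketch of the invariant (never compiled; it holds only
   while no EBBs are created, as the FIXMEs note):  */
#if 0
  int r = 0;	/* Any existing region number.  */
  gcc_assert (RGN_BLOCKS (r + 1) == RGN_BLOCKS (r) + RGN_NR_BLOCKS (r));
#endif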
5976 
5977 /* If X has a smaller topological sort number than Y, returns -1;
5978    if greater, returns 1.  */
5979 static int
5980 bb_top_order_comparator (const void *x, const void *y)
5981 {
5982   basic_block bb1 = *(const basic_block *) x;
5983   basic_block bb2 = *(const basic_block *) y;
5984 
5985   gcc_assert (bb1 == bb2
5986 	      || rev_top_order_index[bb1->index]
5987 		 != rev_top_order_index[bb2->index]);
5988 
5989   /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5990      bbs with greater number should go earlier.  */
5991   if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5992     return -1;
5993   else
5994     return 1;
5995 }
5996 
5997 /* Create a region for LOOP and return its number.  If we don't want
5998    to pipeline LOOP, return -1.  */
5999 static int
6000 make_region_from_loop (class loop *loop)
6001 {
6002   unsigned int i;
6003   int new_rgn_number = -1;
6004   class loop *inner;
6005 
6006   /* Basic block index, to be assigned to BLOCK_TO_BB.  */
6007   int bb_ord_index = 0;
6008   basic_block *loop_blocks;
6009   basic_block preheader_block;
6010 
6011   if (loop->num_nodes
6012       > (unsigned) param_max_pipeline_region_blocks)
6013     return -1;
6014 
6015   /* Don't pipeline loops whose latch belongs to one of their inner loops.  */
6016   for (inner = loop->inner; inner; inner = inner->inner)
6017     if (flow_bb_inside_loop_p (inner, loop->latch))
6018       return -1;
6019 
6020   loop->ninsns = num_loop_insns (loop);
6021   if ((int) loop->ninsns > param_max_pipeline_region_insns)
6022     return -1;
6023 
6024   loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
6025 
6026   for (i = 0; i < loop->num_nodes; i++)
6027     if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
6028       {
6029 	free (loop_blocks);
6030 	return -1;
6031       }
6032 
6033   preheader_block = loop_preheader_edge (loop)->src;
6034   gcc_assert (preheader_block);
6035   gcc_assert (loop_blocks[0] == loop->header);
6036 
6037   new_rgn_number = sel_create_new_region ();
6038 
6039   sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
6040   bitmap_set_bit (bbs_in_loop_rgns, preheader_block->index);
6041 
6042   for (i = 0; i < loop->num_nodes; i++)
6043     {
6044       /* Add only those blocks that haven't been scheduled in the inner loop.
6045 	 The exception is the basic blocks with bookkeeping code - they should
6046 	 be added to the region (and they actually don't belong to the loop
6047 	 body, but to the region containing that loop body).  */
6048 
6049       gcc_assert (new_rgn_number >= 0);
6050 
6051       if (! bitmap_bit_p (bbs_in_loop_rgns, loop_blocks[i]->index))
6052 	{
6053 	  sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
6054                                    new_rgn_number);
6055 	  bitmap_set_bit (bbs_in_loop_rgns, loop_blocks[i]->index);
6056 	}
6057     }
6058 
6059   free (loop_blocks);
6060   MARK_LOOP_FOR_PIPELINING (loop);
6061 
6062   return new_rgn_number;
6063 }
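
/* A sketch of the resulting layout (never compiled): for a loop with
   preheader P, header H and latch L, the new region is ordered
   {P, H, ..., L}, so the preheader gets ordinal zero -- the property that
   sel_is_loop_preheader_p relies on further down.  PREHEADER_BLOCK reuses
   the local name from the function above, for illustration only.  */
#if 0
  gcc_assert (BLOCK_TO_BB (preheader_block->index) == 0);
#endif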
6064 
6065 /* Create a new region from preheader blocks LOOP_BLOCKS.  */
6066 void
6067 make_region_from_loop_preheader (vec<basic_block> *&loop_blocks)
6068 {
6069   unsigned int i;
6070   int new_rgn_number = -1;
6071   basic_block bb;
6072 
6073   /* Basic block index, to be assigned to BLOCK_TO_BB.  */
6074   int bb_ord_index = 0;
6075 
6076   new_rgn_number = sel_create_new_region ();
6077 
6078   FOR_EACH_VEC_ELT (*loop_blocks, i, bb)
6079     {
6080       gcc_assert (new_rgn_number >= 0);
6081 
6082       sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
6083     }
6084 
6085   vec_free (loop_blocks);
6086 }
6087 
6088 
6089 /* Create region(s) from loop nest LOOP, such that inner loops will be
6090    pipelined before outer loops.  Returns true when a region for LOOP
6091    is created.  */
6092 static bool
6093 make_regions_from_loop_nest (class loop *loop)
6094 {
6095   class loop *cur_loop;
6096   int rgn_number;
6097 
6098   /* Traverse all inner nodes of the loop.  */
6099   for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
6100     if (! bitmap_bit_p (bbs_in_loop_rgns, cur_loop->header->index))
6101       return false;
6102 
6103   /* At this moment all regular inner loops should have been pipelined.
6104      Try to create a region from this loop.  */
6105   rgn_number = make_region_from_loop (loop);
6106 
6107   if (rgn_number < 0)
6108     return false;
6109 
6110   loop_nests.safe_push (loop);
6111   return true;
6112 }
6113 
6114 /* Initialize the data structures needed for pipelining.  */
6115 void
6116 sel_init_pipelining (void)
6117 {
6118   /* Collect loop information to be used in outer loops pipelining.  */
6119   loop_optimizer_init (LOOPS_HAVE_PREHEADERS
6120                        | LOOPS_HAVE_FALLTHRU_PREHEADERS
6121 		       | LOOPS_HAVE_RECORDED_EXITS
6122 		       | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
6123   current_loop_nest = NULL;
6124 
6125   bbs_in_loop_rgns = sbitmap_alloc (last_basic_block_for_fn (cfun));
6126   bitmap_clear (bbs_in_loop_rgns);
6127 
6128   recompute_rev_top_order ();
6129 }
6130 
6131 /* Returns a class loop for region RGN.  */
6132 loop_p
6133 get_loop_nest_for_rgn (unsigned int rgn)
6134 {
6135   /* Regions created with extend_rgns don't have corresponding loop nests,
6136      because they don't represent loops.  */
6137   if (rgn < loop_nests.length ())
6138     return loop_nests[rgn];
6139   else
6140     return NULL;
6141 }
6142 
6143 /* True when LOOP was included in the pipelining regions.  */
6144 bool
6145 considered_for_pipelining_p (class loop *loop)
6146 {
6147   if (loop_depth (loop) == 0)
6148     return false;
6149 
6150   /* Now, the loop could be too large or irreducible.  Check whether its
6151      region is in LOOP_NESTS.
6152      We determine the region number of LOOP as the region number of its
6153      latch.  We can't use the header here, because it could be a
6154      just-removed preheader, which would give us the wrong region number;
6155      the latch may lie in an inner loop, so we only assert its region index is sane.  */
6156   if (LOOP_MARKED_FOR_PIPELINING_P (loop))
6157     {
6158       int rgn = CONTAINING_RGN (loop->latch->index);
6159 
6160       gcc_assert ((unsigned) rgn < loop_nests.length ());
6161       return true;
6162     }
6163 
6164   return false;
6165 }
6166 
6167 /* Makes regions from the rest of the blocks, after loops are chosen
6168    for pipelining.  */
6169 static void
6170 make_regions_from_the_rest (void)
6171 {
6172   int cur_rgn_blocks;
6173   int *loop_hdr;
6174   int i;
6175 
6176   basic_block bb;
6177   edge e;
6178   edge_iterator ei;
6179   int *degree;
6180 
6181   /* Index in rgn_bb_table where to start allocating new regions.  */
6182   cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
6183 
6184   /* Make regions from all the remaining basic blocks -- those that don't
6185      belong to any loop or belong to irreducible loops.  Prepare the data
6186      structures for extend_rgns.  */
6187 
6188   /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
6189      LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
6190      loop.  */
6191   loop_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
6192   degree = XCNEWVEC (int, last_basic_block_for_fn (cfun));
6193 
6194 
6195   /* For each basic block that belongs to some loop, assign the number
6196      of the innermost loop it belongs to.  */
6197   for (i = 0; i < last_basic_block_for_fn (cfun); i++)
6198     loop_hdr[i] = -1;
6199 
6200   FOR_EACH_BB_FN (bb, cfun)
6201     {
6202       if (bb->loop_father && bb->loop_father->num != 0
6203 	  && !(bb->flags & BB_IRREDUCIBLE_LOOP))
6204 	loop_hdr[bb->index] = bb->loop_father->num;
6205     }
6206 
6207   /* For each basic block, degree is calculated as the number of incoming
6208      edges that come from bbs that are not yet scheduled.
6209      The basic blocks that have been scheduled have a degree value of zero.  */
6210   FOR_EACH_BB_FN (bb, cfun)
6211     {
6212       degree[bb->index] = 0;
6213 
6214       if (!bitmap_bit_p (bbs_in_loop_rgns, bb->index))
6215 	{
6216 	  FOR_EACH_EDGE (e, ei, bb->preds)
6217 	    if (!bitmap_bit_p (bbs_in_loop_rgns, e->src->index))
6218 	      degree[bb->index]++;
6219 	}
6220       else
6221 	degree[bb->index] = -1;
6222     }
6223 
6224   extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
6225 
6226   /* Any block that did not end up in a region is placed into a region
6227      by itself.  */
6228   FOR_EACH_BB_FN (bb, cfun)
6229     if (degree[bb->index] >= 0)
6230       {
6231 	rgn_bb_table[cur_rgn_blocks] = bb->index;
6232 	RGN_NR_BLOCKS (nr_regions) = 1;
6233 	RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
6234         RGN_DONT_CALC_DEPS (nr_regions) = 0;
6235 	RGN_HAS_REAL_EBB (nr_regions) = 0;
6236 	CONTAINING_RGN (bb->index) = nr_regions++;
6237 	BLOCK_TO_BB (bb->index) = 0;
6238       }
6239 
6240   free (degree);
6241   free (loop_hdr);
6242 }
6243 
6244 /* Free data structures used in pipelining of loops.  */
6245 void sel_finish_pipelining (void)
6246 {
6247   /* Release aux fields so we don't free them later by mistake.  */
6248   for (auto loop : loops_list (cfun, 0))
6249     loop->aux = NULL;
6250 
6251   loop_optimizer_finalize ();
6252 
6253   loop_nests.release ();
6254 
6255   free (rev_top_order_index);
6256   rev_top_order_index = NULL;
6257 }
6258 
6259 /* This function replaces find_rgns when
6260    FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set.  */
6261 void
6262 sel_find_rgns (void)
6263 {
6264   sel_init_pipelining ();
6265   extend_regions ();
6266 
6267   if (current_loops)
6268     {
6269       unsigned flags = flag_sel_sched_pipelining_outer_loops
6270 			 ? LI_FROM_INNERMOST
6271 			 : LI_ONLY_INNERMOST;
6272 
6273       for (auto loop : loops_list (cfun, flags))
6274 	make_regions_from_loop_nest (loop);
6275     }
6276 
6277   /* Make regions from all the remaining basic blocks and schedule them.
6278      These blocks include blocks that don't belong to any loop or belong
6279      to irreducible loops.  */
6280   make_regions_from_the_rest ();
6281 
6282   /* We don't need bbs_in_loop_rgns anymore.  */
6283   sbitmap_free (bbs_in_loop_rgns);
6284   bbs_in_loop_rgns = NULL;
6285 }
6286 
6287 /* Add the preheader blocks of the previous loop to the current region, taking
6288    them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record them in *BBS.
6289    This function is only used with -fsel-sched-pipelining-outer-loops.  */
6290 void
6291 sel_add_loop_preheaders (bb_vec_t *bbs)
6292 {
6293   int i;
6294   basic_block bb;
6295   vec<basic_block> *preheader_blocks
6296     = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6297 
6298   if (!preheader_blocks)
6299     return;
6300 
6301   for (i = 0; preheader_blocks->iterate (i, &bb); i++)
6302     {
6303       bbs->safe_push (bb);
6304       last_added_blocks.safe_push (bb);
6305       sel_add_bb (bb);
6306     }
6307 
6308   vec_free (preheader_blocks);
6309 }
6310 
6311 /* While pipelining outer loops, returns TRUE if BB is a loop preheader.
6312    Please note that the function should also work when pipelining_p is
6313    false, because it is used when deciding whether we should or should
6314    not reschedule pipelined code.  */
6315 bool
6316 sel_is_loop_preheader_p (basic_block bb)
6317 {
6318   if (current_loop_nest)
6319     {
6320       class loop *outer;
6321 
6322       if (preheader_removed)
6323         return false;
6324 
6325       /* Preheader is the first block in the region.  */
6326       if (BLOCK_TO_BB (bb->index) == 0)
6327         return true;
6328 
6329       /* We used to find a preheader with the topological information.
6330          Check that the above code is equivalent to what we did before.  */
6331 
6332       if (in_current_region_p (current_loop_nest->header))
6333 	gcc_assert (!(BLOCK_TO_BB (bb->index)
6334 		      < BLOCK_TO_BB (current_loop_nest->header->index)));
6335 
6336       /* The latch block of an outer loop must never end up here:
6337          assert that this situation is impossible.  */
6338       for (outer = loop_outer (current_loop_nest);
6339 	   outer;
6340 	   outer = loop_outer (outer))
6341         if (considered_for_pipelining_p (outer) && outer->latch == bb)
6342           gcc_unreachable ();
6343     }
6344 
6345   return false;
6346 }
6347 
6348 /* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
6349    can be removed, making the corresponding edge fallthrough (assuming that
6350    all basic blocks between JUMP_BB and DEST_BB are empty).  */
6351 static bool
6352 bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
6353 {
6354   if (!onlyjump_p (BB_END (jump_bb))
6355       || tablejump_p (BB_END (jump_bb), NULL, NULL))
6356     return false;
6357 
6358   /* Several outgoing edges, abnormal edge or destination of jump is
6359      not DEST_BB.  */
6360   if (EDGE_COUNT (jump_bb->succs) != 1
6361       || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
6362       || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6363     return false;
6364 
6365   /* None of the above applies, so the jump is removable.  */
6366   return true;
6367 }
6368 
6369 /* Removes the loop preheader from the current region and saves it in
6370    PREHEADER_BLOCKS of the outer loop, so the blocks will later be added
6371    to the region that represents that outer loop.  */
6372 static void
6373 sel_remove_loop_preheader (void)
6374 {
6375   int i, old_len;
6376   int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6377   basic_block bb;
6378   bool all_empty_p = true;
6379   vec<basic_block> *preheader_blocks
6380     = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6381 
6382   vec_check_alloc (preheader_blocks, 0);
6383 
6384   gcc_assert (current_loop_nest);
6385   old_len = preheader_blocks->length ();
6386 
6387   /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS.  */
6388   for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6389     {
6390       bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
6391 
6392       /* If the basic block belongs to the region but doesn't belong to
6393          the corresponding loop, then it should be a preheader.  */
6394       if (sel_is_loop_preheader_p (bb))
6395         {
6396           preheader_blocks->safe_push (bb);
6397           if (BB_END (bb) != bb_note (bb))
6398             all_empty_p = false;
6399         }
6400     }
6401 
6402   /* Remove these blocks only after iterating over the whole region.  */
6403   for (i = preheader_blocks->length () - 1; i >= old_len; i--)
6404     {
6405       bb = (*preheader_blocks)[i];
6406       sel_remove_bb (bb, false);
6407     }
6408 
6409   if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6410     {
6411       if (!all_empty_p)
6412         /* Immediately create new region from preheader.  */
6413         make_region_from_loop_preheader (preheader_blocks);
6414       else
6415         {
6416           /* If all preheader blocks are empty, don't create a new empty
6417              region.  Instead, remove them completely.  */
6418           FOR_EACH_VEC_ELT (*preheader_blocks, i, bb)
6419             {
6420               edge e;
6421               edge_iterator ei;
6422               basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6423 
6424               /* Redirect all incoming edges to next basic block.  */
6425               for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6426                 {
6427                   if (! (e->flags & EDGE_FALLTHRU))
6428                     redirect_edge_and_branch (e, bb->next_bb);
6429                   else
6430                     redirect_edge_succ (e, bb->next_bb);
6431                 }
6432               gcc_assert (BB_NOTE_LIST (bb) == NULL);
6433               delete_and_free_basic_block (bb);
6434 
6435               /* Check if after deleting the preheader there is an
6436                  unconditional jump in PREV_BB that leads to the next basic
6437                  block NEXT_BB.  If so, delete this jump and clear the data
6438                  sets of its basic block if it becomes empty.  */
6439 	      if (next_bb->prev_bb == prev_bb
6440 		  && prev_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
6441                   && bb_has_removable_jump_to_p (prev_bb, next_bb))
6442                 {
6443                   redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6444                   if (BB_END (prev_bb) == bb_note (prev_bb))
6445                     free_data_sets (prev_bb);
6446                 }
6447 
6448               set_immediate_dominator (CDI_DOMINATORS, next_bb,
6449                                        recompute_dominator (CDI_DOMINATORS,
6450                                                             next_bb));
6451             }
6452         }
6453       vec_free (preheader_blocks);
6454     }
6455   else
6456     /* Store preheader within the father's loop structure.  */
6457     SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6458 			       preheader_blocks);
6459 }
6460 
6461 #endif
6462