/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "rtl.h"
#include "df.h"
#include "tm_p.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"
#include "params.h"
#include "target.h"
#include "sched-int.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */

#ifdef INSN_SCHEDULING
#include "regset.h"
#include "cfgloop.h"
#include "sel-sched-ir.h"
/* We don't have to use it except for sel_print_insn.  */
#include "sel-sched-dump.h"

/* A vector holding bb info for the whole scheduling pass.  */
vec<sel_global_bb_info_def> sel_global_bb_info = vNULL;

/* A vector holding bb info for the current region.  */
vec<sel_region_bb_info_def> sel_region_bb_info = vNULL;

/* A pool for allocating all lists.  */
object_allocator<_list_node> sched_lists_pool ("sel-sched-lists");

/* This contains information about successors for compute_av_set.  */
struct succs_info current_succs;

/* Data structure to describe interaction with the generic scheduler utils.  */
static struct common_sched_info_def sel_common_sched_info;

/* The loop nest being pipelined.  */
struct loop *current_loop_nest;

/* LOOP_NESTS is a vector containing the corresponding loop nest for
   each region.  */
static vec<loop_p> loop_nests = vNULL;

/* Saves blocks already in loop regions, indexed by bb->index.  */
static sbitmap bbs_in_loop_rgns = NULL;

/* CFG hooks that are saved before changing the create_basic_block hook.  */
static struct cfg_hooks orig_cfg_hooks;


/* Array containing the reverse topological index of function basic blocks,
   indexed by BB->INDEX.  */
static int *rev_top_order_index = NULL;

/* Length of the above array.  */
static int rev_top_order_index_len = -1;

/* A regset pool structure.  */
static struct
{
  /* The stack to which regsets are returned.  */
  regset *v;

  /* Its stack pointer (the number of regsets currently on the stack).  */
  int n;

  /* Its allocated size.  */
  int s;

  /* In VV we save all generated regsets so that, when destructing the
     pool, we can compare it with V and check that every regset was returned
     back to the pool.  */
  regset *vv;

  /* The stack pointer of VV.  */
  int nn;

  /* Its allocated size.  */
  int ss;

  /* The difference between allocated and returned regsets.  */
  int diff;
} regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };

/* This represents the nop pool.  */
static struct
{
  /* The vector which holds previously emitted nops.  */
  insn_t *v;

  /* Its stack pointer (the number of nops currently on the stack).  */
  int n;

  /* Its allocated size.  */
  int s;
} nop_pool = { NULL, 0, 0 };

/* The pool for basic block notes.  */
static vec<rtx_note *> bb_note_pool;

/* A NOP pattern used to emit placeholder insns.  */
rtx nop_pattern = NULL_RTX;
/* A special instruction that resides in EXIT_BLOCK.
   EXIT_INSN is a successor of the insns that lead to EXIT_BLOCK.  */
rtx_insn *exit_insn = NULL;

/* TRUE if, while scheduling the current region (which is a loop), its
   preheader was removed.  */
bool preheader_removed = false;


/* Forward static declarations.  */
static void fence_clear (fence_t);

static void deps_init_id (idata_t, insn_t, bool);
static void init_id_from_df (idata_t, insn_t, bool);
static expr_t set_insn_init (expr_t, vinsn_t, int);

static void cfg_preds (basic_block, insn_t **, int *);
static void prepare_insn_expr (insn_t, int);
static void free_history_vect (vec<expr_history_def> &);

static void move_bb_info (basic_block, basic_block);
static void remove_empty_bb (basic_block, bool);
static void sel_merge_blocks (basic_block, basic_block);
static void sel_remove_loop_preheader (void);
static bool bb_has_removable_jump_to_p (basic_block, basic_block);

static bool insn_is_the_only_one_in_bb_p (insn_t);
static void create_initial_data_sets (basic_block);

static void free_av_set (basic_block);
static void invalidate_av_set (basic_block);
static void extend_insn_data (void);
static void sel_init_new_insn (insn_t, int, int = -1);
static void finish_insns (void);

/* Various list functions.  */

/* Copy an instruction list L.  */
ilist_t
ilist_copy (ilist_t l)
{
  ilist_t head = NULL, *tailp = &head;

  while (l)
    {
      ilist_add (tailp, ILIST_INSN (l));
      tailp = &ILIST_NEXT (*tailp);
      l = ILIST_NEXT (l);
    }

  return head;
}

/* Invert an instruction list L.  */
ilist_t
ilist_invert (ilist_t l)
{
  ilist_t res = NULL;

  while (l)
    {
      ilist_add (&res, ILIST_INSN (l));
      l = ILIST_NEXT (l);
    }

  return res;
}

/* Add a new boundary to the LP list with parameters TO, PTR, and DC.  */
void
blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
{
  bnd_t bnd;

  _list_add (lp);
  bnd = BLIST_BND (*lp);

  BND_TO (bnd) = to;
  BND_PTR (bnd) = ptr;
  BND_AV (bnd) = NULL;
  BND_AV1 (bnd) = NULL;
  BND_DC (bnd) = dc;
}

/* Remove the list node pointed to by LP.  */
void
blist_remove (blist_t *lp)
{
  bnd_t b = BLIST_BND (*lp);

  av_set_clear (&BND_AV (b));
  av_set_clear (&BND_AV1 (b));
  ilist_clear (&BND_PTR (b));

  _list_remove (lp);
}

/* Init a fence tail L.  */
void
flist_tail_init (flist_tail_t l)
{
  FLIST_TAIL_HEAD (l) = NULL;
  FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
}

/* Try to find a fence corresponding to INSN in L.  */
fence_t
flist_lookup (flist_t l, insn_t insn)
{
  while (l)
    {
      if (FENCE_INSN (FLIST_FENCE (l)) == insn)
	return FLIST_FENCE (l);

      l = FLIST_NEXT (l);
    }

  return NULL;
}

/* Init the fields of F before running fill_insns.  */
static void
init_fence_for_scheduling (fence_t f)
{
  FENCE_BNDS (f) = NULL;
  FENCE_PROCESSED_P (f) = false;
  FENCE_SCHEDULED_P (f) = false;
}

/* Add a new fence consisting of INSN and STATE to the list pointed to
   by LP.  */
static void
flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
	   insn_t last_scheduled_insn, vec<rtx_insn *, va_gc> *executing_insns,
	   int *ready_ticks, int ready_ticks_size, insn_t sched_next,
	   int cycle, int cycle_issued_insns, int issue_more,
	   bool starts_cycle_p, bool after_stall_p)
{
  fence_t f;

  _list_add (lp);
  f = FLIST_FENCE (*lp);

  FENCE_INSN (f) = insn;

  gcc_assert (state != NULL);
  FENCE_STATE (f) = state;

  FENCE_CYCLE (f) = cycle;
  FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
  FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
  FENCE_AFTER_STALL_P (f) = after_stall_p;

  gcc_assert (dc != NULL);
  FENCE_DC (f) = dc;

  gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
  FENCE_TC (f) = tc;

  FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
  FENCE_ISSUE_MORE (f) = issue_more;
  FENCE_EXECUTING_INSNS (f) = executing_insns;
  FENCE_READY_TICKS (f) = ready_ticks;
  FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
  FENCE_SCHED_NEXT (f) = sched_next;

  init_fence_for_scheduling (f);
}
/* Remove the head node of the list pointed to by LP.  */
static void
flist_remove (flist_t *lp)
{
  if (FENCE_INSN (FLIST_FENCE (*lp)))
    fence_clear (FLIST_FENCE (*lp));
  _list_remove (lp);
}

/* Clear the fence list pointed to by LP.  */
void
flist_clear (flist_t *lp)
{
  while (*lp)
    flist_remove (lp);
}

/* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL.  */
void
def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
{
  def_t d;

  _list_add (dl);
  d = DEF_LIST_DEF (*dl);

  d->orig_insn = original_insn;
  d->crosses_call = crosses_call;
}


/* Functions to work with target contexts.  */

/* Bulk target context.  It is convenient for debugging purposes to ensure
   that there are no uninitialized (null) target contexts.  */
static tc_t bulk_tc = (tc_t) 1;

/* Target hooks wrappers.  In the future we can provide some default
   implementations for them.  */

/* Allocate a store for the target context.  */
static tc_t
alloc_target_context (void)
{
  return (targetm.sched.alloc_sched_context
	  ? targetm.sched.alloc_sched_context () : bulk_tc);
}

/* Init target context TC.
   If CLEAN_P is true, then initialize TC as if at the beginning of the
   scheduler.  Otherwise, copy the current backend context to TC.  */
static void
init_target_context (tc_t tc, bool clean_p)
{
  if (targetm.sched.init_sched_context)
    targetm.sched.init_sched_context (tc, clean_p);
}

/* Allocate and initialize a target context.  The meaning of CLEAN_P is
   the same as in init_target_context ().  */
tc_t
create_target_context (bool clean_p)
{
  tc_t tc = alloc_target_context ();

  init_target_context (tc, clean_p);
  return tc;
}

/* Copy TC to the current backend context.  */
void
set_target_context (tc_t tc)
{
  if (targetm.sched.set_sched_context)
    targetm.sched.set_sched_context (tc);
}

/* TC is about to be destroyed.  Free any internal data.  */
static void
clear_target_context (tc_t tc)
{
  if (targetm.sched.clear_sched_context)
    targetm.sched.clear_sched_context (tc);
}

/* Clear and free TC.  */
static void
delete_target_context (tc_t tc)
{
  clear_target_context (tc);

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (tc);
}

/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook.  */
static void
copy_target_context (tc_t to, tc_t from)
{
  tc_t tmp = create_target_context (false);

  set_target_context (from);
  init_target_context (to, false);

  set_target_context (tmp);
  delete_target_context (tmp);
}

/* Create a copy of TC.  */
static tc_t
create_copy_of_target_context (tc_t tc)
{
  tc_t copy = alloc_target_context ();

  copy_target_context (copy, tc);

  return copy;
}

/* Clear TC and initialize it according to CLEAN_P.  The meaning of CLEAN_P
   is the same as in init_target_context ().  */
void
reset_target_context (tc_t tc, bool clean_p)
{
  clear_target_context (tc);
  init_target_context (tc, clean_p);
}
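
/* Illustrative sketch (not part of the pass): the wrappers above are
   normally used in a create/copy/delete cycle around the backend hooks,
   e.g.

     tc_t tc = create_target_context (true);
     ...
     tc_t copy = create_copy_of_target_context (tc);
     set_target_context (copy);
     ...
     delete_target_context (copy);
     delete_target_context (tc);

   assuming the backend provides the targetm.sched.*_sched_context hooks;
   without them, the shared BULK_TC placeholder stands in for a real
   context.  */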

/* Functions to work with dependence contexts.
   Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for
   dependence context.  It accumulates information about processed insns
   to decide if the current insn is dependent on the processed ones.  */

/* Make a copy of FROM in TO.  */
static void
copy_deps_context (deps_t to, deps_t from)
{
  init_deps (to, false);
  deps_join (to, from);
}

/* Allocate store for a dep context.  */
static deps_t
alloc_deps_context (void)
{
  return XNEW (struct deps_desc);
}

/* Allocate and initialize a dep context.  */
static deps_t
create_deps_context (void)
{
  deps_t dc = alloc_deps_context ();

  init_deps (dc, false);
  return dc;
}

/* Create a copy of FROM.  */
static deps_t
create_copy_of_deps_context (deps_t from)
{
  deps_t to = alloc_deps_context ();

  copy_deps_context (to, from);
  return to;
}

/* Clean up internal data of DC.  */
static void
clear_deps_context (deps_t dc)
{
  free_deps (dc);
}

/* Clear and free DC.  */
static void
delete_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  free (dc);
}

/* Clear and init DC.  */
static void
reset_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  init_deps (dc, false);
}

/* This structure describes the dependence analysis hooks for advancing
   a dependence context.  */
static struct sched_deps_info_def advance_deps_context_sched_deps_info =
  {
    NULL,

    NULL, /* start_insn */
    NULL, /* finish_insn */
    NULL, /* start_lhs */
    NULL, /* finish_lhs */
    NULL, /* start_rhs */
    NULL, /* finish_rhs */
    haifa_note_reg_set,
    haifa_note_reg_clobber,
    haifa_note_reg_use,
    NULL, /* note_mem_dep */
    NULL, /* note_dep */

    0, 0, 0
  };

/* Process INSN and add its impact on DC.  */
void
advance_deps_context (deps_t dc, insn_t insn)
{
  sched_deps_info = &advance_deps_context_sched_deps_info;
  deps_analyze_insn (dc, insn);
}
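
/* Illustrative sketch (not part of the pass): a deps context accumulates
   the effects of insns one at a time, e.g.

     deps_t dc = create_deps_context ();
     for (insn = from; insn != to; insn = NEXT_INSN (insn))
       advance_deps_context (dc, insn);
     ...
     delete_deps_context (dc);

   where FROM and TO are hypothetical iteration bounds; afterwards DC
   describes what the processed insns read and wrote, so dependence of a
   new insn on them can be decided.  */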

/* Functions to work with DFA states.  */

/* Allocate store for a DFA state.  */
static state_t
state_alloc (void)
{
  return xmalloc (dfa_state_size);
}

/* Allocate and initialize a DFA state.  */
static state_t
state_create (void)
{
  state_t state = state_alloc ();

  state_reset (state);
  advance_state (state);
  return state;
}

/* Free DFA state.  */
static void
state_free (state_t state)
{
  free (state);
}

/* Make a copy of FROM in TO.  */
static void
state_copy (state_t to, state_t from)
{
  memcpy (to, from, dfa_state_size);
}

/* Create a copy of FROM.  */
static state_t
state_create_copy (state_t from)
{
  state_t to = state_alloc ();

  state_copy (to, from);
  return to;
}
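
/* Illustrative sketch (not part of the pass): DFA states are opaque
   blobs of dfa_state_size bytes, so a state can be saved around a trial
   issue and restored afterwards:

     state_t saved = state_create_copy (state);
     ... try issuing an insn on STATE ...
     state_copy (state, saved);
     state_free (saved);

   where STATE is a hypothetical live state_t.  */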

/* Functions to work with fences.  */

/* Clear the fence F.  */
static void
fence_clear (fence_t f)
{
  state_t s = FENCE_STATE (f);
  deps_t dc = FENCE_DC (f);
  void *tc = FENCE_TC (f);

  ilist_clear (&FENCE_BNDS (f));

  gcc_assert ((s != NULL && dc != NULL && tc != NULL)
	      || (s == NULL && dc == NULL && tc == NULL));

  free (s);

  if (dc != NULL)
    delete_deps_context (dc);

  if (tc != NULL)
    delete_target_context (tc);
  vec_free (FENCE_EXECUTING_INSNS (f));
  free (FENCE_READY_TICKS (f));
  FENCE_READY_TICKS (f) = NULL;
}

/* Init a list of fences with successors of OLD_FENCE.  */
void
init_fences (insn_t old_fence)
{
  insn_t succ;
  succ_iterator si;
  bool first = true;
  int ready_ticks_size = get_max_uid () + 1;

  FOR_EACH_SUCC_1 (succ, si, old_fence,
		   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      if (first)
	first = false;
      else
	gcc_assert (flag_sel_sched_pipelining_outer_loops);

      flist_add (&fences, succ,
		 state_create (),
		 create_deps_context () /* dc */,
		 create_target_context (true) /* tc */,
		 NULL /* last_scheduled_insn */,
		 NULL, /* executing_insns */
		 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
		 ready_ticks_size,
		 NULL /* sched_next */,
		 1 /* cycle */, 0 /* cycle_issued_insns */,
		 issue_rate, /* issue_more */
		 1 /* starts_cycle_p */, 0 /* after_stall_p */);
    }
}
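
/* Illustrative sketch (not part of the pass): the scheduler proper seeds
   the fence list from the head of the region, conceptually

     init_fences (region_head_insn);

   where REGION_HEAD_INSN is a hypothetical name for the first insn of
   the region.  Each successor gets a fresh DFA state, deps context and
   target context, so scheduling starts from a clean machine model; more
   than one successor is only allowed when outer-loop pipelining is
   enabled, per the assert above.  */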

/* Merge two fences (filling the fields of fence F with the resulting
   values) according to the following rules:
   1) state, target context and last scheduled insn are propagated from
      the fallthrough edge if it is available;
   2) deps context and cycle are propagated from the more probable edge;
   3) all other fields are set to the corresponding constant values.

   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
   READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
   and AFTER_STALL_P are the corresponding fields of the second fence.  */
static void
merge_fences (fence_t f, insn_t insn,
	      state_t state, deps_t dc, void *tc,
	      rtx_insn *last_scheduled_insn,
	      vec<rtx_insn *, va_gc> *executing_insns,
	      int *ready_ticks, int ready_ticks_size,
	      rtx sched_next, int cycle, int issue_more, bool after_stall_p)
{
  insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);

  gcc_assert (sel_bb_head_p (FENCE_INSN (f))
	      && !sched_next && !FENCE_SCHED_NEXT (f));

  /* Check if we can decide from which path the fences came.
     If we can't (or don't want to), reset all.  */
  if (last_scheduled_insn == NULL
      || last_scheduled_insn_old == NULL
      /* This is a case when INSN is reachable on several paths from
	 one insn (this can happen when pipelining of outer loops is on and
	 there are two edges: one going around the inner loop and the other
	 right through it); in such a case just reset everything.  */
      || last_scheduled_insn == last_scheduled_insn_old)
    {
      state_reset (FENCE_STATE (f));
      state_free (state);

      reset_deps_context (FENCE_DC (f));
      delete_deps_context (dc);

      reset_target_context (FENCE_TC (f), true);
      delete_target_context (tc);

      if (cycle > FENCE_CYCLE (f))
	FENCE_CYCLE (f) = cycle;

      FENCE_LAST_SCHEDULED_INSN (f) = NULL;
      FENCE_ISSUE_MORE (f) = issue_rate;
      vec_free (executing_insns);
      free (ready_ticks);
      if (FENCE_EXECUTING_INSNS (f))
	FENCE_EXECUTING_INSNS (f)->block_remove (0,
					  FENCE_EXECUTING_INSNS (f)->length ());
      if (FENCE_READY_TICKS (f))
	memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
    }
  else
    {
      edge edge_old = NULL, edge_new = NULL;
      edge candidate;
      succ_iterator si;
      insn_t succ;

      /* Find the fallthrough edge.  */
      gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
      candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);

      if (!candidate
	  || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
	      && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
	{
	  /* No fallthrough edge leading to the basic block of INSN.  */
	  state_reset (FENCE_STATE (f));
	  state_free (state);

	  reset_target_context (FENCE_TC (f), true);
	  delete_target_context (tc);

	  FENCE_LAST_SCHEDULED_INSN (f) = NULL;
	  FENCE_ISSUE_MORE (f) = issue_rate;
	}
      else if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
	{
	  /* It would be strange if the same insn were the successor of
	     several fallthrough edges.  */
	  gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
		      != BLOCK_FOR_INSN (last_scheduled_insn_old));

	  state_free (FENCE_STATE (f));
	  FENCE_STATE (f) = state;

	  delete_target_context (FENCE_TC (f));
	  FENCE_TC (f) = tc;

	  FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
	  FENCE_ISSUE_MORE (f) = issue_more;
	}
      else
	{
	  /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched.  */
	  state_free (state);
	  delete_target_context (tc);

	  gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
		      != BLOCK_FOR_INSN (last_scheduled_insn));
	}
      /* Find the edge of the first predecessor
	 (last_scheduled_insn_old->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
		       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
	{
	  if (succ == insn)
	    {
	      /* No same successor allowed from several edges.  */
	      gcc_assert (!edge_old);
	      edge_old = si.e1;
	    }
	}
      /* Find the edge of the second predecessor
	 (last_scheduled_insn->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
		       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
	{
	  if (succ == insn)
	    {
	      /* No same successor allowed from several edges.  */
	      gcc_assert (!edge_new);
	      edge_new = si.e1;
	    }
	}

      /* Check if we can choose the most probable predecessor.  */
      if (edge_old == NULL || edge_new == NULL)
	{
	  reset_deps_context (FENCE_DC (f));
	  delete_deps_context (dc);
	  vec_free (executing_insns);
	  free (ready_ticks);

	  FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
	  if (FENCE_EXECUTING_INSNS (f))
	    FENCE_EXECUTING_INSNS (f)->block_remove (0,
				      FENCE_EXECUTING_INSNS (f)->length ());
	  if (FENCE_READY_TICKS (f))
	    memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
	}
      else if (edge_new->probability > edge_old->probability)
	{
	  delete_deps_context (FENCE_DC (f));
	  FENCE_DC (f) = dc;
	  vec_free (FENCE_EXECUTING_INSNS (f));
	  FENCE_EXECUTING_INSNS (f) = executing_insns;
	  free (FENCE_READY_TICKS (f));
	  FENCE_READY_TICKS (f) = ready_ticks;
	  FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
	  FENCE_CYCLE (f) = cycle;
	}
      else
	{
	  /* Leave DC and CYCLE untouched.  */
	  delete_deps_context (dc);
	  vec_free (executing_insns);
	  free (ready_ticks);
	}
    }

  /* Fill the remaining invariant fields.  */
  if (after_stall_p)
    FENCE_AFTER_STALL_P (f) = 1;

  FENCE_ISSUED_INSNS (f) = 0;
  FENCE_STARTS_CYCLE_P (f) = 1;
  FENCE_SCHED_NEXT (f) = NULL;
}
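
/* Illustrative sketch (not part of the pass): merge_fences handles the
   case when two scheduling paths converge on the same fence insn, e.g.
   a fallthru edge from one block and a jump edge from another both
   leading to INSN.  Per the rules above, the DFA state and target
   context come from the fallthru predecessor, the deps context and
   cycle from the more probable edge, and the per-cycle fields are
   reset.  When the paths cannot be told apart (or there is no usable
   fallthru edge), everything is conservatively reset instead.  */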

/* Add a new fence to the NEW_FENCES list, initializing it from all
   other parameters.  */
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
	       state_t state, deps_t dc, void *tc,
	       rtx_insn *last_scheduled_insn,
	       vec<rtx_insn *, va_gc> *executing_insns, int *ready_ticks,
	       int ready_ticks_size, rtx_insn *sched_next, int cycle,
	       int cycle_issued_insns, int issue_rate,
	       bool starts_cycle_p, bool after_stall_p)
{
  fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);

  if (!f)
    {
      flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
		 last_scheduled_insn, executing_insns, ready_ticks,
		 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
		 issue_rate, starts_cycle_p, after_stall_p);

      FLIST_TAIL_TAILP (new_fences)
	= &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
    }
  else
    {
      merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
		    executing_insns, ready_ticks, ready_ticks_size,
		    sched_next, cycle, issue_rate, after_stall_p);
    }
}

/* Move the first fence in the OLD_FENCES list to NEW_FENCES.  */
void
move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
{
  fence_t f, old;
  flist_t *tailp = FLIST_TAIL_TAILP (new_fences);

  old = FLIST_FENCE (old_fences);
  f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
		    FENCE_INSN (FLIST_FENCE (old_fences)));
  if (f)
    {
      merge_fences (f, old->insn, old->state, old->dc, old->tc,
		    old->last_scheduled_insn, old->executing_insns,
		    old->ready_ticks, old->ready_ticks_size,
		    old->sched_next, old->cycle, old->issue_more,
		    old->after_stall_p);
    }
  else
    {
      _list_add (tailp);
      FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
      *FLIST_FENCE (*tailp) = *old;
      init_fence_for_scheduling (FLIST_FENCE (*tailp));
    }
  FENCE_INSN (old) = NULL;
}

/* Add a new fence to the NEW_FENCES list and initialize most of its data
   as a clean one.  */
void
add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int ready_ticks_size = get_max_uid () + 1;

  add_to_fences (new_fences,
		 succ, state_create (), create_deps_context (),
		 create_target_context (true),
		 NULL, NULL,
		 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
		 NULL, FENCE_CYCLE (fence) + 1,
		 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
}

/* Add a new fence to the NEW_FENCES list and initialize all of its data
   from FENCE and SUCC.  */
void
add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int *new_ready_ticks
    = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));

  memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
	  FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
  add_to_fences (new_fences,
		 succ, state_create_copy (FENCE_STATE (fence)),
		 create_copy_of_deps_context (FENCE_DC (fence)),
		 create_copy_of_target_context (FENCE_TC (fence)),
		 FENCE_LAST_SCHEDULED_INSN (fence),
		 vec_safe_copy (FENCE_EXECUTING_INSNS (fence)),
		 new_ready_ticks,
		 FENCE_READY_TICKS_SIZE (fence),
		 FENCE_SCHED_NEXT (fence),
		 FENCE_CYCLE (fence),
		 FENCE_ISSUED_INSNS (fence),
		 FENCE_ISSUE_MORE (fence),
		 FENCE_STARTS_CYCLE_P (fence),
		 FENCE_AFTER_STALL_P (fence));
}

/* Functions to work with regset and nop pools.  */

/* Return a new regset from the pool.  It might have some of the bits set
   from previous usage.  */
regset
get_regset_from_pool (void)
{
  regset rs;

  if (regset_pool.n != 0)
    rs = regset_pool.v[--regset_pool.n];
  else
    /* We need to create the regset.  */
    {
      rs = ALLOC_REG_SET (&reg_obstack);

      if (regset_pool.nn == regset_pool.ss)
	regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
				     (regset_pool.ss = 2 * regset_pool.ss + 1));
      regset_pool.vv[regset_pool.nn++] = rs;
    }

  regset_pool.diff++;

  return rs;
}

/* Same as above, but returns an empty regset.  */
regset
get_clear_regset_from_pool (void)
{
  regset rs = get_regset_from_pool ();

  CLEAR_REG_SET (rs);
  return rs;
}

/* Return regset RS to the pool for future use.  */
void
return_regset_to_pool (regset rs)
{
  gcc_assert (rs);
  regset_pool.diff--;

  if (regset_pool.n == regset_pool.s)
    regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
				(regset_pool.s = 2 * regset_pool.s + 1));
  regset_pool.v[regset_pool.n++] = rs;
}
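
/* Illustrative sketch (not part of the pass): every regset taken from
   the pool must eventually come back to it:

     regset tmp = get_clear_regset_from_pool ();
     ... use TMP ...
     return_regset_to_pool (tmp);

   The pool tracks the allocated-minus-returned count in REGSET_POOL.DIFF,
   and free_regset_pool () asserts it is zero, catching leaks.  */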

/* This is used as a qsort callback for sorting regset pool stacks.
   X and XX are addresses of two regsets.  They are never equal.  */
static int
cmp_v_in_regset_pool (const void *x, const void *xx)
{
  uintptr_t r1 = (uintptr_t) *((const regset *) x);
  uintptr_t r2 = (uintptr_t) *((const regset *) xx);
  if (r1 > r2)
    return 1;
  else if (r1 < r2)
    return -1;
  gcc_unreachable ();
}

/* Free the regset pool, possibly checking for memory leaks.  */
void
free_regset_pool (void)
{
  if (flag_checking)
    {
      regset *v = regset_pool.v;
      int i = 0;
      int n = regset_pool.n;

      regset *vv = regset_pool.vv;
      int ii = 0;
      int nn = regset_pool.nn;

      int diff = 0;

      gcc_assert (n <= nn);

      /* Sort both vectors so it will be possible to compare them.  */
      qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
      qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);

      while (ii < nn)
	{
	  if (v[i] == vv[ii])
	    i++;
	  else
	    /* VV[II] was lost.  */
	    diff++;

	  ii++;
	}

      gcc_assert (diff == regset_pool.diff);
    }

  /* If this is not true, we have a memory leak.  */
  gcc_assert (regset_pool.diff == 0);

  while (regset_pool.n)
    {
      --regset_pool.n;
      FREE_REG_SET (regset_pool.v[regset_pool.n]);
    }

  free (regset_pool.v);
  regset_pool.v = NULL;
  regset_pool.s = 0;

  free (regset_pool.vv);
  regset_pool.vv = NULL;
  regset_pool.nn = 0;
  regset_pool.ss = 0;

  regset_pool.diff = 0;
}


/* Functions to work with nop pools.  NOP insns are used as temporary
   placeholders of the insns being scheduled to allow correct update of
   the data sets.  When the update is finished, NOPs are deleted.  */

/* A vinsn that is used to represent a nop.  This vinsn is shared among all
   nops sel-sched generates.  */
static vinsn_t nop_vinsn = NULL;

/* Emit a nop before INSN, taking it from the pool.  */
insn_t
get_nop_from_pool (insn_t insn)
{
  rtx nop_pat;
  insn_t nop;
  bool old_p = nop_pool.n != 0;
  int flags;

  if (old_p)
    nop_pat = nop_pool.v[--nop_pool.n];
  else
    nop_pat = nop_pattern;

  nop = emit_insn_before (nop_pat, insn);

  if (old_p)
    flags = INSN_INIT_TODO_SSID;
  else
    flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;

  set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
  sel_init_new_insn (nop, flags);

  return nop;
}

/* Remove NOP from the instruction stream and return it to the pool.  */
void
return_nop_to_pool (insn_t nop, bool full_tidying)
{
  gcc_assert (INSN_IN_STREAM_P (nop));
  sel_remove_insn (nop, false, full_tidying);

  /* We'll recycle this nop.  */
  nop->set_undeleted ();

  if (nop_pool.n == nop_pool.s)
    nop_pool.v = XRESIZEVEC (rtx_insn *, nop_pool.v,
			     (nop_pool.s = 2 * nop_pool.s + 1));
  nop_pool.v[nop_pool.n++] = nop;
}
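
/* Illustrative sketch (not part of the pass): a nop placeholder is
   typically alive only while the data sets are updated:

     insn_t nop = get_nop_from_pool (insn);
     ... update availability / liveness sets ...
     return_nop_to_pool (nop, true);

   so the emitted pattern is recycled instead of being regenerated from
   NOP_PATTERN every time.  */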

/* Free the nop pool.  */
void
free_nop_pool (void)
{
  nop_pool.n = 0;
  nop_pool.s = 0;
  free (nop_pool.v);
  nop_pool.v = NULL;
}


/* Skip an unspec to support ia64 speculation.  Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY in case something needs to be skipped.  */
static int
skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx *ny)
{
  const_rtx x = *xx;
  const_rtx y = *yy;

  if (GET_CODE (x) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
	  || targetm.sched.skip_rtx_p (x)))
    {
      *nx = XVECEXP (x, 0, 0);
      *ny = CONST_CAST_RTX (y);
      return 1;
    }

  if (GET_CODE (y) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
	  || targetm.sched.skip_rtx_p (y)))
    {
      *nx = CONST_CAST_RTX (x);
      *ny = XVECEXP (y, 0, 0);
      return 1;
    }

  return 0;
}

/* Callback, called from hash_rtx_cb.  Helps to hash an UNSPEC rtx X in a
   correct way to support ia64 speculation.  When a change is needed, the
   new rtx NX and new mode NMODE are written, and the callback returns
   true.  */
static int
hash_with_unspec_callback (const_rtx x, machine_mode mode ATTRIBUTE_UNUSED,
			   rtx *nx, machine_mode *nmode)
{
  if (GET_CODE (x) == UNSPEC
      && targetm.sched.skip_rtx_p
      && targetm.sched.skip_rtx_p (x))
    {
      *nx = XVECEXP (x, 0, 0);
      *nmode = VOIDmode;
      return 1;
    }

  return 0;
}
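
/* Illustrative sketch (not part of the pass): with these callbacks, a
   speculative UNSPEC wrapper and its operand hash and compare as equal.
   Assuming the target's skip_rtx_p hook accepts the wrapper, e.g.

     (unspec [(reg:DI r1)] N)   vs.   (reg:DI r1)

   rtx_equal_p_cb and hash_rtx_cb treat both as the same rtx, which is
   what lets a speculative load match its original non-speculative
   form.  */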

/* Return true if LHS and RHS are OK to be scheduled separately.  */
static bool
lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
{
  if (lhs == NULL || rhs == NULL)
    return false;

  /* Do not schedule constants as rhs: there is no point in using a reg
     if a const can be used.  Moreover, scheduling a const as an rhs may
     lead to a mode mismatch, because consts don't have modes but they
     could be merged from branches where the same const is used in
     different modes.  */
  if (CONSTANT_P (rhs))
    return false;

  /* ??? Do not rename predicate registers to avoid ICEs in bundling.  */
  if (COMPARISON_P (rhs))
    return false;

  /* Do not allow a single REG to be an rhs.  */
  if (REG_P (rhs))
    return false;

  /* See the comment at find_used_regs_1 (*1) for an explanation of this
     restriction.  */
  /* FIXME: remove this later.  */
  if (MEM_P (lhs))
    return false;

  /* This will filter out all tricky things like ZERO_EXTRACT etc.,
     which we don't handle for now.  */
  if (!REG_P (lhs) && !MEM_P (lhs))
    return false;

  return true;
}
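
/* Illustrative sketch (not part of the pass): under the rules above an
   insn like

     (set (reg:SI r1) (plus:SI (reg:SI r2) (reg:SI r3)))

   is separable (its rhs may be scheduled into a different target
   register), while sets of a constant, a bare register copy, a
   comparison, or a store to memory are not.  */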

/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When
   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This
   is used e.g. for insns from recovery blocks.  */
static void
vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
{
  hash_rtx_callback_function hrcf;
  int insn_class;

  VINSN_INSN_RTX (vi) = insn;
  VINSN_COUNT (vi) = 0;
  vi->cost = -1;

  if (INSN_NOP_P (insn))
    return;

  if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
    init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
  else
    deps_init_id (VINSN_ID (vi), insn, force_unique_p);

  /* Hash vinsn depending on whether it is separable or not.  */
  hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
  if (VINSN_SEPARABLE_P (vi))
    {
      rtx rhs = VINSN_RHS (vi);

      VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
				     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
					 VOIDmode, NULL, NULL,
					 false, hrcf);
    }
  else
    {
      VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
				     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
    }

  insn_class = haifa_classify_insn (insn);
  if (insn_class >= 2
      && (!targetm.sched.get_insn_spec_ds
	  || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
	      == 0)))
    VINSN_MAY_TRAP_P (vi) = true;
  else
    VINSN_MAY_TRAP_P (vi) = false;
}

/* Indicate that VI has become part of an rtx object.  */
void
vinsn_attach (vinsn_t vi)
{
  /* Assert that VI is not pending for deletion.  */
  gcc_assert (VINSN_INSN_RTX (vi));

  VINSN_COUNT (vi)++;
}

/* Create and init VI from INSN.  Use FORCE_UNIQUE_P for determining the
   correct VINSN_TYPE (VI).  */
static vinsn_t
vinsn_create (insn_t insn, bool force_unique_p)
{
  vinsn_t vi = XCNEW (struct vinsn_def);

  vinsn_init (vi, insn, force_unique_p);
  return vi;
}

/* Return a copy of VI.  When REATTACH_P is true, detach VI and attach
   the copy.  */
vinsn_t
vinsn_copy (vinsn_t vi, bool reattach_p)
{
  rtx_insn *copy;
  bool unique = VINSN_UNIQUE_P (vi);
  vinsn_t new_vi;

  copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
  new_vi = create_vinsn_from_insn_rtx (copy, unique);
  if (reattach_p)
    {
      vinsn_detach (vi);
      vinsn_attach (new_vi);
    }

  return new_vi;
}

/* Delete the vinsn VI and free its data.  */
static void
vinsn_delete (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) == 0);

  if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
    {
      return_regset_to_pool (VINSN_REG_SETS (vi));
      return_regset_to_pool (VINSN_REG_USES (vi));
      return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
    }

  free (vi);
}

/* Indicate that VI is no longer a part of some rtx object.
   Remove VI if it is no longer needed.  */
void
vinsn_detach (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) > 0);

  if (--VINSN_COUNT (vi) == 0)
    vinsn_delete (vi);
}
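
/* Illustrative sketch (not part of the pass): vinsns are reference
   counted, so every object that stores one must pair the calls:

     vinsn_attach (vi);
     ... VI is referenced by this object ...
     vinsn_detach (vi);

   The detach that drops VINSN_COUNT to zero calls vinsn_delete, which
   also returns the vinsn's regsets to the regset pool.  */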

/* Return TRUE if VI is a branch.  */
bool
vinsn_cond_branch_p (vinsn_t vi)
{
  insn_t insn;

  if (!VINSN_UNIQUE_P (vi))
    return false;

  insn = VINSN_INSN_RTX (vi);
  if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
    return false;

  return control_flow_insn_p (insn);
}

/* Return the latency of INSN.  */
static int
sel_insn_rtx_cost (rtx_insn *insn)
{
  int cost;

  /* A USE insn, or something else we don't need to
     understand.  We can't pass these directly to
     result_ready_cost or insn_default_latency because it will
     trigger a fatal error for unrecognizable insns.  */
  if (recog_memoized (insn) < 0)
    cost = 0;
  else
    {
      cost = insn_default_latency (insn);

      if (cost < 0)
	cost = 0;
    }

  return cost;
}

/* Return the cost of the VI.
   !!! FIXME: Unify with haifa-sched.c: insn_cost ().  */
int
sel_vinsn_cost (vinsn_t vi)
{
  int cost = vi->cost;

  if (cost < 0)
    {
      cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
      vi->cost = cost;
    }

  return cost;
}


/* Functions for insn emitting.  */

/* Emit a new insn after AFTER based on PATTERN and initialize its data
   from EXPR and SEQNO.  */
insn_t
sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
{
  insn_t new_insn;

  gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);

  new_insn = emit_insn_after (pattern, after);
  set_insn_init (expr, NULL, seqno);
  sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);

  return new_insn;
}

/* Force newly generated vinsns to be unique.  */
static bool init_insn_force_unique_p = false;

/* Emit a new speculation recovery insn after AFTER based on PATTERN and
   initialize its data from EXPR and SEQNO.  */
insn_t
sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
				      insn_t after)
{
  insn_t insn;

  gcc_assert (!init_insn_force_unique_p);

  init_insn_force_unique_p = true;
  insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
  CANT_MOVE (insn) = 1;
  init_insn_force_unique_p = false;

  return insn;
}

/* Emit a new insn after AFTER based on EXPR and SEQNO.  If VINSN is not NULL,
   take it as a new vinsn instead of EXPR's vinsn.
   We simplify insns later, after scheduling the region in
   simplify_changed_insns.  */
insn_t
sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
			      insn_t after)
{
  expr_t emit_expr;
  insn_t insn;
  int flags;

  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
			     seqno);
  insn = EXPR_INSN_RTX (emit_expr);

  /* The insn may come from the transformation cache, which may hold already
     deleted insns, so mark it as not deleted.  */
  insn->set_undeleted ();

  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));

  flags = INSN_INIT_TODO_SSID;
  if (INSN_LUID (insn) == 0)
    flags |= INSN_INIT_TODO_LUID;
  sel_init_new_insn (insn, flags);

  return insn;
}

/* Move the insn from EXPR after AFTER.  */
insn_t
sel_move_insn (expr_t expr, int seqno, insn_t after)
{
  insn_t insn = EXPR_INSN_RTX (expr);
  basic_block bb = BLOCK_FOR_INSN (after);
  insn_t next = NEXT_INSN (after);

  /* Assert that in move_op we disconnected this insn properly.  */
  gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
  SET_PREV_INSN (insn) = after;
  SET_NEXT_INSN (insn) = next;

  SET_NEXT_INSN (after) = insn;
  SET_PREV_INSN (next) = insn;

  /* Update links from insn to bb and vice versa.  */
  df_insn_change_bb (insn, bb);
  if (BB_END (bb) == after)
    BB_END (bb) = insn;

  prepare_insn_expr (insn, seqno);
  return insn;
}


/* Functions to work with right-hand sides.  */

/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
   VECT and return true when found.  Use NEW_VINSN for comparison only when
   COMPARE_VINSNS is true.  Write to INDP the index at which the search
   has stopped, such that inserting the new element at INDP will
   retain VECT's sort order.  */
static bool
find_in_history_vect_1 (vec<expr_history_def> vect,
			unsigned uid, vinsn_t new_vinsn,
			bool compare_vinsns, int *indp)
{
  expr_history_def *arr;
  int i, j, len = vect.length ();

  if (len == 0)
    {
      *indp = 0;
      return false;
    }

  arr = vect.address ();
  i = 0, j = len - 1;

  while (i <= j)
    {
      unsigned auid = arr[i].uid;
      vinsn_t avinsn = arr[i].new_expr_vinsn;

      if (auid == uid
	  /* When undoing a transformation on a bookkeeping copy, the new
	     vinsn may not be exactly equal to the one that is saved in the
	     vector.  This is because the insn whose copy we're checking was
	     possibly substituted itself.  */
	  && (!compare_vinsns
	      || vinsn_equal_p (avinsn, new_vinsn)))
	{
	  *indp = i;
	  return true;
	}
      else if (auid > uid)
	break;
      i++;
    }

  *indp = i;
  return false;
}

/* Search for the uid of INSN and NEW_VINSN in a sorted vector VECT.  Return
   the position if found, or -1 if no such value is in the vector.
   Also search for the uids of INSN's originators, if ORIGINATORS_P is
   true.  */
int
find_in_history_vect (vec<expr_history_def> vect, rtx insn,
		      vinsn_t new_vinsn, bool originators_p)
{
  int ind;

  if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
			      false, &ind))
    return ind;

  if (INSN_ORIGINATORS (insn) && originators_p)
    {
      unsigned uid;
      bitmap_iterator bi;

      EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
	if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
	  return ind;
    }

  return -1;
}

/* Insert a new element in the sorted history vector pointed to by PVECT,
   if it is not there already.  The element is searched for using the
   UID/NEW_EXPR_VINSN pair.  TYPE, OLD_EXPR_VINSN and SPEC_DS save
   the history of a transformation.  */
void
insert_in_history_vect (vec<expr_history_def> *pvect,
			unsigned uid, enum local_trans_type type,
			vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
			ds_t spec_ds)
{
  vec<expr_history_def> vect = *pvect;
  expr_history_def temp;
  bool res;
  int ind;

  res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);

  if (res)
    {
      expr_history_def *phist = &vect[ind];

      /* It is possible that speculation types of expressions that were
	 propagated through different paths will be different here.  In this
	 case, merge the status to get the correct check later.  */
      if (phist->spec_ds != spec_ds)
	phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
      return;
    }

  temp.uid = uid;
  temp.old_expr_vinsn = old_expr_vinsn;
  temp.new_expr_vinsn = new_expr_vinsn;
  temp.spec_ds = spec_ds;
  temp.type = type;

  vinsn_attach (old_expr_vinsn);
  vinsn_attach (new_expr_vinsn);
  vect.safe_insert (ind, temp);
  *pvect = vect;
}
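
/* Illustrative sketch (not part of the pass): recording e.g. a
   substitution performed at some insn looks like

     insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
			     INSN_UID (insn), TRANS_SUBSTITUTION,
			     old_vinsn, new_vinsn, 0);

   where EXPR, INSN, OLD_VINSN and NEW_VINSN are hypothetical names.
   The vector stays sorted by uid, and the vinsn_attach calls above keep
   both vinsns alive while the history references them.  */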

/* Free history vector PVECT.  */
static void
free_history_vect (vec<expr_history_def> &pvect)
{
  unsigned i;
  expr_history_def *phist;

  if (!pvect.exists ())
    return;

  for (i = 0; pvect.iterate (i, &phist); i++)
    {
      vinsn_detach (phist->old_expr_vinsn);
      vinsn_detach (phist->new_expr_vinsn);
    }

  pvect.release ();
}

/* Merge vector FROM into PVECT.  */
static void
merge_history_vect (vec<expr_history_def> *pvect,
		    vec<expr_history_def> from)
{
  expr_history_def *phist;
  int i;

  /* We keep this vector sorted.  */
  for (i = 0; from.iterate (i, &phist); i++)
    insert_in_history_vect (pvect, phist->uid, phist->type,
			    phist->old_expr_vinsn, phist->new_expr_vinsn,
			    phist->spec_ds);
}

/* Compare two vinsns as rhses if possible and as vinsns otherwise.  */
bool
vinsn_equal_p (vinsn_t x, vinsn_t y)
{
  rtx_equal_p_callback_function repcf;

  if (x == y)
    return true;

  if (VINSN_TYPE (x) != VINSN_TYPE (y))
    return false;

  if (VINSN_HASH (x) != VINSN_HASH (y))
    return false;

  repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
  if (VINSN_SEPARABLE_P (x))
    {
      /* Compare RHSes of VINSNs.  */
      gcc_assert (VINSN_RHS (x));
      gcc_assert (VINSN_RHS (y));

      return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
    }

  return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
}


/* Functions for working with expressions.  */

/* Initialize EXPR.  */
static void
init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
	   int sched_times, int orig_bb_index, ds_t spec_done_ds,
	   ds_t spec_to_check_ds, int orig_sched_cycle,
	   vec<expr_history_def> history,
	   signed char target_available,
	   bool was_substituted, bool was_renamed, bool needs_spec_check_p,
	   bool cant_move)
{
  vinsn_attach (vi);

  EXPR_VINSN (expr) = vi;
  EXPR_SPEC (expr) = spec;
  EXPR_USEFULNESS (expr) = use;
  EXPR_PRIORITY (expr) = priority;
  EXPR_PRIORITY_ADJ (expr) = 0;
  EXPR_SCHED_TIMES (expr) = sched_times;
  EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
  EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
  EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
  EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;

  if (history.exists ())
    EXPR_HISTORY_OF_CHANGES (expr) = history;
  else
    EXPR_HISTORY_OF_CHANGES (expr).create (0);

  EXPR_TARGET_AVAILABLE (expr) = target_available;
  EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
  EXPR_WAS_RENAMED (expr) = was_renamed;
  EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
  EXPR_CANT_MOVE (expr) = cant_move;
}

/* Make a copy of the expr FROM into the expr TO.  */
void
copy_expr (expr_t to, expr_t from)
{
  vec<expr_history_def> temp = vNULL;

  if (EXPR_HISTORY_OF_CHANGES (from).exists ())
    {
      unsigned i;
      expr_history_def *phist;

      temp = EXPR_HISTORY_OF_CHANGES (from).copy ();
      for (i = 0;
	   temp.iterate (i, &phist);
	   i++)
	{
	  vinsn_attach (phist->old_expr_vinsn);
	  vinsn_attach (phist->new_expr_vinsn);
	}
    }

  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
	     EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
	     EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
	     EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
	     EXPR_ORIG_SCHED_CYCLE (from), temp,
	     EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
	     EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
	     EXPR_CANT_MOVE (from));
}

/* Same, but the final expr will not ever be in av sets, so don't copy
   "uninteresting" data such as the bitmap cache.  */
void
copy_expr_onside (expr_t to, expr_t from)
{
  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
	     EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
	     EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0,
	     vNULL,
	     EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
	     EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
	     EXPR_CANT_MOVE (from));
}

/* Prepare the expr of INSN for scheduling.  Used when moving an insn and
   when initializing new insns.  */
static void
prepare_insn_expr (insn_t insn, int seqno)
{
  expr_t expr = INSN_EXPR (insn);
  ds_t ds;

  INSN_SEQNO (insn) = seqno;
  EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
  EXPR_SPEC (expr) = 0;
  EXPR_ORIG_SCHED_CYCLE (expr) = 0;
  EXPR_WAS_SUBSTITUTED (expr) = 0;
  EXPR_WAS_RENAMED (expr) = 0;
  EXPR_TARGET_AVAILABLE (expr) = 1;
  INSN_LIVE_VALID_P (insn) = false;

  /* ??? If this expression is speculative, make its dependence
     as weak as possible.  We can filter this expression later
     in process_spec_exprs, because we do not distinguish
     between the status we got during compute_av_set and the
     existing status.  To be fixed.  */
  ds = EXPR_SPEC_DONE_DS (expr);
  if (ds)
    EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);

  free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
}

/* Update target_available bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_target_availability (expr_t to, expr_t from, insn_t split_point)
{
  if (EXPR_TARGET_AVAILABLE (to) < 0
      || EXPR_TARGET_AVAILABLE (from) < 0)
    EXPR_TARGET_AVAILABLE (to) = -1;
  else
    {
      /* We try to detect the case when one of the expressions
	 can only be reached through another one.  In this case,
	 we can do better.  */
      if (split_point == NULL)
	{
	  int toind, fromind;

	  toind = EXPR_ORIG_BB_INDEX (to);
	  fromind = EXPR_ORIG_BB_INDEX (from);

	  if (toind && toind == fromind)
	    /* Do nothing -- everything is done in
	       merge_with_other_exprs.  */
	    ;
	  else
	    EXPR_TARGET_AVAILABLE (to) = -1;
	}
      else if (EXPR_TARGET_AVAILABLE (from) == 0
	       && EXPR_LHS (from)
	       && REG_P (EXPR_LHS (from))
	       && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from)))
	EXPR_TARGET_AVAILABLE (to) = -1;
      else
	EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
    }
}

/* Update speculation bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
{
  ds_t old_to_ds, old_from_ds;

  old_to_ds = EXPR_SPEC_DONE_DS (to);
  old_from_ds = EXPR_SPEC_DONE_DS (from);

  EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
  EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
  EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);

  /* When merging e.g. control & data speculative exprs, or a control
     speculative with a control&data speculative one, we really have
     to change the vinsn too.  Also, when speculative status is changed,
     we also need to record this as a transformation in expr's history.  */
  if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
    {
      old_to_ds = ds_get_speculation_types (old_to_ds);
      old_from_ds = ds_get_speculation_types (old_from_ds);

      if (old_to_ds != old_from_ds)
	{
	  ds_t record_ds;

	  /* When both expressions are speculative, we need to change
	     the vinsn first.  */
	  if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
	    {
	      int res;

	      res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
	      gcc_assert (res >= 0);
	    }

	  if (split_point != NULL)
	    {
	      /* Record the change with the proper status.  */
	      record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
	      record_ds &= ~(old_to_ds & SPECULATIVE);
	      record_ds &= ~(old_from_ds & SPECULATIVE);

	      insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
				      INSN_UID (split_point), TRANS_SPECULATION,
				      EXPR_VINSN (from), EXPR_VINSN (to),
				      record_ds);
	    }
	}
    }
}
1820
1821
1822 /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL,
1823 this is done along different paths. */
1824 void
merge_expr_data(expr_t to,expr_t from,insn_t split_point)1825 merge_expr_data (expr_t to, expr_t from, insn_t split_point)
1826 {
1827 /* Choose the maximum of the specs of merged exprs. This is required
1828 for correctness of bookkeeping. */
1829 if (EXPR_SPEC (to) < EXPR_SPEC (from))
1830 EXPR_SPEC (to) = EXPR_SPEC (from);
1831
1832 if (split_point)
1833 EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
1834 else
1835 EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
1836 EXPR_USEFULNESS (from));
1837
1838 if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
1839 EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
1840
1841 if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
1842 EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);
1843
1844 if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
1845 EXPR_ORIG_BB_INDEX (to) = 0;
1846
1847 EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
1848 EXPR_ORIG_SCHED_CYCLE (from));
1849
1850 EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
1851 EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
1852 EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
1853
1854 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1855 EXPR_HISTORY_OF_CHANGES (from));
1856 update_target_availability (to, from, split_point);
1857 update_speculative_bits (to, from, split_point);
1858 }
1859
1860 /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal
1861 in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
1862 are merged from different successors at a split point. */
1863 void
1864 merge_expr (expr_t to, expr_t from, insn_t split_point)
1865 {
1866 vinsn_t to_vi = EXPR_VINSN (to);
1867 vinsn_t from_vi = EXPR_VINSN (from);
1868
1869 gcc_assert (vinsn_equal_p (to_vi, from_vi));
1870
1871 /* Make sure that the speculative pattern is propagated into exprs that
1872 have a non-speculative one. This will provide us with consistent
1873 speculative bits and speculative patterns inside expr. */
1874 if (EXPR_SPEC_DONE_DS (to) == 0
1875 && (EXPR_SPEC_DONE_DS (from) != 0
1876 /* Do likewise for volatile insns, so that we always retain
1877 the may_trap_p bit on the resulting expression. However,
1878 avoid propagating the trapping bit into the instructions
1879 already speculated. This would result in replacing the
1880 speculative pattern with the non-speculative one and breaking
1881 the speculation support. */
1882 || (!VINSN_MAY_TRAP_P (EXPR_VINSN (to))
1883 && VINSN_MAY_TRAP_P (EXPR_VINSN (from)))))
1884 change_vinsn_in_expr (to, EXPR_VINSN (from));
1885
1886 merge_expr_data (to, from, split_point);
1887 gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
1888 }
1889
1890 /* Clear the information of this EXPR. */
1891 void
1892 clear_expr (expr_t expr)
1893 {
1894
1895 vinsn_detach (EXPR_VINSN (expr));
1896 EXPR_VINSN (expr) = NULL;
1897
1898 free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
1899 }
1900
1901 /* For a given LV_SET, mark EXPR as having an unavailable target register. */
1902 static void
1903 set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1904 {
1905 if (EXPR_SEPARABLE_P (expr))
1906 {
1907 if (REG_P (EXPR_LHS (expr))
1908 && register_unavailable_p (lv_set, EXPR_LHS (expr)))
1909 {
1910 /* If it's an insn like r1 = use (r1, ...), and it exists in
1911 different forms in each of the av_sets being merged, we can't say
1912 whether the original destination register is available or not.
1913 However, this still works if the destination register is not used
1914 in the original expression: if the branch whose LV_SET we are
1915 looking at here is not actually the 'other branch' in the sense
1916 that the same expression is available through it (but this can't
1917 be determined at the computation stage because of transformations
1918 on one of the branches), it still won't affect the availability.
1919 Liveness of a register somewhere on a code motion path means
1920 it's either read somewhere on a code motion path, live on
1921 the 'other' branch, live at the point immediately following
1922 the original operation, or read by the original operation.
1923 The latter case is filtered out in the condition below.
1924 It still doesn't cover the case when the register is defined and
1925 used somewhere within the code motion path; in this case we could
1926 miss a unifying code motion along both branches using a renamed
1927 register, but it won't affect code correctness, since upon
1928 an actual code motion, bookkeeping code would be generated. */
1929 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1930 EXPR_LHS (expr)))
1931 EXPR_TARGET_AVAILABLE (expr) = -1;
1932 else
1933 EXPR_TARGET_AVAILABLE (expr) = false;
1934 }
1935 }
1936 else
1937 {
1938 unsigned regno;
1939 reg_set_iterator rsi;
1940
1941 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
1942 0, regno, rsi)
1943 if (bitmap_bit_p (lv_set, regno))
1944 {
1945 EXPR_TARGET_AVAILABLE (expr) = false;
1946 break;
1947 }
1948
1949 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1950 0, regno, rsi)
1951 if (bitmap_bit_p (lv_set, regno))
1952 {
1953 EXPR_TARGET_AVAILABLE (expr) = false;
1954 break;
1955 }
1956 }
1957 }
1958
1959 /* Try to make EXPR speculative. Return 1 when EXPR's pattern
1960 or dependence status have changed, 2 when also the target register
1961 became unavailable, 0 if nothing had to be changed, -1 on failure. */
1962 int
1963 speculate_expr (expr_t expr, ds_t ds)
1964 {
1965 int res;
1966 rtx_insn *orig_insn_rtx;
1967 rtx spec_pat;
1968 ds_t target_ds, current_ds;
1969
1970 /* Obtain the status we need to put on EXPR. */
1971 target_ds = (ds & SPECULATIVE);
1972 current_ds = EXPR_SPEC_DONE_DS (expr);
1973 ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1974
1975 orig_insn_rtx = EXPR_INSN_RTX (expr);
1976
1977 res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1978
1979 switch (res)
1980 {
1981 case 0:
1982 EXPR_SPEC_DONE_DS (expr) = ds;
1983 return current_ds != ds ? 1 : 0;
1984
1985 case 1:
1986 {
1987 rtx_insn *spec_insn_rtx =
1988 create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
1989 vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
1990
1991 change_vinsn_in_expr (expr, spec_vinsn);
1992 EXPR_SPEC_DONE_DS (expr) = ds;
1993 EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
1994
1995 /* Do not allow clobbering the address register of speculative
1996 insns. */
1997 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1998 expr_dest_reg (expr)))
1999 {
2000 EXPR_TARGET_AVAILABLE (expr) = false;
2001 return 2;
2002 }
2003
2004 return 1;
2005 }
2006
2007 case -1:
2008 return -1;
2009
2010 default:
2011 gcc_unreachable ();
2012 return -1;
2013 }
2014 }
2015
2016 /* Return a destination register, if any, of EXPR. */
2017 rtx
2018 expr_dest_reg (expr_t expr)
2019 {
2020 rtx dest = VINSN_LHS (EXPR_VINSN (expr));
2021
2022 if (dest != NULL_RTX && REG_P (dest))
2023 return dest;
2024
2025 return NULL_RTX;
2026 }
2027
2028 /* Returns the REGNO of EXPR's destination. */
2029 unsigned
2030 expr_dest_regno (expr_t expr)
2031 {
2032 rtx dest = expr_dest_reg (expr);
2033
2034 gcc_assert (dest != NULL_RTX);
2035 return REGNO (dest);
2036 }
2037
2038 /* For a given LV_SET, mark all expressions in JOIN_SET that are not
2039 present in AV_SET as having an unavailable target register. */
2040 void
2041 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2042 {
2043 expr_t expr;
2044 av_set_iterator avi;
2045
2046 FOR_EACH_EXPR (expr, avi, join_set)
2047 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2048 set_unavailable_target_for_expr (expr, lv_set);
2049 }
2050
2051
2052 /* Returns true if REG (at least partially) is present in REGS. */
2053 bool
2054 register_unavailable_p (regset regs, rtx reg)
2055 {
2056 unsigned regno, end_regno;
2057
2058 regno = REGNO (reg);
2059 if (bitmap_bit_p (regs, regno))
2060 return true;
2061
2062 end_regno = END_REGNO (reg);
2063
2064 while (++regno < end_regno)
2065 if (bitmap_bit_p (regs, regno))
2066 return true;
2067
2068 return false;
2069 }
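
/* A hedged example of the multi-word case handled above: if a hard
   register REG occupies hard regs 4 and 5 (say, a DImode value on a
   32-bit target), then REGNO (reg) == 4 and END_REGNO (reg) == 6, and
   the loop also probes regno 5:

     SET_REGNO_REG_SET (regs, 5);
     register_unavailable_p (regs, reg);   returns true

   i.e. the register is considered unavailable even when only a part
   of it is present in REGS.  */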
2070
2071 /* Av set functions. */
2072
2073 /* Add a new element to av set SETP.
2074 Return the element added. */
2075 static av_set_t
2076 av_set_add_element (av_set_t *setp)
2077 {
2078 /* Insert at the beginning of the list. */
2079 _list_add (setp);
2080 return *setp;
2081 }
2082
2083 /* Add EXPR to SETP. */
2084 void
2085 av_set_add (av_set_t *setp, expr_t expr)
2086 {
2087 av_set_t elem;
2088
2089 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2090 elem = av_set_add_element (setp);
2091 copy_expr (_AV_SET_EXPR (elem), expr);
2092 }
2093
2094 /* Same, but do not copy EXPR. */
2095 static void
2096 av_set_add_nocopy (av_set_t *setp, expr_t expr)
2097 {
2098 av_set_t elem;
2099
2100 elem = av_set_add_element (setp);
2101 *_AV_SET_EXPR (elem) = *expr;
2102 }
2103
2104 /* Remove expr pointed to by IP from the av_set. */
2105 void
2106 av_set_iter_remove (av_set_iterator *ip)
2107 {
2108 clear_expr (_AV_SET_EXPR (*ip->lp));
2109 _list_iter_remove (ip);
2110 }
2111
2112 /* Search for an expr in SET that is equivalent to SOUGHT_VINSN in the
2113 sense of the vinsn_equal_p function. Return NULL if no such expr
2114 was found in SET. */
2115 expr_t
2116 av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2117 {
2118 expr_t expr;
2119 av_set_iterator i;
2120
2121 FOR_EACH_EXPR (expr, i, set)
2122 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2123 return expr;
2124 return NULL;
2125 }
2126
2127 /* Same, but also remove the EXPR found. */
2128 static expr_t
2129 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2130 {
2131 expr_t expr;
2132 av_set_iterator i;
2133
2134 FOR_EACH_EXPR_1 (expr, i, setp)
2135 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2136 {
2137 _list_iter_remove_nofree (&i);
2138 return expr;
2139 }
2140 return NULL;
2141 }
2142
2143 /* Search for an expr in SET that is equivalent to EXPR in the sense of
2144 vinsn_equal_p applied to their vinsns, but is not EXPR itself.
2145 Return NULL if no such expr was found in SET. */
2146 static expr_t
2147 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2148 {
2149 expr_t cur_expr;
2150 av_set_iterator i;
2151
2152 FOR_EACH_EXPR (cur_expr, i, set)
2153 {
2154 if (cur_expr == expr)
2155 continue;
2156 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2157 return cur_expr;
2158 }
2159
2160 return NULL;
2161 }
2162
2163 /* If an expression equivalent to EXPR is already in AVP, merge the two and remove EXPR. */
2164 expr_t
2165 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2166 {
2167 expr_t expr2;
2168
2169 expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2170 if (expr2 != NULL)
2171 {
2172 /* Reset target availability on merge, since taking it only from one
2173 of the exprs would be controversial for different code. */
2174 EXPR_TARGET_AVAILABLE (expr2) = -1;
2175 EXPR_USEFULNESS (expr2) = 0;
2176
2177 merge_expr (expr2, expr, NULL);
2178
2179 /* Fix usefulness as it should be now REG_BR_PROB_BASE. */
2180 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2181
2182 av_set_iter_remove (ip);
2183 return expr2;
2184 }
2185
2186 return expr;
2187 }
2188
2189 /* Return true if there is an expr that correlates to VI in SET. */
2190 bool
2191 av_set_is_in_p (av_set_t set, vinsn_t vi)
2192 {
2193 return av_set_lookup (set, vi) != NULL;
2194 }
2195
2196 /* Return a copy of SET. */
2197 av_set_t
2198 av_set_copy (av_set_t set)
2199 {
2200 expr_t expr;
2201 av_set_iterator i;
2202 av_set_t res = NULL;
2203
2204 FOR_EACH_EXPR (expr, i, set)
2205 av_set_add (&res, expr);
2206
2207 return res;
2208 }
2209
2210 /* Join two av sets that do not have common elements by attaching second set
2211 (pointed to by FROMP) to the end of first set (TO_TAILP must point to
2212 _AV_SET_NEXT of first set's last element). */
2213 static void
2214 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2215 {
2216 gcc_assert (*to_tailp == NULL);
2217 *to_tailp = *fromp;
2218 *fromp = NULL;
2219 }
2220
2221 /* Makes the set pointed to by TOP the union of TOP and FROMP. Clears the
2222 av_set pointed to by FROMP afterwards. */
2223 void
2224 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2225 {
2226 expr_t expr1;
2227 av_set_iterator i;
2228
2229 /* Delete from TOP all exprs that are present in FROMP. */
2230 FOR_EACH_EXPR_1 (expr1, i, top)
2231 {
2232 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2233
2234 if (expr2)
2235 {
2236 merge_expr (expr2, expr1, insn);
2237 av_set_iter_remove (&i);
2238 }
2239 }
2240
2241 join_distinct_sets (i.lp, fromp);
2242 }
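
/* A small worked example of the union (a sketch, not from the original
   sources).  Let TOP = { A, B } and FROMP = { B', C }, where B and B'
   are vinsn-equal.  After

     av_set_union_and_clear (&top, &fromp, insn);

   TOP is { A, B'', C } with B'' being B' after merge_expr (B', B, insn),
   and FROMP is NULL: the common element was merged into FROMP's copy
   and removed from TOP, then the whole remainder of FROMP was spliced
   onto TOP's tail.  */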
2243
2244 /* Same as above, but also update availability of target register in
2245 TOP judging by TO_LV_SET and FROM_LV_SET. */
2246 void
2247 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2248 regset from_lv_set, insn_t insn)
2249 {
2250 expr_t expr1;
2251 av_set_iterator i;
2252 av_set_t *to_tailp, in_both_set = NULL;
2253
2254 /* Delete from TOP all exprs that are present in FROMP. */
2255 FOR_EACH_EXPR_1 (expr1, i, top)
2256 {
2257 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2258
2259 if (expr2)
2260 {
2261 /* It may be that the expressions have different destination
2262 registers, in which case we need to check liveness here. */
2263 if (EXPR_SEPARABLE_P (expr1))
2264 {
2265 int regno1 = (REG_P (EXPR_LHS (expr1))
2266 ? (int) expr_dest_regno (expr1) : -1);
2267 int regno2 = (REG_P (EXPR_LHS (expr2))
2268 ? (int) expr_dest_regno (expr2) : -1);
2269
2270 /* ??? We don't have a way to check restrictions for
2271 *other* register on the current path, we did it only
2272 for the current target register. Give up. */
2273 if (regno1 != regno2)
2274 EXPR_TARGET_AVAILABLE (expr2) = -1;
2275 }
2276 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2277 EXPR_TARGET_AVAILABLE (expr2) = -1;
2278
2279 merge_expr (expr2, expr1, insn);
2280 av_set_add_nocopy (&in_both_set, expr2);
2281 av_set_iter_remove (&i);
2282 }
2283 else
2284 /* EXPR1 is present in TOP, but not in FROMP. Check it on
2285 FROM_LV_SET. */
2286 set_unavailable_target_for_expr (expr1, from_lv_set);
2287 }
2288 to_tailp = i.lp;
2289
2290 /* These expressions are not present in TOP. Check liveness
2291 restrictions on TO_LV_SET. */
2292 FOR_EACH_EXPR (expr1, i, *fromp)
2293 set_unavailable_target_for_expr (expr1, to_lv_set);
2294
2295 join_distinct_sets (i.lp, &in_both_set);
2296 join_distinct_sets (to_tailp, fromp);
2297 }
2298
2299 /* Clear av_set pointed to by SETP. */
2300 void
2301 av_set_clear (av_set_t *setp)
2302 {
2303 expr_t expr;
2304 av_set_iterator i;
2305
2306 FOR_EACH_EXPR_1 (expr, i, setp)
2307 av_set_iter_remove (&i);
2308
2309 gcc_assert (*setp == NULL);
2310 }
2311
2312 /* Leave only one non-speculative element in the SETP. */
2313 void
2314 av_set_leave_one_nonspec (av_set_t *setp)
2315 {
2316 expr_t expr;
2317 av_set_iterator i;
2318 bool has_one_nonspec = false;
2319
2320 /* Keep all speculative exprs, and leave one non-speculative
2321 (the first one). */
2322 FOR_EACH_EXPR_1 (expr, i, setp)
2323 {
2324 if (!EXPR_SPEC_DONE_DS (expr))
2325 {
2326 if (has_one_nonspec)
2327 av_set_iter_remove (&i);
2328 else
2329 has_one_nonspec = true;
2330 }
2331 }
2332 }
2333
2334 /* Return the N'th element of the SET. */
2335 expr_t
2336 av_set_element (av_set_t set, int n)
2337 {
2338 expr_t expr;
2339 av_set_iterator i;
2340
2341 FOR_EACH_EXPR (expr, i, set)
2342 if (n-- == 0)
2343 return expr;
2344
2345 gcc_unreachable ();
2346 return NULL;
2347 }
2348
2349 /* Deletes all expressions from AVP that are conditional branches (IFs). */
2350 void
2351 av_set_substract_cond_branches (av_set_t *avp)
2352 {
2353 av_set_iterator i;
2354 expr_t expr;
2355
2356 FOR_EACH_EXPR_1 (expr, i, avp)
2357 if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2358 av_set_iter_remove (&i);
2359 }
2360
2361 /* Multiplies the usefulness attribute of each member of av-set AV by
2362 the value PROB / ALL_PROB. */
2363 void
2364 av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2365 {
2366 av_set_iterator i;
2367 expr_t expr;
2368
2369 FOR_EACH_EXPR (expr, i, av)
2370 EXPR_USEFULNESS (expr) = (all_prob
2371 ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2372 : 0);
2373 }
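
/* Worked example (illustrative): with REG_BR_PROB_BASE == 10000, an
   expr of usefulness 10000 propagated through an edge of probability
   3000/10000 gets

     av_set_split_usefulness (av, 3000, 10000);
     EXPR_USEFULNESS (expr) == (10000 * 3000) / 10000 == 3000

   and ALL_PROB == 0 degenerates to usefulness 0 instead of dividing
   by zero.  */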
2374
2375 /* Leave in AVP only those expressions that are present in AV,
2376 merging the history of their transformations. */
2377 void
2378 av_set_code_motion_filter (av_set_t *avp, av_set_t av)
2379 {
2380 av_set_iterator i;
2381 expr_t expr, expr2;
2382
2383 FOR_EACH_EXPR_1 (expr, i, avp)
2384 if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL)
2385 av_set_iter_remove (&i);
2386 else
2387 /* When updating av sets in bookkeeping blocks, we can add more insns
2388 there which will be transformed but the upper av sets will not
2389 reflect those transformations. We then fail to undo those
2390 when searching for such insns. So merge the history saved
2391 in the av set of the block we are processing. */
2392 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2393 EXPR_HISTORY_OF_CHANGES (expr2));
2394 }
2395
2396
2397
2398 /* Dependence hooks to initialize insn data. */
2399
2400 /* This is used in hooks callable from dependence analysis when initializing
2401 an instruction's data. */
2402 static struct
2403 {
2404 /* Where the dependence was found (lhs/rhs). */
2405 deps_where_t where;
2406
2407 /* The actual data object to initialize. */
2408 idata_t id;
2409
2410 /* True when the insn should not be made clonable. */
2411 bool force_unique_p;
2412
2413 /* True when insn should be treated as of type USE, i.e. never renamed. */
2414 bool force_use_p;
2415 } deps_init_id_data;
2416
2417
2418 /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
2419 clonable. */
2420 static void
2421 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2422 {
2423 int type;
2424
2425 /* Determine whether INSN could be cloned and return the appropriate vinsn
2426 type. Clonable insns which can be separated into lhs and rhs have type
2427 SET; other clonable insns have type USE. */
2428 type = GET_CODE (insn);
2429
2430 /* Only regular insns can be cloned. */
2431 if (type == INSN && !force_unique_p)
2432 type = SET;
2433 else if (type == JUMP_INSN && simplejump_p (insn))
2434 type = PC;
2435 else if (type == DEBUG_INSN)
2436 type = !force_unique_p ? USE : INSN;
2437
2438 IDATA_TYPE (id) = type;
2439 IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2440 IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2441 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2442 }
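
/* A hedged summary of the mapping implemented above:

     GET_CODE (insn)   FORCE_UNIQUE_P   IDATA_TYPE
     INSN              false            SET   (fully clonable)
     INSN              true             INSN  (kept unique)
     JUMP_INSN         (simplejump)     PC
     DEBUG_INSN        false            USE
     DEBUG_INSN        true             INSN

   E.g. a plain arithmetic insn such as r1 = r2 + r3 becomes a SET and
   may still be downgraded to USE later by maybe_downgrade_id_to_use.  */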
2443
2444 /* Start initializing insn data. */
2445 static void
2446 deps_init_id_start_insn (insn_t insn)
2447 {
2448 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2449
2450 setup_id_for_insn (deps_init_id_data.id, insn,
2451 deps_init_id_data.force_unique_p);
2452 deps_init_id_data.where = DEPS_IN_INSN;
2453 }
2454
2455 /* Start initializing lhs data. */
2456 static void
2457 deps_init_id_start_lhs (rtx lhs)
2458 {
2459 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2460 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2461
2462 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2463 {
2464 IDATA_LHS (deps_init_id_data.id) = lhs;
2465 deps_init_id_data.where = DEPS_IN_LHS;
2466 }
2467 }
2468
2469 /* Finish initializing lhs data. */
2470 static void
2471 deps_init_id_finish_lhs (void)
2472 {
2473 deps_init_id_data.where = DEPS_IN_INSN;
2474 }
2475
2476 /* Note a set of REGNO. */
2477 static void
2478 deps_init_id_note_reg_set (int regno)
2479 {
2480 haifa_note_reg_set (regno);
2481
2482 if (deps_init_id_data.where == DEPS_IN_RHS)
2483 deps_init_id_data.force_use_p = true;
2484
2485 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2486 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2487
2488 #ifdef STACK_REGS
2489 /* Make instructions that set stack registers to be ineligible for
2490 renaming to avoid issues with find_used_regs. */
2491 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2492 deps_init_id_data.force_use_p = true;
2493 #endif
2494 }
2495
2496 /* Note a clobber of REGNO. */
2497 static void
2498 deps_init_id_note_reg_clobber (int regno)
2499 {
2500 haifa_note_reg_clobber (regno);
2501
2502 if (deps_init_id_data.where == DEPS_IN_RHS)
2503 deps_init_id_data.force_use_p = true;
2504
2505 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2506 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2507 }
2508
2509 /* Note a use of REGNO. */
2510 static void
2511 deps_init_id_note_reg_use (int regno)
2512 {
2513 haifa_note_reg_use (regno);
2514
2515 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2516 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2517 }
2518
2519 /* Start initializing rhs data. */
2520 static void
2521 deps_init_id_start_rhs (rtx rhs)
2522 {
2523 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2524
2525 /* And there was no sel_deps_reset_to_insn (). */
2526 if (IDATA_LHS (deps_init_id_data.id) != NULL)
2527 {
2528 IDATA_RHS (deps_init_id_data.id) = rhs;
2529 deps_init_id_data.where = DEPS_IN_RHS;
2530 }
2531 }
2532
2533 /* Finish initializing rhs data. */
2534 static void
2535 deps_init_id_finish_rhs (void)
2536 {
2537 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2538 || deps_init_id_data.where == DEPS_IN_INSN);
2539 deps_init_id_data.where = DEPS_IN_INSN;
2540 }
2541
2542 /* Finish initializing insn data. */
2543 static void
2544 deps_init_id_finish_insn (void)
2545 {
2546 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2547
2548 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2549 {
2550 rtx lhs = IDATA_LHS (deps_init_id_data.id);
2551 rtx rhs = IDATA_RHS (deps_init_id_data.id);
2552
2553 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2554 || deps_init_id_data.force_use_p)
2555 {
2556 /* This should be a USE, as we don't want to schedule its RHS
2557 separately. However, we still want to have them recorded
2558 for the purposes of substitution. That's why we don't
2559 simply call downgrade_to_use () here. */
2560 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2561 gcc_assert (!lhs == !rhs);
2562
2563 IDATA_TYPE (deps_init_id_data.id) = USE;
2564 }
2565 }
2566
2567 deps_init_id_data.where = DEPS_IN_NOWHERE;
2568 }
2569
2570 /* This is dependence info used for initializing insn's data. */
2571 static struct sched_deps_info_def deps_init_id_sched_deps_info;
2572
2573 /* This initializes most of the static part of the above structure. */
2574 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2575 {
2576 NULL,
2577
2578 deps_init_id_start_insn,
2579 deps_init_id_finish_insn,
2580 deps_init_id_start_lhs,
2581 deps_init_id_finish_lhs,
2582 deps_init_id_start_rhs,
2583 deps_init_id_finish_rhs,
2584 deps_init_id_note_reg_set,
2585 deps_init_id_note_reg_clobber,
2586 deps_init_id_note_reg_use,
2587 NULL, /* note_mem_dep */
2588 NULL, /* note_dep */
2589
2590 0, /* use_cselib */
2591 0, /* use_deps_list */
2592 0 /* generate_spec_deps */
2593 };
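
/* A sketch of the callback sequence these hooks observe (an assumption
   based on sched-deps.c, shown for a simple insn r1 = r2 + r3):

     deps_init_id_start_insn    (insn)
     deps_init_id_start_lhs     (r1)
     deps_init_id_note_reg_set  (1)     lhs regs go to IDATA_REG_SETS
     deps_init_id_finish_lhs    ()
     deps_init_id_start_rhs     (r2 + r3)
     deps_init_id_note_reg_use  (2)     rhs regs go to IDATA_REG_USES
     deps_init_id_note_reg_use  (3)
     deps_init_id_finish_rhs    ()
     deps_init_id_finish_insn   ()

   A register set noted while WHERE == DEPS_IN_RHS (e.g. from an
   auto-increment address) sets force_use_p, so the insn ends up as a
   USE and is never renamed.  */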
2594
2595 /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true,
2596 we don't actually need information about lhs and rhs. */
2597 static void
2598 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2599 {
2600 rtx pat = PATTERN (insn);
2601
2602 if (NONJUMP_INSN_P (insn)
2603 && GET_CODE (pat) == SET
2604 && !force_unique_p)
2605 {
2606 IDATA_RHS (id) = SET_SRC (pat);
2607 IDATA_LHS (id) = SET_DEST (pat);
2608 }
2609 else
2610 IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2611 }
2612
2613 /* Possibly downgrade INSN to USE. */
2614 static void
2615 maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2616 {
2617 bool must_be_use = false;
2618 df_ref def;
2619 rtx lhs = IDATA_LHS (id);
2620 rtx rhs = IDATA_RHS (id);
2621
2622 /* We downgrade only SETs. */
2623 if (IDATA_TYPE (id) != SET)
2624 return;
2625
2626 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2627 {
2628 IDATA_TYPE (id) = USE;
2629 return;
2630 }
2631
2632 FOR_EACH_INSN_DEF (def, insn)
2633 {
2634 if (DF_REF_INSN (def)
2635 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2636 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2637 {
2638 must_be_use = true;
2639 break;
2640 }
2641
2642 #ifdef STACK_REGS
2643 /* Make instructions that set stack registers to be ineligible for
2644 renaming to avoid issues with find_used_regs. */
2645 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2646 {
2647 must_be_use = true;
2648 break;
2649 }
2650 #endif
2651 }
2652
2653 if (must_be_use)
2654 IDATA_TYPE (id) = USE;
2655 }
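
/* An illustrative case for the downgrade above: a post-increment load

     (set (reg r1) (mem (post_inc (reg r2))))

   carries a def of r2 flagged DF_REF_PRE_POST_MODIFY whose location is
   mentioned in the rhs, so the insn is downgraded to USE and the
   scheduler will not attempt to rename its destination.  */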
2656
2657 /* Setup implicit register clobbers calculated by sched-deps for INSN
2658 before reload and save them in ID. */
2659 static void
2660 setup_id_implicit_regs (idata_t id, insn_t insn)
2661 {
2662 if (reload_completed)
2663 return;
2664
2665 HARD_REG_SET temp;
2666 unsigned regno;
2667 hard_reg_set_iterator hrsi;
2668
2669 get_implicit_reg_pending_clobbers (&temp, insn);
2670 EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi)
2671 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2672 }
2673
2674 /* Setup register sets describing INSN in ID. */
2675 static void
2676 setup_id_reg_sets (idata_t id, insn_t insn)
2677 {
2678 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2679 df_ref def, use;
2680 regset tmp = get_clear_regset_from_pool ();
2681
2682 FOR_EACH_INSN_INFO_DEF (def, insn_info)
2683 {
2684 unsigned int regno = DF_REF_REGNO (def);
2685
2686 /* Post modifies are treated like clobbers by sched-deps.c. */
2687 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2688 | DF_REF_PRE_POST_MODIFY)))
2689 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2690 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2691 {
2692 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2693
2694 #ifdef STACK_REGS
2695 /* For stack registers, treat writes to them as writes
2696 to the first one to be consistent with sched-deps.c. */
2697 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2698 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2699 #endif
2700 }
2701 /* Mark special refs that generate read/write def pair. */
2702 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2703 || regno == STACK_POINTER_REGNUM)
2704 bitmap_set_bit (tmp, regno);
2705 }
2706
2707 FOR_EACH_INSN_INFO_USE (use, insn_info)
2708 {
2709 unsigned int regno = DF_REF_REGNO (use);
2710
2711 /* When these refs are met for the first time, skip them, as
2712 these uses are just counterparts of some defs. */
2713 if (bitmap_bit_p (tmp, regno))
2714 bitmap_clear_bit (tmp, regno);
2715 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2716 {
2717 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2718
2719 #ifdef STACK_REGS
2720 /* For stack registers, treat reads from them as reads from
2721 the first one to be consistent with sched-deps.c. */
2722 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2723 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2724 #endif
2725 }
2726 }
2727
2728 /* Also get implicit reg clobbers from sched-deps. */
2729 setup_id_implicit_regs (id, insn);
2730
2731 return_regset_to_pool (tmp);
2732 }
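
/* An example of the TMP trick above (a sketch): a conditional set like

     (cond_exec (cond) (set (reg r1) (reg r2)))

   makes DF record both a DF_REF_CONDITIONAL def of r1 and a matching
   use of r1 (the old value may survive).  The def walk sets bit 1 in
   TMP; the use walk then sees that bit, clears it and skips the use,
   so r1 does not land in IDATA_REG_USES merely as the counterpart of
   its own conditional def.  */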
2733
2734 /* Initialize instruction data for INSN in ID using DF's data. */
2735 static void
2736 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2737 {
2738 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2739
2740 setup_id_for_insn (id, insn, force_unique_p);
2741 setup_id_lhs_rhs (id, insn, force_unique_p);
2742
2743 if (INSN_NOP_P (insn))
2744 return;
2745
2746 maybe_downgrade_id_to_use (id, insn);
2747 setup_id_reg_sets (id, insn);
2748 }
2749
2750 /* Initialize instruction data for INSN in ID. */
2751 static void
2752 deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2753 {
2754 struct deps_desc _dc, *dc = &_dc;
2755
2756 deps_init_id_data.where = DEPS_IN_NOWHERE;
2757 deps_init_id_data.id = id;
2758 deps_init_id_data.force_unique_p = force_unique_p;
2759 deps_init_id_data.force_use_p = false;
2760
2761 init_deps (dc, false);
2762 memcpy (&deps_init_id_sched_deps_info,
2763 &const_deps_init_id_sched_deps_info,
2764 sizeof (deps_init_id_sched_deps_info));
2765 if (spec_info != NULL)
2766 deps_init_id_sched_deps_info.generate_spec_deps = 1;
2767 sched_deps_info = &deps_init_id_sched_deps_info;
2768
2769 deps_analyze_insn (dc, insn);
2770 /* Implicit reg clobbers are received from sched-deps separately. */
2771 setup_id_implicit_regs (id, insn);
2772
2773 free_deps (dc);
2774 deps_init_id_data.id = NULL;
2775 }
2776
2777
2778 struct sched_scan_info_def
2779 {
2780 /* This hook notifies the scheduler frontend to extend its internal per
2781 basic block data structures. This hook should be called once before a
2782 series of calls to bb_init (). */
2783 void (*extend_bb) (void);
2784
2785 /* This hook makes the scheduler frontend initialize its internal data
2786 structures for the passed basic block. */
2787 void (*init_bb) (basic_block);
2788
2789 /* This hook notifies the scheduler frontend to extend its internal per insn
2790 data structures. This hook should be called once before a series of calls
2791 to insn_init (). */
2792 void (*extend_insn) (void);
2793
2794 /* This hook makes the scheduler frontend initialize its internal data
2795 structures for the passed insn. */
2796 void (*init_insn) (insn_t);
2797 };
2798
2799 /* A driver function to add a set of basic blocks (BBS) to the
2800 scheduling region. */
2801 static void
2802 sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs)
2803 {
2804 unsigned i;
2805 basic_block bb;
2806
2807 if (ssi->extend_bb)
2808 ssi->extend_bb ();
2809
2810 if (ssi->init_bb)
2811 FOR_EACH_VEC_ELT (bbs, i, bb)
2812 ssi->init_bb (bb);
2813
2814 if (ssi->extend_insn)
2815 ssi->extend_insn ();
2816
2817 if (ssi->init_insn)
2818 FOR_EACH_VEC_ELT (bbs, i, bb)
2819 {
2820 rtx_insn *insn;
2821
2822 FOR_BB_INSNS (bb, insn)
2823 ssi->init_insn (insn);
2824 }
2825 }
2826
2827 /* Implement hooks for collecting fundamental insn properties, such as
2828 whether an insn is an ASM or is within a SCHED_GROUP. */
2829
2830 /* True when the "one-time init" data for INSN was already initialized. */
2831 static bool
2832 first_time_insn_init (insn_t insn)
2833 {
2834 return INSN_LIVE (insn) == NULL;
2835 }
2836
2837 /* Hash an entry in a transformed_insns hashtable. */
2838 static hashval_t
2839 hash_transformed_insns (const void *p)
2840 {
2841 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2842 }
2843
2844 /* Compare the entries in a transformed_insns hashtable. */
2845 static int
2846 eq_transformed_insns (const void *p, const void *q)
2847 {
2848 rtx_insn *i1 =
2849 VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2850 rtx_insn *i2 =
2851 VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2852
2853 if (INSN_UID (i1) == INSN_UID (i2))
2854 return 1;
2855 return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2856 }
2857
2858 /* Free an entry in a transformed_insns hashtable. */
2859 static void
2860 free_transformed_insns (void *p)
2861 {
2862 struct transformed_insns *pti = (struct transformed_insns *) p;
2863
2864 vinsn_detach (pti->vinsn_old);
2865 vinsn_detach (pti->vinsn_new);
2866 free (pti);
2867 }
2868
2869 /* Init the s_i_d data for INSN which should be inited just once, when
2870 we first see the insn. */
2871 static void
2872 init_first_time_insn_data (insn_t insn)
2873 {
2874 /* This should not be set if this is the first time we init data for
2875 insn. */
2876 gcc_assert (first_time_insn_init (insn));
2877
2878 /* These are needed for nops too. */
2879 INSN_LIVE (insn) = get_regset_from_pool ();
2880 INSN_LIVE_VALID_P (insn) = false;
2881
2882 if (!INSN_NOP_P (insn))
2883 {
2884 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2885 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2886 INSN_TRANSFORMED_INSNS (insn)
2887 = htab_create (16, hash_transformed_insns,
2888 eq_transformed_insns, free_transformed_insns);
2889 init_deps (&INSN_DEPS_CONTEXT (insn), true);
2890 }
2891 }
2892
2893 /* Free almost all above data for INSN that is scheduled already.
2894 Used for extra-large basic blocks. */
2895 void
2896 free_data_for_scheduled_insn (insn_t insn)
2897 {
2898 gcc_assert (! first_time_insn_init (insn));
2899
2900 if (! INSN_ANALYZED_DEPS (insn))
2901 return;
2902
2903 BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2904 BITMAP_FREE (INSN_FOUND_DEPS (insn));
2905 htab_delete (INSN_TRANSFORMED_INSNS (insn));
2906
2907 /* This is allocated only for bookkeeping insns. */
2908 if (INSN_ORIGINATORS (insn))
2909 BITMAP_FREE (INSN_ORIGINATORS (insn));
2910 free_deps (&INSN_DEPS_CONTEXT (insn));
2911
2912 INSN_ANALYZED_DEPS (insn) = NULL;
2913
2914 /* Clear the readonly flag so we would ICE when trying to recalculate
2915 the deps context (as we believe that it should not happen). */
2916 (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2917 }
2918
2919 /* Free the same data as above for INSN. */
2920 static void
2921 free_first_time_insn_data (insn_t insn)
2922 {
2923 gcc_assert (! first_time_insn_init (insn));
2924
2925 free_data_for_scheduled_insn (insn);
2926 return_regset_to_pool (INSN_LIVE (insn));
2927 INSN_LIVE (insn) = NULL;
2928 INSN_LIVE_VALID_P (insn) = false;
2929 }
2930
2931 /* Initialize region-scope data structures for basic blocks. */
2932 static void
2933 init_global_and_expr_for_bb (basic_block bb)
2934 {
2935 if (sel_bb_empty_p (bb))
2936 return;
2937
2938 invalidate_av_set (bb);
2939 }
2940
2941 /* Data for global dependency analysis (to initialize CANT_MOVE and
2942 SCHED_GROUP_P). */
2943 static struct
2944 {
2945 /* Previous insn. */
2946 insn_t prev_insn;
2947 } init_global_data;
2948
2949 /* Determine if INSN is in the sched_group, is an asm or should not be
2950 cloned. After that initialize its expr. */
2951 static void
2952 init_global_and_expr_for_insn (insn_t insn)
2953 {
2954 if (LABEL_P (insn))
2955 return;
2956
2957 if (NOTE_INSN_BASIC_BLOCK_P (insn))
2958 {
2959 init_global_data.prev_insn = NULL;
2960 return;
2961 }
2962
2963 gcc_assert (INSN_P (insn));
2964
2965 if (SCHED_GROUP_P (insn))
2966 /* Setup a sched_group. */
2967 {
2968 insn_t prev_insn = init_global_data.prev_insn;
2969
2970 if (prev_insn)
2971 INSN_SCHED_NEXT (prev_insn) = insn;
2972
2973 init_global_data.prev_insn = insn;
2974 }
2975 else
2976 init_global_data.prev_insn = NULL;
2977
2978 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2979 || asm_noperands (PATTERN (insn)) >= 0)
2980 /* Mark INSN as an asm. */
2981 INSN_ASM_P (insn) = true;
2982
2983 {
2984 bool force_unique_p;
2985 ds_t spec_done_ds;
2986
2987 /* Certain instructions cannot be cloned, and frame related insns and
2988 the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of
2989 their block. */
2990 if (prologue_epilogue_contains (insn))
2991 {
2992 if (RTX_FRAME_RELATED_P (insn))
2993 CANT_MOVE (insn) = 1;
2994 else
2995 {
2996 rtx note;
2997 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2998 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE
2999 && ((enum insn_note) INTVAL (XEXP (note, 0))
3000 == NOTE_INSN_EPILOGUE_BEG))
3001 {
3002 CANT_MOVE (insn) = 1;
3003 break;
3004 }
3005 }
3006 force_unique_p = true;
3007 }
3008 else
3009 if (CANT_MOVE (insn)
3010 || INSN_ASM_P (insn)
3011 || SCHED_GROUP_P (insn)
3012 || CALL_P (insn)
3013 /* Exception handling insns are always unique. */
3014 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
3015 /* TRAP_IF, though it has an INSN code, is control_flow_insn_p (). */
3016 || control_flow_insn_p (insn)
3017 || volatile_insn_p (PATTERN (insn))
3018 || (targetm.cannot_copy_insn_p
3019 && targetm.cannot_copy_insn_p (insn)))
3020 force_unique_p = true;
3021 else
3022 force_unique_p = false;
3023
3024 if (targetm.sched.get_insn_spec_ds)
3025 {
3026 spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
3027 spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
3028 }
3029 else
3030 spec_done_ds = 0;
3031
3032 /* Initialize INSN's expr. */
3033 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
3034 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
3035 spec_done_ds, 0, 0, vNULL, true,
3036 false, false, false, CANT_MOVE (insn));
3037 }
3038
3039 init_first_time_insn_data (insn);
3040 }
3041
3042 /* Scan the region and initialize instruction data for basic blocks BBS. */
3043 void
3044 sel_init_global_and_expr (bb_vec_t bbs)
3045 {
3046 /* ??? It would be nice to implement push / pop scheme for sched_infos. */
3047 const struct sched_scan_info_def ssi =
3048 {
3049 NULL, /* extend_bb */
3050 init_global_and_expr_for_bb, /* init_bb */
3051 extend_insn_data, /* extend_insn */
3052 init_global_and_expr_for_insn /* init_insn */
3053 };
3054
3055 sched_scan (&ssi, bbs);
3056 }
3057
3058 /* Finalize region-scope data structures for basic blocks. */
3059 static void
3060 finish_global_and_expr_for_bb (basic_block bb)
3061 {
3062 av_set_clear (&BB_AV_SET (bb));
3063 BB_AV_LEVEL (bb) = 0;
3064 }
3065
3066 /* Finalize INSN's data. */
3067 static void
3068 finish_global_and_expr_insn (insn_t insn)
3069 {
3070 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
3071 return;
3072
3073 gcc_assert (INSN_P (insn));
3074
3075 if (INSN_LUID (insn) > 0)
3076 {
3077 free_first_time_insn_data (insn);
3078 INSN_WS_LEVEL (insn) = 0;
3079 CANT_MOVE (insn) = 0;
3080
3081 /* We can no longer assert this, as vinsns of this insn could be
3082 easily live in other insn's caches. This should be changed to
3083 a counter-like approach among all vinsns. */
3084 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
3085 clear_expr (INSN_EXPR (insn));
3086 }
3087 }
3088
3089 /* Finalize per instruction data for the whole region. */
3090 void
3091 sel_finish_global_and_expr (void)
3092 {
3093 {
3094 bb_vec_t bbs;
3095 int i;
3096
3097 bbs.create (current_nr_blocks);
3098
3099 for (i = 0; i < current_nr_blocks; i++)
3100 bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)));
3101
3102 /* Clear AV_SETs and INSN_EXPRs. */
3103 {
3104 const struct sched_scan_info_def ssi =
3105 {
3106 NULL, /* extend_bb */
3107 finish_global_and_expr_for_bb, /* init_bb */
3108 NULL, /* extend_insn */
3109 finish_global_and_expr_insn /* init_insn */
3110 };
3111
3112 sched_scan (&ssi, bbs);
3113 }
3114
3115 bbs.release ();
3116 }
3117
3118 finish_insns ();
3119 }
3120
3121
3122 /* In the below hooks, we merely calculate whether or not a dependence
3123 exists, and in what part of the insn. However, we will need more data
3124 when we start caching dependence requests. */
3125
3126 /* Container to hold information for dependency analysis. */
3127 static struct
3128 {
3129 deps_t dc;
3130
3131 /* A variable to track which part of rtx we are scanning in
3132 sched-deps.c: sched_analyze_insn (). */
3133 deps_where_t where;
3134
3135 /* Current producer. */
3136 insn_t pro;
3137
3138 /* Current consumer. */
3139 vinsn_t con;
3140
3141 /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
3142 X is from { INSN, LHS, RHS }. */
3143 ds_t has_dep_p[DEPS_IN_NOWHERE];
3144 } has_dependence_data;
3145
3146 /* Start analyzing dependencies of INSN. */
3147 static void
3148 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3149 {
3150 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3151
3152 has_dependence_data.where = DEPS_IN_INSN;
3153 }
3154
3155 /* Finish analyzing dependencies of an insn. */
3156 static void
3157 has_dependence_finish_insn (void)
3158 {
3159 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3160
3161 has_dependence_data.where = DEPS_IN_NOWHERE;
3162 }
3163
3164 /* Start analyzing dependencies of LHS. */
3165 static void
3166 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3167 {
3168 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3169
3170 if (VINSN_LHS (has_dependence_data.con) != NULL)
3171 has_dependence_data.where = DEPS_IN_LHS;
3172 }
3173
3174 /* Finish analyzing dependencies of an lhs. */
3175 static void
3176 has_dependence_finish_lhs (void)
3177 {
3178 has_dependence_data.where = DEPS_IN_INSN;
3179 }
3180
3181 /* Start analyzing dependencies of RHS. */
3182 static void
3183 has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3184 {
3185 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3186
3187 if (VINSN_RHS (has_dependence_data.con) != NULL)
3188 has_dependence_data.where = DEPS_IN_RHS;
3189 }
3190
3191 /* Finish analyzing dependencies of an rhs. */
3192 static void
3193 has_dependence_finish_rhs (void)
3194 {
3195 gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3196 || has_dependence_data.where == DEPS_IN_INSN);
3197
3198 has_dependence_data.where = DEPS_IN_INSN;
3199 }
3200
3201 /* Note a set of REGNO. */
3202 static void
3203 has_dependence_note_reg_set (int regno)
3204 {
3205 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3206
3207 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3208 VINSN_INSN_RTX
3209 (has_dependence_data.con)))
3210 {
3211 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3212
3213 if (reg_last->sets != NULL
3214 || reg_last->clobbers != NULL)
3215 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3216
3217 if (reg_last->uses || reg_last->implicit_sets)
3218 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3219 }
3220 }
3221
3222 /* Note a clobber of REGNO. */
3223 static void
3224 has_dependence_note_reg_clobber (int regno)
3225 {
3226 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3227
3228 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3229 VINSN_INSN_RTX
3230 (has_dependence_data.con)))
3231 {
3232 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3233
3234 if (reg_last->sets)
3235 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3236
3237 if (reg_last->uses || reg_last->implicit_sets)
3238 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3239 }
3240 }
3241
3242 /* Note a use of REGNO. */
3243 static void
3244 has_dependence_note_reg_use (int regno)
3245 {
3246 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3247
3248 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3249 VINSN_INSN_RTX
3250 (has_dependence_data.con)))
3251 {
3252 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3253
3254 if (reg_last->sets)
3255 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3256
3257 if (reg_last->clobbers || reg_last->implicit_sets)
3258 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3259
3260 /* Merge BE_IN_SPEC bits into *DSP when the dependency producer
3261 is actually a check insn. We need to do this for any register
3262 read-read dependency with the check unless we track properly
3263 all registers written by BE_IN_SPEC-speculated insns, as
3264 we don't have explicit dependence lists. See PR 53975. */
3265 if (reg_last->uses)
3266 {
3267 ds_t pro_spec_checked_ds;
3268
3269 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3270 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3271
3272 if (pro_spec_checked_ds != 0)
3273 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3274 NULL_RTX, NULL_RTX);
3275 }
3276 }
3277 }
3278
3279 /* Note a memory dependence. */
3280 static void
3281 has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3282 rtx pending_mem ATTRIBUTE_UNUSED,
3283 insn_t pending_insn ATTRIBUTE_UNUSED,
3284 ds_t ds ATTRIBUTE_UNUSED)
3285 {
3286 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3287 VINSN_INSN_RTX (has_dependence_data.con)))
3288 {
3289 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3290
3291 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3292 }
3293 }
3294
3295 /* Note a dependence. */
3296 static void
3297 has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3298 ds_t ds ATTRIBUTE_UNUSED)
3299 {
3300 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3301 VINSN_INSN_RTX (has_dependence_data.con)))
3302 {
3303 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3304
3305 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3306 }
3307 }
3308
3309 /* Mark the insn as having a hard dependence that prevents speculation. */
3310 void
3311 sel_mark_hard_insn (rtx insn)
3312 {
3313 int i;
3314
3315 /* Only work when we're in has_dependence_p mode.
3316 ??? This is a hack, this should actually be a hook. */
3317 if (!has_dependence_data.dc || !has_dependence_data.pro)
3318 return;
3319
3320 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3321 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3322
3323 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3324 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3325 }
3326
3327 /* This structure holds the hooks for the dependency analysis used when
3328 actually processing dependencies in the scheduler. */
3329 static struct sched_deps_info_def has_dependence_sched_deps_info;
3330
3331 /* This initializes most of the fields of the above structure. */
3332 static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3333 {
3334 NULL,
3335
3336 has_dependence_start_insn,
3337 has_dependence_finish_insn,
3338 has_dependence_start_lhs,
3339 has_dependence_finish_lhs,
3340 has_dependence_start_rhs,
3341 has_dependence_finish_rhs,
3342 has_dependence_note_reg_set,
3343 has_dependence_note_reg_clobber,
3344 has_dependence_note_reg_use,
3345 has_dependence_note_mem_dep,
3346 has_dependence_note_dep,
3347
3348 0, /* use_cselib */
3349 0, /* use_deps_list */
3350 0 /* generate_spec_deps */
3351 };
3352
3353 /* Initialize has_dependence_sched_deps_info with extra spec field. */
3354 static void
3355 setup_has_dependence_sched_deps_info (void)
3356 {
3357 memcpy (&has_dependence_sched_deps_info,
3358 &const_has_dependence_sched_deps_info,
3359 sizeof (has_dependence_sched_deps_info));
3360
3361 if (spec_info != NULL)
3362 has_dependence_sched_deps_info.generate_spec_deps = 1;
3363
3364 sched_deps_info = &has_dependence_sched_deps_info;
3365 }
3366
3367 /* Remove all dependences found and recorded in has_dependence_data array. */
3368 void
3369 sel_clear_has_dependence (void)
3370 {
3371 int i;
3372
3373 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3374 has_dependence_data.has_dep_p[i] = 0;
3375 }
3376
3377 /* Return nonzero if EXPR is dependent upon PRED. Return the pointer
3378 to the dependence information array in HAS_DEP_PP. */
3379 ds_t
3380 has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3381 {
3382 int i;
3383 ds_t ds;
3384 struct deps_desc *dc;
3385
3386 if (INSN_SIMPLEJUMP_P (pred))
3387 /* Unconditional jump is just a transfer of control flow.
3388 Ignore it. */
3389 return false;
3390
3391 dc = &INSN_DEPS_CONTEXT (pred);
3392
3393 /* We init this field lazily. */
3394 if (dc->reg_last == NULL)
3395 init_deps_reg_last (dc);
3396
3397 if (!dc->readonly)
3398 {
3399 has_dependence_data.pro = NULL;
3400 /* Initialize empty dep context with information about PRED. */
3401 advance_deps_context (dc, pred);
3402 dc->readonly = 1;
3403 }
3404
3405 has_dependence_data.where = DEPS_IN_NOWHERE;
3406 has_dependence_data.pro = pred;
3407 has_dependence_data.con = EXPR_VINSN (expr);
3408 has_dependence_data.dc = dc;
3409
3410 sel_clear_has_dependence ();
3411
3412 /* Now catch all dependencies that would be generated between PRED and
3413 INSN. */
3414 setup_has_dependence_sched_deps_info ();
3415 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3416 has_dependence_data.dc = NULL;
3417
3418 /* When a barrier was found, set DEPS_IN_INSN bits. */
3419 if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3420 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3421 else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3422 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3423
3424 /* Do not allow stores to memory to move through checks. Currently
3425 we don't move this to sched-deps.c as the check doesn't have
3426 obvious places to which this dependence can be attached.
3427 FIXME: this should go to a hook. */
3428 if (EXPR_LHS (expr)
3429 && MEM_P (EXPR_LHS (expr))
3430 && sel_insn_is_speculation_check (pred))
3431 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3432
3433 *has_dep_pp = has_dependence_data.has_dep_p;
3434 ds = 0;
3435 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3436 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3437 NULL_RTX, NULL_RTX);
3438
3439 return ds;
3440 }
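
/* Usage sketch (illustrative): deciding how EXPR may move up through
   PRED.

     ds_t *has_dep_p;
     ds_t full_ds = has_dependence_p (expr, pred, &has_dep_p);

     if (full_ds == 0)
       ...  no dependence, EXPR moves past PRED unchanged
     else if (has_dep_p[DEPS_IN_RHS] && !has_dep_p[DEPS_IN_LHS])
       ...  only the rhs depends; substitution may still apply
     else if (full_ds & SPECULATIVE)
       ...  the dependence may be overcome by speculating EXPR

   The returned array is indexed by deps_where_t, one ds_t per part
   of the insn.  */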
3441
3442
3443 /* Dependence hooks implementation that checks dependence latency constraints
3444 on the insns being scheduled. The entry point for these routines is
3445 tick_check_p predicate. */
3446
3447 static struct
3448 {
3449 /* An expr we are currently checking. */
3450 expr_t expr;
3451
3452 /* A minimal cycle for its scheduling. */
3453 int cycle;
3454
3455 /* Whether we have seen a true dependence while checking. */
3456 bool seen_true_dep_p;
3457 } tick_check_data;
3458
3459 /* Update the minimal scheduling cycle of the expr being checked, given that
3460 it depends on PRO_INSN with status DS and weight DW. */
3461 static void
3462 tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3463 {
3464 expr_t con_expr = tick_check_data.expr;
3465 insn_t con_insn = EXPR_INSN_RTX (con_expr);
3466
3467 if (con_insn != pro_insn)
3468 {
3469 enum reg_note dt;
3470 int tick;
3471
3472 if (/* PROducer was removed from above due to pipelining. */
3473 !INSN_IN_STREAM_P (pro_insn)
3474 /* Or PROducer was originally on the next iteration regarding the
3475 CONsumer. */
3476 || (INSN_SCHED_TIMES (pro_insn)
3477 - EXPR_SCHED_TIMES (con_expr)) > 1)
3478 /* Don't count this dependence. */
3479 return;
3480
3481 dt = ds_to_dt (ds);
3482 if (dt == REG_DEP_TRUE)
3483 tick_check_data.seen_true_dep_p = true;
3484
3485 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3486
3487 {
3488 dep_def _dep, *dep = &_dep;
3489
3490 init_dep (dep, pro_insn, con_insn, dt);
3491
3492 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3493 }
3494
3495 /* When there are several kinds of dependencies between pro and con,
3496 only REG_DEP_TRUE should be taken into account. */
3497 if (tick > tick_check_data.cycle
3498 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3499 tick_check_data.cycle = tick;
3500 }
3501 }
3502
3503 /* An implementation of note_dep hook. */
3504 static void
3505 tick_check_note_dep (insn_t pro, ds_t ds)
3506 {
3507 tick_check_dep_with_dw (pro, ds, 0);
3508 }
3509
3510 /* An implementation of note_mem_dep hook. */
3511 static void
3512 tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3513 {
3514 dw_t dw;
3515
3516 dw = (ds_to_dt (ds) == REG_DEP_TRUE
3517 ? estimate_dep_weak (mem1, mem2)
3518 : 0);
3519
3520 tick_check_dep_with_dw (pro, ds, dw);
3521 }
3522
3523 /* This structure contains hooks for dependence analysis used when determining
3524 whether an insn is ready for scheduling. */
3525 static struct sched_deps_info_def tick_check_sched_deps_info =
3526 {
3527 NULL,
3528
3529 NULL,
3530 NULL,
3531 NULL,
3532 NULL,
3533 NULL,
3534 NULL,
3535 haifa_note_reg_set,
3536 haifa_note_reg_clobber,
3537 haifa_note_reg_use,
3538 tick_check_note_mem_dep,
3539 tick_check_note_dep,
3540
3541 0, 0, 0
3542 };
3543
3544 /* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3545 scheduled. Return 0 if all data from producers in DC is ready. */
3546 int
3547 tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3548 {
3549 int cycles_left;
3550 /* Initialize variables. */
3551 tick_check_data.expr = expr;
3552 tick_check_data.cycle = 0;
3553 tick_check_data.seen_true_dep_p = false;
3554 sched_deps_info = &tick_check_sched_deps_info;
3555
3556 gcc_assert (!dc->readonly);
3557 dc->readonly = 1;
3558 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3559 dc->readonly = 0;
3560
3561 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3562
3563 return cycles_left >= 0 ? cycles_left : 0;
3564 }
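
/* Worked example with illustrative numbers: suppose the last producer
   of EXPR's sources was scheduled on cycle 3 and the true dependence
   has latency 2, so tick_check_data.cycle becomes 3 + 2 = 5.  With the
   fence currently at cycle 4,

     tick_check_p (expr, dc, fence) == 5 - 4 == 1

   i.e. EXPR becomes ready one cycle from now; a non-positive
   difference yields 0, meaning all producers' data is ready.  */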
3565
3566
3567 /* Functions to work with insns. */
3568
3569 /* Returns true if LHS of INSN is the same as DEST of an insn
3570 being moved. */
3571 bool
3572 lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3573 {
3574 rtx lhs = INSN_LHS (insn);
3575
3576 if (lhs == NULL || dest == NULL)
3577 return false;
3578
3579 return rtx_equal_p (lhs, dest);
3580 }
3581
3582 /* Return s_i_d entry of INSN. Callable from debugger. */
3583 sel_insn_data_def
3584 insn_sid (insn_t insn)
3585 {
3586 return *SID (insn);
3587 }
3588
3589 /* True when INSN is a speculative check. We can tell this by looking
3590 at the data structures of the selective scheduler, not by examining
3591 the pattern. */
3592 bool
3593 sel_insn_is_speculation_check (rtx insn)
3594 {
3595 return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn);
3596 }
3597
3598 /* Extracts the machine mode MODE and destination location DST_LOC
3599 for the given INSN. */
3600 void
3601 get_dest_and_mode (rtx insn, rtx *dst_loc, machine_mode *mode)
3602 {
3603 rtx pat = PATTERN (insn);
3604
3605 gcc_assert (dst_loc);
3606 gcc_assert (GET_CODE (pat) == SET);
3607
3608 *dst_loc = SET_DEST (pat);
3609
3610 gcc_assert (*dst_loc);
3611 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3612
3613 if (mode)
3614 *mode = GET_MODE (*dst_loc);
3615 }
3616
3617 /* Returns true when moving through JUMP will result in bookkeeping
3618 creation. */
3619 bool
3620 bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3621 {
3622 insn_t succ;
3623 succ_iterator si;
3624
3625 FOR_EACH_SUCC (succ, si, jump)
3626 if (sel_num_cfg_preds_gt_1 (succ))
3627 return true;
3628
3629 return false;
3630 }
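
/* A sketch of when this returns true: some successor SUCC of JUMP has
   a second predecessor OTHER_BB,

     JUMP ----> SUCC <---- OTHER_BB

   so an expr moved up from SUCC through JUMP must be duplicated on
   the OTHER_BB path; that duplicate is the bookkeeping copy that
   sel_num_cfg_preds_gt_1 anticipates.  */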
3631
3632 /* Return 'true' if INSN is the only one in its basic block. */
3633 static bool
3634 insn_is_the_only_one_in_bb_p (insn_t insn)
3635 {
3636 return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3637 }
3638
3639 /* Check that the region we're scheduling still has at most one
3640 backedge. */
3641 static void
3642 verify_backedges (void)
3643 {
3644 if (pipelining_p)
3645 {
3646 int i, n = 0;
3647 edge e;
3648 edge_iterator ei;
3649
3650 for (i = 0; i < current_nr_blocks; i++)
3651 FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))->succs)
3652 if (in_current_region_p (e->dest)
3653 && BLOCK_TO_BB (e->dest->index) < i)
3654 n++;
3655
3656 gcc_assert (n <= 1);
3657 }
3658 }
3659
3660
3661 /* Functions to work with control flow. */
3662
3663 /* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that
3664 blocks are sorted in topological order (it might have been invalidated
3665 by redirecting an edge). */
3666 static void
3667 sel_recompute_toporder (void)
3668 {
3669 int i, n, rgn;
3670 int *postorder, n_blocks;
3671
3672 postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun));
3673 n_blocks = post_order_compute (postorder, false, false);
3674
3675 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3676 for (n = 0, i = n_blocks - 1; i >= 0; i--)
3677 if (CONTAINING_RGN (postorder[i]) == rgn)
3678 {
3679 BLOCK_TO_BB (postorder[i]) = n;
3680 BB_TO_BLOCK (n) = postorder[i];
3681 n++;
3682 }
3683
3684 /* Assert that we updated info for all blocks. We may miss some blocks if
3685 this function is called when redirecting an edge made a block
3686 unreachable, but that block is not deleted yet. */
3687 gcc_assert (n == RGN_NR_BLOCKS (rgn));
3688 }
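
/* For illustration: in a diamond bb1 -> {bb2, bb3} -> bb4, one possible
   post-order is (bb4, bb2, bb3, bb1); walking it backwards yields
   bb1, bb3, bb2, bb4 -- a valid topological order, which the loop above
   records into BLOCK_TO_BB/BB_TO_BLOCK for the region's blocks.  */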

/* Tidy the possibly empty block BB.  */
static bool
maybe_tidy_empty_bb (basic_block bb)
{
  basic_block succ_bb, pred_bb, note_bb;
  vec<basic_block> dom_bbs;
  edge e;
  edge_iterator ei;
  bool rescan_p;

  /* Keep empty bb only if this block immediately precedes EXIT and
     has incoming non-fallthrough edge, or it has no predecessors or
     successors.  Otherwise remove it.  */
  if (!sel_bb_empty_p (bb)
      || (single_succ_p (bb)
	  && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && (!single_pred_p (bb)
	      || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
      || EDGE_COUNT (bb->preds) == 0
      || EDGE_COUNT (bb->succs) == 0)
    return false;

  /* Do not attempt to redirect complex edges.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_COMPLEX)
      return false;
    else if (e->flags & EDGE_FALLTHRU)
      {
	rtx note;
	/* If prev bb ends with asm goto, see if any of the
	   ASM_OPERANDS_LABELs don't point to the fallthru
	   label.  Do not attempt to redirect it in that case.  */
	if (JUMP_P (BB_END (e->src))
	    && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
	  {
	    int i, n = ASM_OPERANDS_LABEL_LENGTH (note);

	    for (i = 0; i < n; ++i)
	      if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb))
		return false;
	  }
      }

  free_data_sets (bb);

  /* Do not delete BB if it has more than one successor.
     That can occur when we are moving a jump.  */
  if (!single_succ_p (bb))
    {
      gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
      sel_merge_blocks (bb->prev_bb, bb);
      return true;
    }

  succ_bb = single_succ (bb);
  rescan_p = true;
  pred_bb = NULL;
  dom_bbs.create (0);

  /* Save a pred/succ from the current region to attach the notes to.  */
  note_bb = NULL;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (in_current_region_p (e->src))
      {
	note_bb = e->src;
	break;
      }
  if (note_bb == NULL)
    note_bb = succ_bb;

  /* Redirect all non-fallthru edges to the next bb.  */
  while (rescan_p)
    {
      rescan_p = false;

      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  pred_bb = e->src;

	  if (!(e->flags & EDGE_FALLTHRU))
	    {
	      /* We cannot invalidate the computed topological order by
		 moving the edge destination block (E->DEST) along a
		 fallthru edge.

		 We will update dominators here only when we'll get
		 an unreachable block when redirecting, otherwise
		 sel_redirect_edge_and_branch will take care of it.  */
	      if (e->dest != bb
		  && single_pred_p (e->dest))
		dom_bbs.safe_push (e->dest);
	      sel_redirect_edge_and_branch (e, succ_bb);
	      rescan_p = true;
	      break;
	    }
	  /* If the edge is fallthru, but PRED_BB ends in a conditional jump
	     to BB (so there is no non-fallthru edge from PRED_BB to BB), we
	     still have to adjust it.  */
	  else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb)))
	    {
	      /* If possible, try to remove the unneeded conditional jump.  */
	      if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0
		  && !IN_CURRENT_FENCE_P (BB_END (pred_bb)))
		{
		  if (!sel_remove_insn (BB_END (pred_bb), false, false))
		    tidy_fallthru_edge (e);
		}
	      else
		sel_redirect_edge_and_branch (e, succ_bb);
	      rescan_p = true;
	      break;
	    }
	}
    }

  if (can_merge_blocks_p (bb->prev_bb, bb))
    sel_merge_blocks (bb->prev_bb, bb);
  else
    {
      /* This is a block without fallthru predecessor.  Just delete it.  */
      gcc_assert (note_bb);
      move_bb_info (note_bb, bb);
      remove_empty_bb (bb, true);
    }

  if (!dom_bbs.is_empty ())
    {
      dom_bbs.safe_push (succ_bb);
      iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
      dom_bbs.release ();
    }

  return true;
}

/* Tidy the control flow after we have removed original insn from
   XBB.  Return true if we have removed some blocks.  When FULL_TIDYING
   is true, also try to optimize control flow on non-empty blocks.  */
bool
tidy_control_flow (basic_block xbb, bool full_tidying)
{
  bool changed = true;
  insn_t first, last;

  /* First check whether XBB is empty.  */
  changed = maybe_tidy_empty_bb (xbb);
  if (changed || !full_tidying)
    return changed;

  /* Check if there is an unnecessary jump after the insn left.  */
  if (bb_has_removable_jump_to_p (xbb, xbb->next_bb)
      && INSN_SCHED_TIMES (BB_END (xbb)) == 0
      && !IN_CURRENT_FENCE_P (BB_END (xbb)))
    {
      if (sel_remove_insn (BB_END (xbb), false, false))
	return true;
      tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
    }

  first = sel_bb_head (xbb);
  last = sel_bb_end (xbb);
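  /* The boundaries may be debug insns or notes; skip over them so that
     FIRST and LAST point at the real boundary insns, if any, before the
     single-NOP check below.  */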
  if (MAY_HAVE_DEBUG_INSNS)
    {
      if (first != last && DEBUG_INSN_P (first))
	do
	  first = NEXT_INSN (first);
	while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));

      if (first != last && DEBUG_INSN_P (last))
	do
	  last = PREV_INSN (last);
	while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
    }
  /* Check if there is an unnecessary jump in the previous basic block
     leading to the next basic block left after removing INSN from the
     stream.  If so, remove that jump and redirect the edge to the
     current basic block (where INSN was before deletion).  This way,
     when the NOP is deleted several instructions later together with
     its basic block, we will not get a jump to the next instruction,
     which can be harmful.  */
  if (first == last
      && !sel_bb_empty_p (xbb)
      && INSN_NOP_P (last)
      /* Flow goes fallthru from current block to the next.  */
      && EDGE_COUNT (xbb->succs) == 1
      && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
      /* When successor is an EXIT block, it may not be the next block.  */
      && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* And unconditional jump in previous basic block leads to
	 next basic block of XBB and this jump can be safely removed.  */
      && in_current_region_p (xbb->prev_bb)
      && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb)
      && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
      /* Also this jump is not at the scheduling boundary.  */
      && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
    {
      bool recompute_toporder_p;
      /* Clear data structures of jump - jump itself will be removed
	 by sel_redirect_edge_and_branch.  */
      clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
      recompute_toporder_p
	= sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);

      gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);

      /* It can turn out that after removing the unused jump, the basic
	 block that contained it becomes empty too.  In such a case,
	 remove that block as well.  */
      if (sel_bb_empty_p (xbb->prev_bb))
	changed = maybe_tidy_empty_bb (xbb->prev_bb);
      if (recompute_toporder_p)
	sel_recompute_toporder ();
    }

  /* TODO: use separate flag for CFG checking.  */
  if (flag_checking)
    {
      verify_backedges ();
      verify_dominators (CDI_DOMINATORS);
    }

  return changed;
}

/* Purge meaningless empty blocks in the middle of a region.  */
void
purge_empty_blocks (void)
{
  int i;

  /* Do not attempt to delete the first basic block in the region.  */
  for (i = 1; i < current_nr_blocks; )
    {
      basic_block b = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));

      if (maybe_tidy_empty_bb (b))
	continue;

      i++;
    }
}

/* Rip-off INSN from the insn stream.  When ONLY_DISCONNECT is true,
   do not delete insn's data, because it will be later re-emitted.
   Return true if we have removed some blocks afterwards.  */
bool
sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
{
  basic_block bb = BLOCK_FOR_INSN (insn);

  gcc_assert (INSN_IN_STREAM_P (insn));

  if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
    {
      expr_t expr;
      av_set_iterator i;

      /* When we remove a debug insn that is head of a BB, it remains
	 in the AV_SET of the block, but it shouldn't.  */
      FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
	if (EXPR_INSN_RTX (expr) == insn)
	  {
	    av_set_iter_remove (&i);
	    break;
	  }
    }

  if (only_disconnect)
    remove_insn (insn);
  else
    {
      delete_insn (insn);
      clear_expr (INSN_EXPR (insn));
    }

  /* It is necessary to NULL these fields in case we are going to re-insert
     INSN into the insns stream, as will usually happen in the ONLY_DISCONNECT
     case, but also for NOPs that we will return to the nop pool.  */
  SET_PREV_INSN (insn) = NULL_RTX;
  SET_NEXT_INSN (insn) = NULL_RTX;
  set_block_for_insn (insn, NULL);

  return tidy_control_flow (bb, full_tidying);
}

/* Estimate number of the insns in BB.  */
static int
sel_estimate_number_of_insns (basic_block bb)
{
  int res = 0;
  insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));

  for (; insn != next_tail; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      res++;

  return res;
}

/* We don't need separate luids for notes or labels.  */
static int
sel_luid_for_non_insn (rtx x)
{
  gcc_assert (NOTE_P (x) || LABEL_P (x));

  return -1;
}

/* Find the proper seqno for inserting at INSN by successors.
   Return -1 if no successors with positive seqno exist.  */
static int
get_seqno_by_succs (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  rtx_insn *tmp = insn, *end = BB_END (bb);
  int seqno;
  insn_t succ = NULL;
  succ_iterator si;

  while (tmp != end)
    {
      tmp = NEXT_INSN (tmp);
      if (INSN_P (tmp))
	return INSN_SEQNO (tmp);
    }

  seqno = INT_MAX;

  FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL)
    if (INSN_SEQNO (succ) > 0)
      seqno = MIN (seqno, INSN_SEQNO (succ));

  if (seqno == INT_MAX)
    return -1;

  return seqno;
}

/* Compute seqno for INSN by its preds or succs.  Use OLD_SEQNO to compute
   seqno in corner cases.  */
static int
get_seqno_for_a_jump (insn_t insn, int old_seqno)
{
  int seqno;

  gcc_assert (INSN_SIMPLEJUMP_P (insn));

  if (!sel_bb_head_p (insn))
    seqno = INSN_SEQNO (PREV_INSN (insn));
  else
    {
      basic_block bb = BLOCK_FOR_INSN (insn);

      if (single_pred_p (bb)
	  && !in_current_region_p (single_pred (bb)))
	{
	  /* We can have preds outside a region when splitting edges
	     for pipelining of an outer loop.  Use succ instead.
	     There should be only one of them.  */
	  insn_t succ = NULL;
	  succ_iterator si;
	  bool first = true;

	  gcc_assert (flag_sel_sched_pipelining_outer_loops
		      && current_loop_nest);
	  FOR_EACH_SUCC_1 (succ, si, insn,
			   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
	    {
	      gcc_assert (first);
	      first = false;
	    }

	  gcc_assert (succ != NULL);
	  seqno = INSN_SEQNO (succ);
	}
      else
	{
	  insn_t *preds;
	  int n;

	  cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);

	  gcc_assert (n > 0);
	  /* For one predecessor, use simple method.  */
	  if (n == 1)
	    seqno = INSN_SEQNO (preds[0]);
	  else
	    seqno = get_seqno_by_preds (insn);

	  free (preds);
	}
    }

  /* We were unable to find a good seqno among preds.  */
  if (seqno < 0)
    seqno = get_seqno_by_succs (insn);

  if (seqno < 0)
    {
      /* The only case where this could be here legally is that the only
	 unscheduled insn was a conditional jump that got removed and turned
	 into this unconditional one.  Initialize from the old seqno
	 of that jump passed down to here.  */
      seqno = old_seqno;
    }

  gcc_assert (seqno >= 0);
  return seqno;
}

/* Find the proper seqno for inserting at INSN.  Returns -1 if no predecessors
   with positive seqno exist.  */
int
get_seqno_by_preds (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  rtx_insn *tmp = insn, *head = BB_HEAD (bb);
  insn_t *preds;
  int n, i, seqno;

  /* Loop backwards from INSN to HEAD including both.  */
  while (1)
    {
      if (INSN_P (tmp))
	return INSN_SEQNO (tmp);
      if (tmp == head)
	break;
      tmp = PREV_INSN (tmp);
    }

  cfg_preds (bb, &preds, &n);
  for (i = 0, seqno = -1; i < n; i++)
    seqno = MAX (seqno, INSN_SEQNO (preds[i]));

  /* PREDS is allocated by cfg_preds; release it to avoid leaking.  */
  free (preds);
  return seqno;
}
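
/* Note the asymmetry: get_seqno_by_preds takes the maximum over the
   predecessors' seqnos, while get_seqno_by_succs above takes the minimum
   over the successors' -- presumably so that the chosen seqno stays
   between those of the neighboring insns.  */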


/* Extend pass-scope data structures for basic blocks.  */
void
sel_extend_global_bb_info (void)
{
  sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
}

/* Extend region-scope data structures for basic blocks.  */
static void
extend_region_bb_info (void)
{
  sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
}

/* Extend all data structures to fit for all basic blocks.  */
static void
extend_bb_info (void)
{
  sel_extend_global_bb_info ();
  extend_region_bb_info ();
}

/* Finalize pass-scope data structures for basic blocks.  */
void
sel_finish_global_bb_info (void)
{
  sel_global_bb_info.release ();
}

/* Finalize region-scope data structures for basic blocks.  */
static void
finish_region_bb_info (void)
{
  sel_region_bb_info.release ();
}


/* Data for each insn in current region.  */
vec<sel_insn_data_def> s_i_d = vNULL;

/* Extend data structures for insns from current region.  */
static void
extend_insn_data (void)
{
  int reserve;

  sched_extend_target ();
  sched_deps_init (false);

  /* Extend data structures for insns from current region.  */
  reserve = (sched_max_luid + 1 - s_i_d.length ());
  if (reserve > 0 && ! s_i_d.space (reserve))
    {
      int size;

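      /* Growth policy: while the array is small, grow it by half of its
	 size again (3/2); once sched_max_luid exceeds 2048, grow linearly
	 with a fixed slack of 1024 entries instead.  */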
      if (sched_max_luid / 2 > 1024)
	size = sched_max_luid + 1024;
      else
	size = 3 * sched_max_luid / 2;

      s_i_d.safe_grow_cleared (size);
    }
}

/* Finalize data structures for insns from current region.  */
static void
finish_insns (void)
{
  unsigned i;

  /* Clear here all dependence contexts that may have been left over from
     insns that were removed during the scheduling.  */
  for (i = 0; i < s_i_d.length (); i++)
    {
      sel_insn_data_def *sid_entry = &s_i_d[i];

      if (sid_entry->live)
	return_regset_to_pool (sid_entry->live);
      if (sid_entry->analyzed_deps)
	{
	  BITMAP_FREE (sid_entry->analyzed_deps);
	  BITMAP_FREE (sid_entry->found_deps);
	  htab_delete (sid_entry->transformed_insns);
	  free_deps (&sid_entry->deps_context);
	}
      if (EXPR_VINSN (&sid_entry->expr))
	{
	  clear_expr (&sid_entry->expr);

	  /* Also, clear CANT_MOVE bit here, because we really don't want it
	     to be passed to the next region.  */
	  CANT_MOVE_BY_LUID (i) = 0;
	}
    }

  s_i_d.release ();
}

/* A proxy to pass initialization data to init_insn ().  */
static sel_insn_data_def _insn_init_ssid;
static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;

/* If true create a new vinsn.  Otherwise use the one from EXPR.  */
static bool insn_init_create_new_vinsn_p;

/* Set all necessary data for initialization of the new insn[s].  */
static expr_t
set_insn_init (expr_t expr, vinsn_t vi, int seqno)
{
  expr_t x = &insn_init_ssid->expr;

  copy_expr_onside (x, expr);
  if (vi != NULL)
    {
      insn_init_create_new_vinsn_p = false;
      change_vinsn_in_expr (x, vi);
    }
  else
    insn_init_create_new_vinsn_p = true;

  insn_init_ssid->seqno = seqno;
  return x;
}

/* Init data for INSN.  */
static void
init_insn_data (insn_t insn)
{
  expr_t expr;
  sel_insn_data_t ssid = insn_init_ssid;

  /* The fields mentioned below are special and hence are not being
     propagated to the new insns.  */
  gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
	      && !ssid->after_stall_p && ssid->sched_cycle == 0);
  gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);

  expr = INSN_EXPR (insn);
  copy_expr (expr, &ssid->expr);
  prepare_insn_expr (insn, ssid->seqno);

  if (insn_init_create_new_vinsn_p)
    change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));

  if (first_time_insn_init (insn))
    init_first_time_insn_data (insn);
}

/* This is used to initialize spurious jumps generated by
   sel_redirect_edge ().  OLD_SEQNO is used for initializing seqnos
   in corner cases within get_seqno_for_a_jump.  */
static void
init_simplejump_data (insn_t insn, int old_seqno)
{
  init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
	     REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0,
	     vNULL, true, false, false,
	     false, true);
  INSN_SEQNO (insn) = get_seqno_for_a_jump (insn, old_seqno);
  init_first_time_insn_data (insn);
}

/* Perform deferred initialization of insns.  This is used to process
   a new jump that may be created by redirect_edge.  OLD_SEQNO is used
   for initializing simplejumps in init_simplejump_data.  */
static void
sel_init_new_insn (insn_t insn, int flags, int old_seqno)
{
  /* We create data structures for bb when the first insn is emitted in it.  */
  if (INSN_P (insn)
      && INSN_IN_STREAM_P (insn)
      && insn_is_the_only_one_in_bb_p (insn))
    {
      extend_bb_info ();
      create_initial_data_sets (BLOCK_FOR_INSN (insn));
    }

  if (flags & INSN_INIT_TODO_LUID)
    {
      sched_extend_luids ();
      sched_init_insn_luid (insn);
    }

  if (flags & INSN_INIT_TODO_SSID)
    {
      extend_insn_data ();
      init_insn_data (insn);
      clear_expr (&insn_init_ssid->expr);
    }

  if (flags & INSN_INIT_TODO_SIMPLEJUMP)
    {
      extend_insn_data ();
      init_simplejump_data (insn, old_seqno);
    }

  gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
	      == CONTAINING_RGN (BB_TO_BLOCK (0)));
}
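
/* Typical usage, as in sel_split_edge and sel_create_recovery_block
   below: a newly found jump insn is initialized with
   INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP.  */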


/* Functions to init/finish work with lv sets.  */

/* Init BB_LV_SET of BB from DF_LR_IN set of BB.  */
static void
init_lv_set (basic_block bb)
{
  gcc_assert (!BB_LV_SET_VALID_P (bb));

  BB_LV_SET (bb) = get_regset_from_pool ();
  COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
  BB_LV_SET_VALID_P (bb) = true;
}

/* Copy liveness information to BB from FROM_BB.  */
static void
copy_lv_set_from (basic_block bb, basic_block from_bb)
{
  gcc_assert (!BB_LV_SET_VALID_P (bb));

  COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
  BB_LV_SET_VALID_P (bb) = true;
}

/* Initialize lv set of all bb headers.  */
void
init_lv_sets (void)
{
  basic_block bb;

  /* Initialize LV sets.  */
  FOR_EACH_BB_FN (bb, cfun)
    init_lv_set (bb);

  /* Don't forget EXIT_BLOCK.  */
  init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
}
/* Release lv set of BB.  */
static void
free_lv_set (basic_block bb)
{
  gcc_assert (BB_LV_SET (bb) != NULL);

  return_regset_to_pool (BB_LV_SET (bb));
  BB_LV_SET (bb) = NULL;
  BB_LV_SET_VALID_P (bb) = false;
}

/* Finalize lv sets of all bb headers.  */
void
free_lv_sets (void)
{
  basic_block bb;

  /* Don't forget EXIT_BLOCK.  */
  free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));

  /* Free LV sets.  */
  FOR_EACH_BB_FN (bb, cfun)
    if (BB_LV_SET (bb))
      free_lv_set (bb);
}

/* Mark AV_SET for BB as invalid, so this set will be updated the next time
   compute_av () processes BB.  This function is called when creating new basic
   blocks, as well as for blocks (either new or existing) where new jumps are
   created when the control flow is being updated.  */
static void
invalidate_av_set (basic_block bb)
{
  BB_AV_LEVEL (bb) = -1;
}

/* Create initial data sets for BB (they will be invalid).  */
static void
create_initial_data_sets (basic_block bb)
{
  if (BB_LV_SET (bb))
    BB_LV_SET_VALID_P (bb) = false;
  else
    BB_LV_SET (bb) = get_regset_from_pool ();
  invalidate_av_set (bb);
}

/* Free av set of BB.  */
static void
free_av_set (basic_block bb)
{
  av_set_clear (&BB_AV_SET (bb));
  BB_AV_LEVEL (bb) = 0;
}

/* Free data sets of BB.  */
void
free_data_sets (basic_block bb)
{
  free_lv_set (bb);
  free_av_set (bb);
}

/* Exchange data sets of TO and FROM.  */
void
exchange_data_sets (basic_block to, basic_block from)
{
  /* Exchange lv sets of TO and FROM.  */
  std::swap (BB_LV_SET (from), BB_LV_SET (to));
  std::swap (BB_LV_SET_VALID_P (from), BB_LV_SET_VALID_P (to));

  /* Exchange av sets of TO and FROM.  */
  std::swap (BB_AV_SET (from), BB_AV_SET (to));
  std::swap (BB_AV_LEVEL (from), BB_AV_LEVEL (to));
}

/* Copy data sets of FROM to TO.  */
void
copy_data_sets (basic_block to, basic_block from)
{
  gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
  gcc_assert (BB_AV_SET (to) == NULL);

  BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
  BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);

  if (BB_AV_SET_VALID_P (from))
    {
      BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
    }
  if (BB_LV_SET_VALID_P (from))
    {
      gcc_assert (BB_LV_SET (to) != NULL);
      COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
    }
}

/* Return an av set for INSN, if any.  */
av_set_t
get_av_set (insn_t insn)
{
  av_set_t av_set;

  gcc_assert (AV_SET_VALID_P (insn));

  if (sel_bb_head_p (insn))
    av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
  else
    av_set = NULL;

  return av_set;
}

/* Implementation of AV_LEVEL () macro.  Return AV_LEVEL () of INSN.  */
int
get_av_level (insn_t insn)
{
  int av_level;

  gcc_assert (INSN_P (insn));

  if (sel_bb_head_p (insn))
    av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
  else
    av_level = INSN_WS_LEVEL (insn);

  return av_level;
}


/* Variables to work with control-flow graph.  */

/* The basic blocks that have already been processed by sched_data_update (),
   but haven't been passed to sel_add_bb () yet.  */
static vec<basic_block>
last_added_blocks = vNULL;

/* A pool for allocating successor infos.  */
static struct
{
  /* A stack for saving succs_info structures.  */
  struct succs_info *stack;

  /* Its size.  */
  int size;

  /* Top of the stack.  */
  int top;

  /* Maximal value of the top.  */
  int max_top;
} succs_info_pool;

/* Functions to work with control-flow graph.  */

/* Return the head insn of BB, i.e. the first real insn following the bb
   note, or NULL if the block is effectively empty.  */
rtx_insn *
sel_bb_head (basic_block bb)
{
  rtx_insn *head;

  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      gcc_assert (exit_insn != NULL_RTX);
      head = exit_insn;
    }
  else
    {
      rtx_note *note = bb_note (bb);
      head = next_nonnote_insn (note);

      if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
	head = NULL;
    }

  return head;
}

/* Return true if INSN is a basic block header.  */
bool
sel_bb_head_p (insn_t insn)
{
  return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
}

/* Return last insn of BB.  */
rtx_insn *
sel_bb_end (basic_block bb)
{
  if (sel_bb_empty_p (bb))
    return NULL;

  gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));

  return BB_END (bb);
}

/* Return true if INSN is the last insn in its basic block.  */
bool
sel_bb_end_p (insn_t insn)
{
  return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
}
/* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK.  */
bool
sel_bb_empty_p (basic_block bb)
{
  return sel_bb_head (bb) == NULL;
}

/* True when BB belongs to the current scheduling region.  */
bool
in_current_region_p (basic_block bb)
{
  if (bb->index < NUM_FIXED_BLOCKS)
    return false;

  return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
}

/* Return the block which is a fallthru bb of a conditional jump JUMP.  */
basic_block
fallthru_bb_of_jump (const rtx_insn *jump)
{
  if (!JUMP_P (jump))
    return NULL;

  if (!any_condjump_p (jump))
    return NULL;

  /* A basic block that ends with a conditional jump may still have one
     successor (and be followed by a barrier); we are not interested in
     that case.  */
  if (single_succ_p (BLOCK_FOR_INSN (jump)))
    return NULL;

  return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
}

/* Remove all notes from BB.  */
static void
init_bb (basic_block bb)
{
  remove_notes (bb_note (bb), BB_END (bb));
  BB_NOTE_LIST (bb) = note_list;
}

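/* Init per-bb data structures for all blocks in BBS.  */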
void
sel_init_bbs (bb_vec_t bbs)
{
  const struct sched_scan_info_def ssi =
    {
      extend_bb_info, /* extend_bb */
      init_bb, /* init_bb */
      NULL, /* extend_insn */
      NULL /* init_insn */
    };

  sched_scan (&ssi, bbs);
}

/* Restore notes for the whole region.  */
static void
sel_restore_notes (void)
{
  int bb;
  insn_t insn;

  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      basic_block first, last;

      first = EBB_FIRST_BB (bb);
      last = EBB_LAST_BB (bb)->next_bb;

      do
	{
	  note_list = BB_NOTE_LIST (first);
	  restore_other_notes (NULL, first);
	  BB_NOTE_LIST (first) = NULL;

	  FOR_BB_INSNS (first, insn)
	    if (NONDEBUG_INSN_P (insn))
	      reemit_notes (insn);

	  first = first->next_bb;
	}
      while (first != last);
    }
}

/* Free per-bb data structures.  */
void
sel_finish_bbs (void)
{
  sel_restore_notes ();

  /* Remove current loop preheader from this loop.  */
  if (current_loop_nest)
    sel_remove_loop_preheader ();

  finish_region_bb_info ();
}

/* Return true if INSN has a single successor of type FLAGS.  */
bool
sel_insn_has_single_succ_p (insn_t insn, int flags)
{
  insn_t succ;
  succ_iterator si;
  bool first_p = true;

  FOR_EACH_SUCC_1 (succ, si, insn, flags)
    {
      if (first_p)
	first_p = false;
      else
	return false;
    }

  return true;
}
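
/* Note that an insn with no successors of the requested type at all also
   satisfies this predicate, since the loop body above is never entered
   in that case.  */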

/* Allocate successor's info.  */
static struct succs_info *
alloc_succs_info (void)
{
  if (succs_info_pool.top == succs_info_pool.max_top)
    {
      int i;

      if (++succs_info_pool.max_top >= succs_info_pool.size)
	gcc_unreachable ();

      i = ++succs_info_pool.top;
      succs_info_pool.stack[i].succs_ok.create (10);
      succs_info_pool.stack[i].succs_other.create (10);
      succs_info_pool.stack[i].probs_ok.create (10);
    }
  else
    succs_info_pool.top++;

  return &succs_info_pool.stack[succs_info_pool.top];
}

/* Free successor's info.  */
void
free_succs_info (struct succs_info * sinfo)
{
  gcc_assert (succs_info_pool.top >= 0
	      && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
  succs_info_pool.top--;

  /* Clear stale info.  */
  sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ());
  sinfo->succs_other.block_remove (0, sinfo->succs_other.length ());
  sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ());
  sinfo->all_prob = 0;
  sinfo->succs_ok_n = 0;
  sinfo->all_succs_n = 0;
}

/* Compute successor info for INSN.  FLAGS are the flags passed
   to the FOR_EACH_SUCC_1 iterator.  */
struct succs_info *
compute_succs_info (insn_t insn, short flags)
{
  succ_iterator si;
  insn_t succ;
  struct succs_info *sinfo = alloc_succs_info ();

  /* Traverse *all* successors and decide what to do with each.  */
  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
    {
      /* FIXME: this doesn't work for skipping to loop exits, as we don't
	 perform code motion through inner loops.  */
      short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;

      if (current_flags & flags)
	{
	  sinfo->succs_ok.safe_push (succ);
	  sinfo->probs_ok.safe_push (
	    /* FIXME: Improve calculation when skipping
	       inner loop to exits.  */
	    si.bb_end ? si.e1->probability : REG_BR_PROB_BASE);
	  sinfo->succs_ok_n++;
	}
      else
	sinfo->succs_other.safe_push (succ);

      /* Compute all_prob.  */
      if (!si.bb_end)
	sinfo->all_prob = REG_BR_PROB_BASE;
      else
	sinfo->all_prob += si.e1->probability;

      sinfo->all_succs_n++;
    }

  return sinfo;
}
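
/* A usage sketch: infos come from a small stack pool and must be released
   in LIFO order, which free_succs_info asserts:

     struct succs_info *sinfo = compute_succs_info (insn, SUCCS_NORMAL);
     ... inspect sinfo->succs_ok / sinfo->probs_ok / sinfo->all_prob ...
     free_succs_info (sinfo);  */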

/* Return the predecessors of BB in PREDS and their number in N.
   Empty blocks are skipped.  SIZE is used to allocate PREDS.  */
static void
cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
{
  edge e;
  edge_iterator ei;

  gcc_assert (BLOCK_TO_BB (bb->index) != 0);

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      basic_block pred_bb = e->src;
      insn_t bb_end = BB_END (pred_bb);

      if (!in_current_region_p (pred_bb))
	{
	  gcc_assert (flag_sel_sched_pipelining_outer_loops
		      && current_loop_nest);
	  continue;
	}

      if (sel_bb_empty_p (pred_bb))
	cfg_preds_1 (pred_bb, preds, n, size);
      else
	{
	  if (*n == *size)
	    *preds = XRESIZEVEC (insn_t, *preds,
				 (*size = 2 * *size + 1));
	  (*preds)[(*n)++] = bb_end;
	}
    }

  gcc_assert (*n != 0
	      || (flag_sel_sched_pipelining_outer_loops
		  && current_loop_nest));
}

/* Find all predecessors of BB and record them in PREDS and their number
   in N.  Empty blocks are skipped, and only normal (forward in-region)
   edges are processed.  The caller is responsible for freeing *PREDS.  */
static void
cfg_preds (basic_block bb, insn_t **preds, int *n)
{
  int size = 0;

  *preds = NULL;
  *n = 0;
  cfg_preds_1 (bb, preds, n, &size);
}

/* Returns true if we are moving INSN through a join point.  */
bool
sel_num_cfg_preds_gt_1 (insn_t insn)
{
  basic_block bb;

  if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
    return false;

  bb = BLOCK_FOR_INSN (insn);

  while (1)
    {
      if (EDGE_COUNT (bb->preds) > 1)
	return true;

      gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
      bb = EDGE_PRED (bb, 0)->src;

      if (!sel_bb_empty_p (bb))
	break;
    }

  return false;
}

/* Returns true when BB should be the end of an ebb.  Adapted from the
   code in sched-ebb.c.  */
bool
bb_ends_ebb_p (basic_block bb)
{
  basic_block next_bb = bb_next_bb (bb);
  edge e;

  if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || bitmap_bit_p (forced_ebb_heads, next_bb->index)
      || (LABEL_P (BB_HEAD (next_bb))
	  /* NB: LABEL_NUSES () is not maintained outside of jump.c.
	     Work around that.  */
	  && !single_pred_p (next_bb)))
    return true;

  if (!in_current_region_p (next_bb))
    return true;

  e = find_fallthru_edge (bb->succs);
  if (e)
    {
      gcc_assert (e->dest == next_bb);

      return false;
    }

  return true;
}

/* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
   successor of INSN.  */
bool
in_same_ebb_p (insn_t insn, insn_t succ)
{
  basic_block ptr = BLOCK_FOR_INSN (insn);

  for (;;)
    {
      if (ptr == BLOCK_FOR_INSN (succ))
	return true;

      if (bb_ends_ebb_p (ptr))
	return false;

      ptr = bb_next_bb (ptr);
    }

  gcc_unreachable ();
  return false;
}

/* Recomputes the reverse topological order for the function and
   saves it in REV_TOP_ORDER_INDEX.  REV_TOP_ORDER_INDEX_LEN is also
   modified appropriately.  */
static void
recompute_rev_top_order (void)
{
  int *postorder;
  int n_blocks, i;

  if (!rev_top_order_index
      || rev_top_order_index_len < last_basic_block_for_fn (cfun))
    {
      rev_top_order_index_len = last_basic_block_for_fn (cfun);
      rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
					rev_top_order_index_len);
    }

  postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));

  n_blocks = post_order_compute (postorder, true, false);
  gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks);

  /* Build reverse function: for each basic block with BB->INDEX == K
     rev_top_order_index[K] is its reverse topological sort number.  */
  for (i = 0; i < n_blocks; i++)
    {
      gcc_assert (postorder[i] < rev_top_order_index_len);
      rev_top_order_index[postorder[i]] = i;
    }

  free (postorder);
}

/* Clear all flags from insns in BB that could spoil its rescheduling.  */
void
clear_outdated_rtx_info (basic_block bb)
{
  rtx_insn *insn;

  FOR_BB_INSNS (bb, insn)
    if (INSN_P (insn))
      {
	SCHED_GROUP_P (insn) = 0;
	INSN_AFTER_STALL_P (insn) = 0;
	INSN_SCHED_TIMES (insn) = 0;
	EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;

	/* We cannot use the changed caches, as previously we could ignore
	   the LHS dependence due to enabled renaming and transform
	   the expression, and currently we'll be unable to do this.  */
	htab_empty (INSN_TRANSFORMED_INSNS (insn));
      }
}

/* Add BB_NOTE to the pool of available basic block notes.  */
static void
return_bb_to_pool (basic_block bb)
{
  rtx_note *note = bb_note (bb);

  gcc_assert (NOTE_BASIC_BLOCK (note) == bb
	      && bb->aux == NULL);

  /* It turns out that current cfg infrastructure does not support
     reuse of basic blocks.  Don't bother for now.  */
  /* bb_note_pool.safe_push (note);  */
}

/* Get a bb_note from pool or return NULL_RTX if pool is empty.  */
static rtx_note *
get_bb_note_from_pool (void)
{
  if (bb_note_pool.is_empty ())
    return NULL;
  else
    {
      rtx_note *note = bb_note_pool.pop ();

      SET_PREV_INSN (note) = NULL_RTX;
      SET_NEXT_INSN (note) = NULL_RTX;

      return note;
    }
}

/* Free bb_note_pool.  */
void
free_bb_note_pool (void)
{
  bb_note_pool.release ();
}

/* Setup scheduler pool and successor structure.  */
void
alloc_sched_pools (void)
{
  int succs_size;

  succs_size = MAX_WS + 1;
  succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
  succs_info_pool.size = succs_size;
  succs_info_pool.top = -1;
  succs_info_pool.max_top = -1;
}

/* Free the pools.  */
void
free_sched_pools (void)
{
  int i;

  sched_lists_pool.release ();
  gcc_assert (succs_info_pool.top == -1);
  for (i = 0; i <= succs_info_pool.max_top; i++)
    {
      succs_info_pool.stack[i].succs_ok.release ();
      succs_info_pool.stack[i].succs_other.release ();
      succs_info_pool.stack[i].probs_ok.release ();
    }
  free (succs_info_pool.stack);
}


/* Returns a position in RGN where BB can be inserted retaining
   topological order.  */
static int
find_place_to_insert_bb (basic_block bb, int rgn)
{
  bool has_preds_outside_rgn = false;
  edge e;
  edge_iterator ei;

  /* Find whether we have preds outside the region.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!in_current_region_p (e->src))
      {
	has_preds_outside_rgn = true;
	break;
      }

  /* Recompute the top order -- needed when we have > 1 pred
     and in case we don't have preds outside.  */
  if (flag_sel_sched_pipelining_outer_loops
      && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
    {
      int i, bbi = bb->index, cur_bbi;

      recompute_rev_top_order ();
      for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
	{
	  cur_bbi = BB_TO_BLOCK (i);
	  if (rev_top_order_index[bbi]
	      < rev_top_order_index[cur_bbi])
	    break;
	}

      /* The loop stopped at the block after which BB belongs, so the
	 insertion point is conceptually I + 1.  Since the caller inserts
	 after the position we return (it adds one itself), compensate
	 by subtracting one again.  */
      return (i + 1) - 1;
    }
  else if (has_preds_outside_rgn)
    {
      /* This is the case when we generate an extra empty block
	 to serve as region head during pipelining.  */
      e = EDGE_SUCC (bb, 0);
      gcc_assert (EDGE_COUNT (bb->succs) == 1
		  && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
		  && (BLOCK_TO_BB (e->dest->index) == 0));
      return -1;
    }

  /* We don't have preds outside the region.  We should have a single
     pred, because the multiple preds case comes from the pipelining of
     outer loops, and that is handled above.  Just take the bbi of this
     single pred.  */
  if (EDGE_COUNT (bb->succs) > 0)
    {
      int pred_bbi;

      gcc_assert (EDGE_COUNT (bb->preds) == 1);

      pred_bbi = EDGE_PRED (bb, 0)->src->index;
      return BLOCK_TO_BB (pred_bbi);
    }
  else
    /* BB has no successors.  It is safe to put it at the end.  */
    return current_nr_blocks - 1;
}

/* Deletes an empty basic block freeing its data.  */
static void
delete_and_free_basic_block (basic_block bb)
{
  gcc_assert (sel_bb_empty_p (bb));

  if (BB_LV_SET (bb))
    free_lv_set (bb);

  bitmap_clear_bit (blocks_to_reschedule, bb->index);

  /* Can't assert av_set properties because we use sel_aremove_bb
     when removing loop preheader from the region.  At the point of
     removing the preheader we already have deallocated sel_region_bb_info.  */
  gcc_assert (BB_LV_SET (bb) == NULL
	      && !BB_LV_SET_VALID_P (bb)
	      && BB_AV_LEVEL (bb) == 0
	      && BB_AV_SET (bb) == NULL);

  delete_basic_block (bb);
}

/* Add BB to the current region and update the region data.  */
static void
add_block_to_current_region (basic_block bb)
{
  int i, pos, bbi = -2, rgn;

  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  bbi = find_place_to_insert_bb (bb, rgn);
  bbi += 1;
  pos = RGN_BLOCKS (rgn) + bbi;

  gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
	      && ebb_head[bbi] == pos);

  /* Make a place for the new block.  */
  extend_regions ();

  for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
    BLOCK_TO_BB (rgn_bb_table[i])++;

  memmove (rgn_bb_table + pos + 1,
	   rgn_bb_table + pos,
	   (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));

  /* Initialize data for BB.  */
  rgn_bb_table[pos] = bb->index;
  BLOCK_TO_BB (bb->index) = bbi;
  CONTAINING_RGN (bb->index) = rgn;

  RGN_NR_BLOCKS (rgn)++;

  for (i = rgn + 1; i <= nr_regions; i++)
    RGN_BLOCKS (i)++;
}
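
/* For illustration (an assumed layout): if region RGN occupies
   rgn_bb_table[10..13] and the new block is to become its entry with
   bbi == 2, then POS == 12; BLOCK_TO_BB of the blocks at positions
   12..13 is bumped by one, the table tail starting at position 12 is
   shifted one slot right, the new bb index is stored at position 12,
   and RGN_BLOCKS of every following region grows by one.  */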

/* Remove BB from the current region and update the region data.  */
static void
remove_bb_from_region (basic_block bb)
{
  int i, pos, bbi = -2, rgn;

  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  bbi = BLOCK_TO_BB (bb->index);
  pos = RGN_BLOCKS (rgn) + bbi;

  gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
	      && ebb_head[bbi] == pos);

  for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
    BLOCK_TO_BB (rgn_bb_table[i])--;

  memmove (rgn_bb_table + pos,
	   rgn_bb_table + pos + 1,
	   (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));

  RGN_NR_BLOCKS (rgn)--;
  for (i = rgn + 1; i <= nr_regions; i++)
    RGN_BLOCKS (i)--;
}

/* Add BB to the current region and update all data.  If BB is NULL, add all
   blocks from last_added_blocks vector.  */
static void
sel_add_bb (basic_block bb)
{
  /* Extend luids so that new notes will receive zero luids.  */
  sched_extend_luids ();
  sched_init_bbs ();
  sel_init_bbs (last_added_blocks);

  /* When BB is passed explicitly, the vector should contain
     exactly one element which equals BB; otherwise, the vector
     must not be empty.  */
  gcc_assert (last_added_blocks.exists ());

  if (bb != NULL)
    {
      gcc_assert (last_added_blocks.length () == 1
		  && last_added_blocks[0] == bb);
      add_block_to_current_region (bb);

      /* We associate creating/deleting data sets with the first insn
	 appearing / disappearing in the bb.  */
      if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
	create_initial_data_sets (bb);

      last_added_blocks.release ();
    }
  else
    /* BB is NULL - process LAST_ADDED_BLOCKS instead.  */
    {
      int i;
      basic_block temp_bb = NULL;

      for (i = 0;
	   last_added_blocks.iterate (i, &bb); i++)
	{
	  add_block_to_current_region (bb);
	  temp_bb = bb;
	}

      /* We need to fetch at least one bb so we know the region
	 to update.  */
      gcc_assert (temp_bb != NULL);
      bb = temp_bb;

      last_added_blocks.release ();
    }

  rgn_setup_region (CONTAINING_RGN (bb->index));
}

/* Remove BB from the current region and update all data.
   If REMOVE_FROM_CFG_P is true, also remove the block from the CFG.  */
static void
sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
{
  unsigned idx = bb->index;

  gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);

  remove_bb_from_region (bb);
  return_bb_to_pool (bb);
  bitmap_clear_bit (blocks_to_reschedule, idx);

  if (remove_from_cfg_p)
    {
      basic_block succ = single_succ (bb);
      delete_and_free_basic_block (bb);
      set_immediate_dominator (CDI_DOMINATORS, succ,
			       recompute_dominator (CDI_DOMINATORS, succ));
    }

  rgn_setup_region (CONTAINING_RGN (idx));
}

/* Concatenate info of EMPTY_BB to info of MERGE_BB.  */
static void
move_bb_info (basic_block merge_bb, basic_block empty_bb)
{
  if (in_current_region_p (merge_bb))
    concat_note_lists (BB_NOTE_LIST (empty_bb),
		       &BB_NOTE_LIST (merge_bb));
  BB_NOTE_LIST (empty_bb) = NULL;
}

/* Remove EMPTY_BB.  If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
   the region, but keep it in the CFG.  */
static void
remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
{
  /* The block should contain just a note or a label.
     We try to check whether it is unused below.  */
  gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
	      || LABEL_P (BB_HEAD (empty_bb)));

  /* If basic block has predecessors or successors, redirect them.  */
  if (remove_from_cfg_p
      && (EDGE_COUNT (empty_bb->preds) > 0
	  || EDGE_COUNT (empty_bb->succs) > 0))
    {
      basic_block pred;
      basic_block succ;

      /* We need to init PRED and SUCC before redirecting edges.  */
      if (EDGE_COUNT (empty_bb->preds) > 0)
	{
	  edge e;

	  gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);

	  e = EDGE_PRED (empty_bb, 0);
	  gcc_assert (e->src == empty_bb->prev_bb
		      && (e->flags & EDGE_FALLTHRU));

	  pred = empty_bb->prev_bb;
	}
      else
	pred = NULL;

      if (EDGE_COUNT (empty_bb->succs) > 0)
	{
	  /* We do not check fallthruness here as above, because
	     after removing a jump the edge may no longer be fallthru.  */
	  gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
	  succ = EDGE_SUCC (empty_bb, 0)->dest;
	}
      else
	succ = NULL;

      if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
	{
	  edge e = EDGE_PRED (empty_bb, 0);

	  if (e->flags & EDGE_FALLTHRU)
	    redirect_edge_succ_nodup (e, succ);
	  else
	    sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
	}

      if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
	{
	  edge e = EDGE_SUCC (empty_bb, 0);

	  if (find_edge (pred, e->dest) == NULL)
	    redirect_edge_pred (e, pred);
	}
    }

  /* Finish removing.  */
  sel_remove_bb (empty_bb, remove_from_cfg_p);
}

/* An implementation of create_basic_block hook, which additionally updates
   per-bb data structures.  */
static basic_block
sel_create_basic_block (void *headp, void *endp, basic_block after)
{
  basic_block new_bb;
  rtx_note *new_bb_note;

  gcc_assert (flag_sel_sched_pipelining_outer_loops
	      || !last_added_blocks.exists ());

  new_bb_note = get_bb_note_from_pool ();

  if (new_bb_note == NULL_RTX)
    new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
  else
    {
      new_bb = create_basic_block_structure ((rtx_insn *) headp,
					     (rtx_insn *) endp,
					     new_bb_note, after);
      new_bb->aux = NULL;
    }

  last_added_blocks.safe_push (new_bb);

  return new_bb;
}

/* Implement sched_init_only_bb ().  */
static void
sel_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (after == NULL);

  extend_regions ();
  rgn_make_new_region_out_of_new_block (bb);
}

/* Update the latch when we've split or merged it from FROM block to TO.
   This should be checked for all outer loops, too.  */
static void
change_loops_latches (basic_block from, basic_block to)
{
  gcc_assert (from != to);

  if (current_loop_nest)
    {
      struct loop *loop;

      for (loop = current_loop_nest; loop; loop = loop_outer (loop))
	if (considered_for_pipelining_p (loop) && loop->latch == from)
	  {
	    gcc_assert (loop == current_loop_nest);
	    loop->latch = to;
	    gcc_assert (loop_latch_edge (loop));
	  }
    }
}

/* Splits BB into two basic blocks, adding it to the region and extending
   per-bb data structures.  Returns the newly created bb.  */
static basic_block
sel_split_block (basic_block bb, rtx after)
{
  basic_block new_bb;
  insn_t insn;

  new_bb = sched_split_block_1 (bb, after);
  sel_add_bb (new_bb);

  /* This should be called after sel_add_bb, because this uses
     CONTAINING_RGN for the new block, which is not yet initialized.
     FIXME: this function may be a no-op now.  */
  change_loops_latches (bb, new_bb);

  /* Update ORIG_BB_INDEX for insns moved into the new block.  */
  FOR_BB_INSNS (new_bb, insn)
    if (INSN_P (insn))
      EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;

  if (sel_bb_empty_p (bb))
    {
      gcc_assert (!sel_bb_empty_p (new_bb));

      /* NEW_BB has data sets that need to be updated and BB holds
	 data sets that should be removed.  Exchange these data sets
	 so that we won't lose BB's valid data sets.  */
      exchange_data_sets (new_bb, bb);
      free_data_sets (bb);
    }

  if (!sel_bb_empty_p (new_bb)
      && bitmap_bit_p (blocks_to_reschedule, bb->index))
    bitmap_set_bit (blocks_to_reschedule, new_bb->index);

  return new_bb;
}

/* If BB ends with a jump insn whose UID is bigger than PREV_MAX_UID,
   return it.  Otherwise returns NULL.  */
static rtx_insn *
check_for_new_jump (basic_block bb, int prev_max_uid)
{
  rtx_insn *end;

  end = sel_bb_end (bb);
  if (end && INSN_UID (end) >= prev_max_uid)
    return end;
  return NULL;
}

/* Look for a new jump either in FROM_BB block or in newly created JUMP_BB
   block.  New means having UID at least equal to PREV_MAX_UID.  */
static rtx_insn *
find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
{
  rtx_insn *jump;

  /* Return immediately if no new insns were emitted.  */
  if (get_max_uid () == prev_max_uid)
    return NULL;

  /* Now check both blocks for new jumps.  There will only ever be one.  */
  if ((jump = check_for_new_jump (from, prev_max_uid)))
    return jump;

  if (jump_bb != NULL
      && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
    return jump;
  return NULL;
}

/* Splits E and adds the newly created basic block to the current region.
   Returns this basic block.  */
basic_block
sel_split_edge (edge e)
{
  basic_block new_bb, src, other_bb = NULL;
  int prev_max_uid;
  rtx_insn *jump;

  src = e->src;
  prev_max_uid = get_max_uid ();
  new_bb = split_edge (e);

  if (flag_sel_sched_pipelining_outer_loops
      && current_loop_nest)
    {
      int i;
      basic_block bb;

      /* Some of the basic blocks might not have been added to the loop.
	 Add them here, until this is fixed in force_fallthru.  */
      for (i = 0;
	   last_added_blocks.iterate (i, &bb); i++)
	if (!bb->loop_father)
	  {
	    add_bb_to_loop (bb, e->dest->loop_father);

	    gcc_assert (!other_bb && (new_bb->index != bb->index));
	    other_bb = bb;
	  }
    }

  /* Add all last_added_blocks to the region.  */
  sel_add_bb (NULL);

  jump = find_new_jump (src, new_bb, prev_max_uid);
  if (jump)
    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);

  /* Put the correct lv set on this block.  */
  if (other_bb && !sel_bb_empty_p (other_bb))
    compute_live (sel_bb_head (other_bb));

  return new_bb;
}

/* Implement sched_create_empty_bb ().  */
static basic_block
sel_create_empty_bb (basic_block after)
{
  basic_block new_bb;

  new_bb = sched_create_empty_bb_1 (after);

  /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
     later.  */
  gcc_assert (last_added_blocks.length () == 1
	      && last_added_blocks[0] == new_bb);

  last_added_blocks.release ();
  return new_bb;
}

/* Implement sched_create_recovery_block.  ORIG_INSN is where block
   will be split to insert a check.  */
basic_block
sel_create_recovery_block (insn_t orig_insn)
{
  basic_block first_bb, second_bb, recovery_block;
  basic_block before_recovery = NULL;
  rtx_insn *jump;

  first_bb = BLOCK_FOR_INSN (orig_insn);
  if (sel_bb_end_p (orig_insn))
    {
      /* Avoid introducing an empty block while splitting.  */
      gcc_assert (single_succ_p (first_bb));
      second_bb = single_succ (first_bb);
    }
  else
    second_bb = sched_split_block (first_bb, orig_insn);

  recovery_block = sched_create_recovery_block (&before_recovery);
  if (before_recovery)
    copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR_FOR_FN (cfun));

  gcc_assert (sel_bb_empty_p (recovery_block));
  sched_create_recovery_edges (first_bb, recovery_block, second_bb);
  if (current_loops != NULL)
    add_bb_to_loop (recovery_block, first_bb->loop_father);

  sel_add_bb (recovery_block);

  jump = BB_END (recovery_block);
  gcc_assert (sel_bb_head (recovery_block) == jump);
  sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);

  return recovery_block;
}

/* Merge basic block B into basic block A.  */
static void
sel_merge_blocks (basic_block a, basic_block b)
{
  gcc_assert (sel_bb_empty_p (b)
	      && EDGE_COUNT (b->preds) == 1
	      && EDGE_PRED (b, 0)->src == b->prev_bb);

  move_bb_info (b->prev_bb, b);
  remove_empty_bb (b, false);
  merge_blocks (a, b);
  change_loops_latches (b, a);
}
5569
5570 /* A wrapper for redirect_edge_and_branch_force, which also initializes
5571 data structures for possibly created bb and insns. */
5572 void
sel_redirect_edge_and_branch_force(edge e,basic_block to)5573 sel_redirect_edge_and_branch_force (edge e, basic_block to)
5574 {
5575 basic_block jump_bb, src, orig_dest = e->dest;
5576 int prev_max_uid;
5577 rtx_insn *jump;
5578 int old_seqno = -1;
5579
5580 /* This function is now used only for bookkeeping code creation, where
5581 we'll never get the single pred of orig_dest block and thus will not
5582 hit unreachable blocks when updating dominator info. */
5583 gcc_assert (!sel_bb_empty_p (e->src)
5584 && !single_pred_p (orig_dest));
5585 src = e->src;
5586 prev_max_uid = get_max_uid ();
5587 /* Compute and pass old_seqno down to sel_init_new_insn only for the case
5588 when the conditional jump being redirected may become unconditional. */
5589 if (any_condjump_p (BB_END (src))
5590 && INSN_SEQNO (BB_END (src)) >= 0)
5591 old_seqno = INSN_SEQNO (BB_END (src));
5592
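  /* redirect_edge_and_branch_force returns the basic block created to
     hold the new jump, or NULL when no new block was needed.  */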
  jump_bb = redirect_edge_and_branch_force (e, to);
  if (jump_bb != NULL)
    sel_add_bb (jump_bb);

  /* As used now, this function cannot break the loop structure, so there
     is nothing to update here.  But check this to be sure.  */
  if (current_loop_nest
      && pipelining_p)
    gcc_assert (loop_latch_edge (current_loop_nest));

  jump = find_new_jump (src, jump_bb, prev_max_uid);
  if (jump)
    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP,
                       old_seqno);
  set_immediate_dominator (CDI_DOMINATORS, to,
                           recompute_dominator (CDI_DOMINATORS, to));
  set_immediate_dominator (CDI_DOMINATORS, orig_dest,
                           recompute_dominator (CDI_DOMINATORS, orig_dest));
}

/* A wrapper for redirect_edge_and_branch.  Return TRUE if blocks connected by
   redirected edge are in reverse topological order.  */
bool
sel_redirect_edge_and_branch (edge e, basic_block to)
{
  bool latch_edge_p;
  basic_block src, orig_dest = e->dest;
  int prev_max_uid;
  rtx_insn *jump;
  edge redirected;
  bool recompute_toporder_p = false;
  bool maybe_unreachable = single_pred_p (orig_dest);
  int old_seqno = -1;

  latch_edge_p = (pipelining_p
                  && current_loop_nest
                  && e == loop_latch_edge (current_loop_nest));

  src = e->src;
  prev_max_uid = get_max_uid ();

  /* Compute and pass old_seqno down to sel_init_new_insn only for the case
     when the conditional jump being redirected may become unconditional.  */
  if (any_condjump_p (BB_END (src))
      && INSN_SEQNO (BB_END (src)) >= 0)
    old_seqno = INSN_SEQNO (BB_END (src));

  redirected = redirect_edge_and_branch (e, to);

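  /* Unlike the _force variant above, this redirection must succeed and
     must not create any new basic blocks.  */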
  gcc_assert (redirected && !last_added_blocks.exists ());

  /* When we've redirected a latch edge, update the header.  */
  if (latch_edge_p)
    {
      current_loop_nest->header = to;
      gcc_assert (loop_latch_edge (current_loop_nest));
    }

  /* In rare situations, the topological relation between the blocks connected
     by the redirected edge can change (see PR42245 for an example).  Update
     block_to_bb/bb_to_block.  */
  if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
      && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
    recompute_toporder_p = true;

  jump = find_new_jump (src, NULL, prev_max_uid);
  if (jump)
    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP,
                       old_seqno);

  /* Only update dominator info when we don't have unreachable blocks.
     Otherwise we'll update in maybe_tidy_empty_bb.  */
  if (!maybe_unreachable)
    {
      set_immediate_dominator (CDI_DOMINATORS, to,
                               recompute_dominator (CDI_DOMINATORS, to));
      set_immediate_dominator (CDI_DOMINATORS, orig_dest,
                               recompute_dominator (CDI_DOMINATORS, orig_dest));
    }
  return recompute_toporder_p;
}

/* This variable holds the cfg hooks used by the selective scheduler.  */
static struct cfg_hooks sel_cfg_hooks;

/* Register sel-sched cfg hooks.  */
void
sel_register_cfg_hooks (void)
{
  orig_cfg_hooks = get_cfg_hooks ();
  sel_cfg_hooks = orig_cfg_hooks;

  sel_cfg_hooks.create_basic_block = sel_create_basic_block;

  set_cfg_hooks (sel_cfg_hooks);

  sched_init_only_bb = sel_init_only_bb;
  sched_split_block = sel_split_block;
  sched_create_empty_bb = sel_create_empty_bb;
}

/* Unregister sel-sched cfg hooks.  */
void
sel_unregister_cfg_hooks (void)
{
  sched_create_empty_bb = NULL;
  sched_split_block = NULL;
  sched_init_only_bb = NULL;

  set_cfg_hooks (orig_cfg_hooks);
}


/* Emit an insn rtx based on PATTERN.  If a jump insn is wanted,
   LABEL is where this jump should be directed.  */
rtx_insn *
create_insn_rtx_from_pattern (rtx pattern, rtx label)
{
  rtx_insn *insn_rtx;

  gcc_assert (!INSN_P (pattern));

  start_sequence ();

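  /* A debug insn passed in LABEL means that the caller asks for a copy
     of a debug insn rather than for a jump; see create_copy_of_insn_rtx.  */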
  if (label == NULL_RTX)
    insn_rtx = emit_insn (pattern);
  else if (DEBUG_INSN_P (label))
    insn_rtx = emit_debug_insn (pattern);
  else
    {
      insn_rtx = emit_jump_insn (pattern);
      JUMP_LABEL (insn_rtx) = label;
      ++LABEL_NUSES (label);
    }

  end_sequence ();

  sched_extend_luids ();
  sched_extend_target ();
  sched_deps_init (false);

  /* Initialize INSN_CODE now.  */
  recog_memoized (insn_rtx);
  return insn_rtx;
}

/* Create a new vinsn for INSN_RTX.  FORCE_UNIQUE_P is true when the vinsn
   must not be clonable.  */
vinsn_t
create_vinsn_from_insn_rtx (rtx_insn *insn_rtx, bool force_unique_p)
{
  gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));

  /* If VINSN_TYPE is not USE, retain its uniqueness.  */
  return vinsn_create (insn_rtx, force_unique_p);
}

/* Create a copy of INSN_RTX.  */
rtx_insn *
create_copy_of_insn_rtx (rtx insn_rtx)
{
  rtx_insn *res;
  rtx link;

  if (DEBUG_INSN_P (insn_rtx))
    return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
                                         insn_rtx);

  gcc_assert (NONJUMP_INSN_P (insn_rtx));

  res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
                                      NULL_RTX);

  /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND
     since mark_jump_label will make them.  REG_LABEL_TARGETs are created
     there too, but are supposed to be sticky, so we copy them.  */
  for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND
        && REG_NOTE_KIND (link) != REG_EQUAL
        && REG_NOTE_KIND (link) != REG_EQUIV)
      {
        if (GET_CODE (link) == EXPR_LIST)
          add_reg_note (res, REG_NOTE_KIND (link),
                        copy_insn_1 (XEXP (link, 0)));
        else
          add_reg_note (res, REG_NOTE_KIND (link), XEXP (link, 0));
      }

  return res;
}

/* Change vinsn field of EXPR to hold NEW_VINSN.  */
void
change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
{
  vinsn_detach (EXPR_VINSN (expr));

  EXPR_VINSN (expr) = new_vinsn;
  vinsn_attach (new_vinsn);
}

/* Helpers for global init.  */
/* This structure is used to call the existing bundling mechanism and to
   calculate insn priorities.  */
static struct haifa_sched_info sched_sel_haifa_sched_info =
{
  NULL, /* init_ready_list */
  NULL, /* can_schedule_ready_p */
  NULL, /* schedule_more_p */
  NULL, /* new_ready */
  NULL, /* rgn_rank */
  sel_print_insn, /* rgn_print_insn */
  contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  0, 0,

  NULL, /* add_remove_insn */
  NULL, /* begin_schedule_ready */
  NULL, /* begin_move_insn */
  NULL, /* advance_target_bb */

  NULL,
  NULL,

  SEL_SCHED | NEW_BBS
};

/* Setup special insns used in the scheduler.  */
void
setup_nop_and_exit_insns (void)
{
  gcc_assert (nop_pattern == NULL_RTX
              && exit_insn == NULL_RTX);

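  /* Use (const_int -1) as a placeholder pattern that matches no real
     insn.  */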
  nop_pattern = constm1_rtx;

  start_sequence ();
  emit_insn (nop_pattern);
  exit_insn = get_insns ();
  end_sequence ();
  set_block_for_insn (exit_insn, EXIT_BLOCK_PTR_FOR_FN (cfun));
}

/* Free special insns used in the scheduler.  */
void
free_nop_and_exit_insns (void)
{
  exit_insn = NULL;
  nop_pattern = NULL_RTX;
}

/* Setup a special vinsn used in new insns initialization.  */
void
setup_nop_vinsn (void)
{
  nop_vinsn = vinsn_create (exit_insn, false);
  vinsn_attach (nop_vinsn);
}

/* Free a special vinsn used in new insns initialization.  */
void
free_nop_vinsn (void)
{
  gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
  vinsn_detach (nop_vinsn);
  nop_vinsn = NULL;
}

/* Call a set_sched_flags hook.  */
void
sel_set_sched_flags (void)
{
  /* ??? This means that set_sched_flags was called, and we decided to
     support speculation.  However, set_sched_flags also modifies flags
     on current_sched_info, doing this only at global init.  And we
     sometimes change c_s_i later.  So put the correct flags again.  */
  if (spec_info && targetm.sched.set_sched_flags)
    targetm.sched.set_sched_flags (spec_info);
}

/* Setup pointers to global sched info structures.  */
void
sel_setup_sched_infos (void)
{
  rgn_setup_common_sched_info ();

  memcpy (&sel_common_sched_info, common_sched_info,
          sizeof (sel_common_sched_info));

  sel_common_sched_info.fix_recovery_cfg = NULL;
  sel_common_sched_info.add_block = NULL;
  sel_common_sched_info.estimate_number_of_insns
    = sel_estimate_number_of_insns;
  sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
  sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;

  common_sched_info = &sel_common_sched_info;

  current_sched_info = &sched_sel_haifa_sched_info;
  current_sched_info->sched_max_insns_priority =
    get_rgn_sched_max_insns_priority ();

  sel_set_sched_flags ();
}


/* Add basic block BB to region RGN at the position *BB_ORD_INDEX, then
   increment *BB_ORD_INDEX.  */
static void
sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
{
  RGN_NR_BLOCKS (rgn) += 1;
  RGN_DONT_CALC_DEPS (rgn) = 0;
  RGN_HAS_REAL_EBB (rgn) = 0;
  CONTAINING_RGN (bb->index) = rgn;
  BLOCK_TO_BB (bb->index) = *bb_ord_index;
  rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
  (*bb_ord_index)++;

  /* FIXME: it is true only when not scheduling ebbs.  */
  RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
}

/* Functions to support pipelining of outer loops.  */

/* Creates a new empty region and returns its number.  */
static int
sel_create_new_region (void)
{
  int new_rgn_number = nr_regions;

  RGN_NR_BLOCKS (new_rgn_number) = 0;

  /* FIXME: This will work only when EBBs are not created.  */
  if (new_rgn_number != 0)
    RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
      RGN_NR_BLOCKS (new_rgn_number - 1);
  else
    RGN_BLOCKS (new_rgn_number) = 0;

  /* Set the blocks of the next region so the other functions may
     calculate the number of blocks in the region.  */
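  /* For instance, after two regions of 3 and 2 blocks have been created,
     RGN_BLOCKS holds { 0, 3, 5 } and rgn_bb_table keeps the five block
     indices contiguously.  */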
  RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
    RGN_NR_BLOCKS (new_rgn_number);

  nr_regions++;

  return new_rgn_number;
}

/* If X has a smaller topological sort number than Y, returns -1;
   if greater, returns 1.  */
static int
bb_top_order_comparator (const void *x, const void *y)
{
  basic_block bb1 = *(const basic_block *) x;
  basic_block bb2 = *(const basic_block *) y;

  gcc_assert (bb1 == bb2
              || rev_top_order_index[bb1->index]
                 != rev_top_order_index[bb2->index]);

  /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
     bbs with greater number should go earlier.  */
  if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
    return -1;
  else
    return 1;
}

/* Create a region for LOOP and return its number.  If we don't want
   to pipeline LOOP, return -1.  */
static int
make_region_from_loop (struct loop *loop)
{
  unsigned int i;
  int new_rgn_number = -1;
  struct loop *inner;

  /* Basic block index, to be assigned to BLOCK_TO_BB.  */
  int bb_ord_index = 0;
  basic_block *loop_blocks;
  basic_block preheader_block;

  if (loop->num_nodes
      > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
    return -1;

  /* Don't pipeline loops whose latch belongs to some of its inner loops.  */
  for (inner = loop->inner; inner; inner = inner->inner)
    if (flow_bb_inside_loop_p (inner, loop->latch))
      return -1;

  loop->ninsns = num_loop_insns (loop);
  if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
    return -1;

  loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);

  for (i = 0; i < loop->num_nodes; i++)
    if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
      {
        free (loop_blocks);
        return -1;
      }

  preheader_block = loop_preheader_edge (loop)->src;
  gcc_assert (preheader_block);
  gcc_assert (loop_blocks[0] == loop->header);

  new_rgn_number = sel_create_new_region ();

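  /* The preheader goes first, so within its region it gets BLOCK_TO_BB
     index 0; sel_is_loop_preheader_p relies on this.  */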
  sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
  bitmap_set_bit (bbs_in_loop_rgns, preheader_block->index);

  for (i = 0; i < loop->num_nodes; i++)
    {
      /* Add only those blocks that haven't been scheduled in the inner loop.
         The exceptions are the basic blocks with bookkeeping code - they
         should be added to the region (they actually don't belong to the
         loop body, but to the region containing that loop body).  */

      gcc_assert (new_rgn_number >= 0);

      if (! bitmap_bit_p (bbs_in_loop_rgns, loop_blocks[i]->index))
        {
          sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
                                   new_rgn_number);
          bitmap_set_bit (bbs_in_loop_rgns, loop_blocks[i]->index);
        }
    }

  free (loop_blocks);
  MARK_LOOP_FOR_PIPELINING (loop);

  return new_rgn_number;
}

/* Create a new region from preheader blocks LOOP_BLOCKS.  */
void
make_region_from_loop_preheader (vec<basic_block> *&loop_blocks)
{
  unsigned int i;
  int new_rgn_number = -1;
  basic_block bb;

  /* Basic block index, to be assigned to BLOCK_TO_BB.  */
  int bb_ord_index = 0;

  new_rgn_number = sel_create_new_region ();

  FOR_EACH_VEC_ELT (*loop_blocks, i, bb)
    {
      gcc_assert (new_rgn_number >= 0);

      sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
    }

  vec_free (loop_blocks);
}


/* Create region(s) from loop nest LOOP, such that inner loops will be
   pipelined before outer loops.  Returns true when a region for LOOP
   is created.  */
static bool
make_regions_from_loop_nest (struct loop *loop)
{
  struct loop *cur_loop;
  int rgn_number;

  /* Traverse all immediate inner loops; if any of them has not been put
     into a region, give up on pipelining LOOP as well.  */
  for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
    if (! bitmap_bit_p (bbs_in_loop_rgns, cur_loop->header->index))
      return false;

  /* At this moment all regular inner loops should have been pipelined.
     Try to create a region from this loop.  */
  rgn_number = make_region_from_loop (loop);

  if (rgn_number < 0)
    return false;

  loop_nests.safe_push (loop);
  return true;
}

/* Initialize the data structures needed for pipelining.  */
void
sel_init_pipelining (void)
{
  /* Collect loop information to be used in outer loops pipelining.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS
                       | LOOPS_HAVE_FALLTHRU_PREHEADERS
                       | LOOPS_HAVE_RECORDED_EXITS
                       | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
  current_loop_nest = NULL;

  bbs_in_loop_rgns = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (bbs_in_loop_rgns);

  recompute_rev_top_order ();
}

/* Returns a struct loop for region RGN.  */
loop_p
get_loop_nest_for_rgn (unsigned int rgn)
{
  /* Regions created with extend_rgns don't have corresponding loop nests,
     because they don't represent loops.  */
  if (rgn < loop_nests.length ())
    return loop_nests[rgn];
  else
    return NULL;
}

/* True when LOOP was included into pipelining regions.  */
bool
considered_for_pipelining_p (struct loop *loop)
{
  if (loop_depth (loop) == 0)
    return false;

  /* Now, the loop could be too large or irreducible.  Check whether its
     region is in LOOP_NESTS.
     We determine the region number of LOOP as the region number of its
     latch.  We can't use the header here, because it may really be just a
     removed preheader, which would give us the wrong region number.  */
  if (LOOP_MARKED_FOR_PIPELINING_P (loop))
    {
      int rgn = CONTAINING_RGN (loop->latch->index);

      gcc_assert ((unsigned) rgn < loop_nests.length ());
      return true;
    }

  return false;
}

/* Makes regions from the rest of the blocks, after loops are chosen
   for pipelining.  */
static void
make_regions_from_the_rest (void)
{
  int cur_rgn_blocks;
  int *loop_hdr;
  int i;

  basic_block bb;
  edge e;
  edge_iterator ei;
  int *degree;

  /* Index in rgn_bb_table where to start allocating new regions.  */
  cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;

  /* Make regions from all the remaining basic blocks, i.e. those that
     don't belong to any loop or belong to irreducible loops.  Prepare
     the data structures for extend_rgns.  */

  /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
     LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
     loop.  */
  loop_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
  degree = XCNEWVEC (int, last_basic_block_for_fn (cfun));


  /* For each basic block that belongs to some loop, assign the number
     of the innermost loop it belongs to.  */
  for (i = 0; i < last_basic_block_for_fn (cfun); i++)
    loop_hdr[i] = -1;

  FOR_EACH_BB_FN (bb, cfun)
    {
      if (bb->loop_father && bb->loop_father->num != 0
          && !(bb->flags & BB_IRREDUCIBLE_LOOP))
        loop_hdr[bb->index] = bb->loop_father->num;
    }

  /* For each basic block, degree is calculated as the number of incoming
     edges coming from basic blocks that are not yet scheduled.  Basic
     blocks that have already been placed into loop regions get -1.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      degree[bb->index] = 0;

      if (!bitmap_bit_p (bbs_in_loop_rgns, bb->index))
        {
          FOR_EACH_EDGE (e, ei, bb->preds)
            if (!bitmap_bit_p (bbs_in_loop_rgns, e->src->index))
              degree[bb->index]++;
        }
      else
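        /* This block already belongs to some loop region; mark it so
           that it does not get a region of its own below.  */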
        degree[bb->index] = -1;
    }

  extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);

  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB_FN (bb, cfun)
    if (degree[bb->index] >= 0)
      {
        rgn_bb_table[cur_rgn_blocks] = bb->index;
        RGN_NR_BLOCKS (nr_regions) = 1;
        RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;
        CONTAINING_RGN (bb->index) = nr_regions++;
        BLOCK_TO_BB (bb->index) = 0;
      }

  free (degree);
  free (loop_hdr);
}

/* Free data structures used in pipelining of loops.  */
void
sel_finish_pipelining (void)
{
  struct loop *loop;

  /* Release aux fields so we don't free them later by mistake.  */
  FOR_EACH_LOOP (loop, 0)
    loop->aux = NULL;

  loop_optimizer_finalize ();

  loop_nests.release ();

  free (rev_top_order_index);
  rev_top_order_index = NULL;
}

/* This function replaces find_rgns when
   FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set.  */
void
sel_find_rgns (void)
{
  sel_init_pipelining ();
  extend_regions ();

  if (current_loops)
    {
      loop_p loop;

      FOR_EACH_LOOP (loop, (flag_sel_sched_pipelining_outer_loops
                            ? LI_FROM_INNERMOST
                            : LI_ONLY_INNERMOST))
        make_regions_from_loop_nest (loop);
    }

  /* Make regions from all the remaining basic blocks and schedule them.
     These blocks include blocks that don't belong to any loop or belong
     to irreducible loops.  */
  make_regions_from_the_rest ();

  /* We don't need bbs_in_loop_rgns anymore.  */
  sbitmap_free (bbs_in_loop_rgns);
  bbs_in_loop_rgns = NULL;
}

/* Add the preheader blocks from the previous loop to the current region,
   taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record
   them in *BBS.
   This function is only used with -fsel-sched-pipelining-outer-loops.  */
void
sel_add_loop_preheaders (bb_vec_t *bbs)
{
  int i;
  basic_block bb;
  vec<basic_block> *preheader_blocks
    = LOOP_PREHEADER_BLOCKS (current_loop_nest);

  if (!preheader_blocks)
    return;

  for (i = 0; preheader_blocks->iterate (i, &bb); i++)
    {
      bbs->safe_push (bb);
      last_added_blocks.safe_push (bb);
      sel_add_bb (bb);
    }

  vec_free (preheader_blocks);
}

/* While pipelining outer loops, returns TRUE if BB is a loop preheader.
   Please note that the function should also work when pipelining_p is
   false, because it is used when deciding whether we should or should
   not reschedule pipelined code.  */
bool
sel_is_loop_preheader_p (basic_block bb)
{
  if (current_loop_nest)
    {
      struct loop *outer;

      if (preheader_removed)
        return false;

      /* Preheader is the first block in the region.  */
      if (BLOCK_TO_BB (bb->index) == 0)
        return true;

      /* We used to find a preheader with the topological information.
         Check that the above code is equivalent to what we did before.  */

      if (in_current_region_p (current_loop_nest->header))
        gcc_assert (!(BLOCK_TO_BB (bb->index)
                      < BLOCK_TO_BB (current_loop_nest->header->index)));

      /* Make sure that a latch block of an outer loop is never taken
         for a preheader here.  */
      for (outer = loop_outer (current_loop_nest);
           outer;
           outer = loop_outer (outer))
        if (considered_for_pipelining_p (outer) && outer->latch == bb)
          gcc_unreachable ();
    }

  return false;
}

/* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
   can be removed, making the corresponding edge fallthrough (assuming that
   all basic blocks between JUMP_BB and DEST_BB are empty).  */
static bool
bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
{
  if (!onlyjump_p (BB_END (jump_bb))
      || tablejump_p (BB_END (jump_bb), NULL, NULL))
    return false;

  /* Several outgoing edges, abnormal edge or destination of jump is
     not DEST_BB.  */
  if (EDGE_COUNT (jump_bb->succs) != 1
      || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
      || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
    return false;

  /* None of the above holds, so the jump can be removed.  */
  return true;
}

/* Removes the loop preheader from the current region and saves it in
   PREHEADER_BLOCKS of the outer loop, so that the blocks are later added
   to the region that represents the outer loop.  */
static void
sel_remove_loop_preheader (void)
{
  int i, old_len;
  int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  basic_block bb;
  bool all_empty_p = true;
  vec<basic_block> *preheader_blocks
    = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));

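  /* LOOP_PREHEADER_BLOCKS may be NULL when nothing has been recorded for
     the outer loop yet, so make sure the vector is allocated.  */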
  vec_check_alloc (preheader_blocks, 0);

  gcc_assert (current_loop_nest);
  old_len = preheader_blocks->length ();

  /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS.  */
  for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));

      /* If the basic block belongs to the region, but not to the
         corresponding loop, then it must be a preheader.  */
      if (sel_is_loop_preheader_p (bb))
        {
          preheader_blocks->safe_push (bb);
          if (BB_END (bb) != bb_note (bb))
            all_empty_p = false;
        }
    }

  /* Remove these blocks only after iterating over the whole region.  */
  for (i = preheader_blocks->length () - 1; i >= old_len; i--)
    {
      bb = (*preheader_blocks)[i];
      sel_remove_bb (bb, false);
    }

  if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
    {
      if (!all_empty_p)
        /* Immediately create new region from preheader.  */
        make_region_from_loop_preheader (preheader_blocks);
      else
        {
          /* If all preheader blocks are empty, don't create a new empty
             region.  Instead, remove them completely.  */
          FOR_EACH_VEC_ELT (*preheader_blocks, i, bb)
            {
              edge e;
              edge_iterator ei;
              basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;

              /* Redirect all incoming edges to next basic block.  */
              for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
                {
                  if (! (e->flags & EDGE_FALLTHRU))
                    redirect_edge_and_branch (e, bb->next_bb);
                  else
                    redirect_edge_succ (e, bb->next_bb);
                }
              gcc_assert (BB_NOTE_LIST (bb) == NULL);
              delete_and_free_basic_block (bb);

              /* Check if after deleting the preheader there is an
                 unconditional jump in PREV_BB that leads to the next
                 basic block NEXT_BB.  If so, delete this jump and clear
                 data sets of its basic block if it becomes empty.  */
              if (next_bb->prev_bb == prev_bb
                  && prev_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
                  && bb_has_removable_jump_to_p (prev_bb, next_bb))
                {
                  redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
                  if (BB_END (prev_bb) == bb_note (prev_bb))
                    free_data_sets (prev_bb);
                }

              set_immediate_dominator (CDI_DOMINATORS, next_bb,
                                       recompute_dominator (CDI_DOMINATORS,
                                                            next_bb));
            }
        }
      vec_free (preheader_blocks);
    }
  else
    /* Store preheader within the father's loop structure.  */
    SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
                               preheader_blocks);
}

#endif