/* Instruction scheduling pass.
   Copyright (C) 1992-2018 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "profile.h"
#include "insn-attr.h"
#include "params.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"


#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;
/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx_insn *);
static int schedule_more_p (void);
static const char *ebb_print_insn (const rtx_insn *, int);
static int rank (rtx_insn *, rtx_insn *);
static int ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
static void debug_ebb_dependencies (rtx_insn *, rtx_insn *);

static void ebb_add_remove_insn (rtx_insn *, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx_insn *);
static void ebb_fix_recovery_cfg (int, int, int);

/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *)p_;
  sched_rgn_n_insns = *p;
  free (p_);
}
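
/* A minimal usage sketch, assuming haifa-sched.c drives these two hooks
   via current_sched_info->save_state and restore_state around backtrack
   points:

     void *state = save_ebb_state ();    (snapshot sched_rgn_n_insns)
     ... try one schedule; sched_rgn_n_insns advances ...
     restore_ebb_state (state);          (roll the counter back)

   The scheduled-insn counter is the only ebb-local frontend state that
   must survive backtracking.  */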

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump,
	   ";;   --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;;   --- EBB Dependences --- from bb%d to bb%d \n",
	   BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case: a partially dead instruction has been
	 scheduled after the last control flow instruction.  In this
	 case we can create a new basic block.  It is always exactly one
	 basic block, last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
			   && !IS_SPECULATION_CHECK_P (insn)
			   && BB_HEAD (last_bb) != insn
			   && BB_END (last_bb) == insn);

      {
	rtx_insn *x = NEXT_INSN (insn);
	if (e)
	  gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
	else
	  gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
	{
	  bb = split_edge (e);
	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
	}
      else
	{
	  /* Create an empty unreachable block after the INSN.  */
	  rtx_insn *next = NEXT_INSN (insn);
	  if (next && BARRIER_P (next))
	    next = NEXT_INSN (next);
	  bb = create_basic_block (next, NULL_RTX, last_bb);
	}

      /* split_edge () creates BB before E->DEST.  Keep in mind that
	 this operation extends the scheduling region to the end of BB.
	 Hence, we need to shift NEXT_TAIL so that haifa-sched.c won't
	 go out of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}
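
/* An illustrative sketch of the case handled above (hypothetical insns,
   not from the source): suppose the ebb originally ends in

       A: r1 = r2 + r3
       J: jump L          (BB_END of last_bb)

   and the scheduler emits J before A.  A then becomes partially dead
   code placed after the jump; begin_move_insn () gives such trailing
   insns a home by splitting the fallthru edge, or by creating a fresh
   unreachable block when no fallthru successor exists.  */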

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx_insn *insn1, rtx_insn *insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

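  /* Prefer the insn from the more frequently executed block (a negative
     return value means INSN1 is preferred).  */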
  if (bb1->count > bb2->count)
    return -1;
  if (bb1->count < bb2->count)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
                             rtx_insn *insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}
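
/* Note on the loop above: the fallthru successor is skipped because it
   is part of the same ebb, so dependence analysis sees its uses
   directly; presumably only registers live-in at the non-fallthru
   destinations need to be treated as uses of the jump, so that insns
   setting them are not moved past it.  */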

/* Used in schedule_ebbs_init to initialize current_sched_info for
   scheduling ebbs (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };
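
/* A reading of the initializer above (field order per
   sched-int.h:sched_deps_info_def; an assumption worth re-checking
   against the struct definition): only the
   compute_jump_reg_dependencies callback is provided, the NULLs leave
   the optional dependence-analysis callbacks unset, and the trailing
   1, 0, 0 enable use_cselib while leaving use_deps_list and
   generate_spec_deps off.  */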

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_move_insn ().  */
  | NEW_BBS
};

/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses the list (given by LAST_BLOCK) of already processed
   blocks in the EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx_insn *insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
	/* Found a DEF-USE dependence (insn1, load_insn).  */
	{
	  sd_iterator_def fore_sd_it;
	  dep_t fore_dep;

	  FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
	    {
	      rtx_insn *insn2 = DEP_CON (fore_dep);
	      basic_block insn2_block = BLOCK_FOR_INSN (insn2);

	      if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
		{
		  if (earliest_block != NULL
		      && earliest_block->index < insn2_block->index)
		    continue;

		  /* Found a DEF-USE dependence (insn1, insn2).  */
		  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
		    /* insn2 not guaranteed to be a 1 base reg load.  */
		    continue;

		  for (bb = last_block; bb; bb = (basic_block) bb->aux)
		    if (insn2_block == bb)
		      break;

		  if (!bb)
		    /* insn2 is the similar load.  */
		    earliest_block = insn2_block;
		}
	    }
	}
    }

  return earliest_block;
}
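
/* A worked example of the "similar load" condition (hypothetical RTL,
   not from the source):

     insn1:     r1 = r2 + r3
     insn2:     r4 = [r1 + 8]     (in an earlier block of the ebb)
     load_insn: r5 = [r1 + 16]

   Both loads are PFREE_CANDIDATEs fed by the same def insn1, so their
   addresses differ only by a constant; if insn2 executed without
   faulting, hoisting load_insn into insn2's block cannot introduce a
   new fault.  */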

/* The following function adds dependencies between jumps and risky
   insns in the given ebb.  */

static void
add_deps_for_risky_insns (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *prev;
  int classification;
  rtx_insn *last_jump = NULL;
  rtx_insn *next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
	{
	  bb = BLOCK_FOR_INSN (insn);
	  bb->aux = last_block;
	  last_block = bb;
	  /* Ensure blocks stay in the same order.  */
	  if (last_jump)
	    add_dependence (insn, last_jump, REG_DEP_ANTI);
	  last_jump = insn;
	}
      else if (INSN_P (insn) && last_jump != NULL_RTX)
	{
	  classification = haifa_classify_insn (insn);
	  prev = last_jump;

	  switch (classification)
	    {
	    case PFREE_CANDIDATE:
	      if (flag_schedule_speculative_load)
		{
		  bb = earliest_block_with_similiar_load (last_block, insn);
		  if (bb)
		    {
		      bb = (basic_block) bb->aux;
		      if (!bb)
			break;
		      prev = BB_END (bb);
		    }
		}
	      /* Fall through.  */
	    case TRAP_RISKY:
	    case IRISKY:
	    case PRISKY_CANDIDATE:
	      /* ??? We could implement better checking of
		 PRISKY_CANDIDATEs, analogous to sched-rgn.c.  */
	      /* We cannot change the mode of the backward
		 dependency because REG_DEP_ANTI has the lowest
		 rank.  */
	      if (! sched_insns_conditions_mutex_p (insn, prev))
		{
		  if ((current_sched_info->flags & DO_SPECULATION)
		      && (spec_info->mask & BEGIN_CONTROL))
		    {
		      dep_def _dep, *dep = &_dep;

		      init_dep (dep, prev, insn, REG_DEP_ANTI);

		      if (current_sched_info->flags & USE_DEPS_LIST)
			{
			  DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
							   MAX_DEP_WEAK);
			}
		      sd_add_or_update_dep (dep, false);
		    }
		  else
		    add_dependence (insn, prev, REG_DEP_CONTROL);
		}

	      break;

	    default:
	      break;
	    }
	}
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}
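
/* Example of the hazard guarded against above (hypothetical insns):

     J: if (r1 == 0) jump over
     I: r2 = [r1]

   I is only safe when the branch falls through, so it must not migrate
   above J freely.  The code above therefore records either a weakened
   BEGIN_CONTROL anti dependence (when control speculation is enabled,
   letting the scheduler emit a recovery check) or a hard
   REG_DEP_CONTROL dependence.  */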

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behavior depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return
   NULL_RTX.  */

basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
	head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
	tail = PREV_INSN (tail);
      else if (LABEL_P (head))
	head = NEXT_INSN (head);
      else
	break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
	targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single-block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb, NULL);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
	      || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
		  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
	    sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
}
/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx_insn *tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
	continue;

      for (;;)
	{
	  edge e;
	  tail = BB_END (bb);
	  if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
	      || LABEL_P (BB_HEAD (bb->next_bb)))
	    break;
	  e = find_fallthru_edge (bb->succs);
	  if (! e)
	    break;
	  if (e->probability.initialized_p ()
	      && e->probability.to_reg_br_prob_base () <= probability_cutoff)
	    break;
	  if (e->dest->flags & BB_DISABLE_SCHEDULE)
	    break;
	  bb = bb->next_bb;
	}

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}
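
/* A sketch of how this entry point is typically reached (an assumption
   based on the sched2 pass in sched-rgn.c): the second scheduling pass
   selects ebb scheduling when -fsched2-use-superblocks is given,
   roughly

     if (flag_sched2_use_superblocks)
       schedule_ebbs ();
     else
       schedule_insns ();
*/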

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs; therefore they
     always form a single-block ebb, and we can use the block's index
     to identify such ebbs.  */
  if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
	  && control_flow_insn_p (insn)
	  /* We handle interblock movement of the speculation check
	     or over a speculation check in
	     haifa-sched.c: move_block_after_check ().  */
	  && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
	  && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
	{
	  /* Assert that we don't move jumps across blocks.  */
	  gcc_assert (!control_flow_insn_p (BB_END (bb))
		      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
	  return bb;
	}
      else
	return 0;
    }
  else
    /* Return the next non-empty block.  */
    {
      do
	{
	  gcc_assert (bb != last_bb);

	  bb = bb->next_bb;
	}
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
		      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}

#endif /* INSN_SCHEDULING */