/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "target.h"
#include "timevar.h"
#include "tree-pass.h"
#include "sched-int.h"
#include "ggc.h"
#include "tree.h"
#include "vec.h"
#include "langhooks.h"
#include "rtlhooks-def.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */

#ifdef INSN_SCHEDULING
#include "sel-sched-ir.h"
/* We don't have to use it except for sel_print_insn.  */
#include "sel-sched-dump.h"

/* A vector holding bb info for whole scheduling pass.  */
VEC(sel_global_bb_info_def, heap) *sel_global_bb_info = NULL;

/* A vector holding bb info.  */
VEC(sel_region_bb_info_def, heap) *sel_region_bb_info = NULL;

/* A pool for allocating all lists.  */
alloc_pool sched_lists_pool;

/* This contains information about successors for compute_av_set.  */
struct succs_info current_succs;

/* Data structure to describe interaction with the generic scheduler utils.  */
static struct common_sched_info_def sel_common_sched_info;

/* The loop nest being pipelined.  */
struct loop *current_loop_nest;

/* LOOP_NESTS is a vector containing the corresponding loop nest for
   each region.  */
static VEC(loop_p, heap) *loop_nests = NULL;

/* Saves blocks already in loop regions, indexed by bb->index.  */
static sbitmap bbs_in_loop_rgns = NULL;

/* CFG hooks that are saved before changing create_basic_block hook.  */
static struct cfg_hooks orig_cfg_hooks;


/* Array containing reverse topological index of function basic blocks,
   indexed by BB->INDEX.  */
static int *rev_top_order_index = NULL;

/* Length of the above array.  */
static int rev_top_order_index_len = -1;

/* A regset pool structure.  */
static struct
{
  /* The stack to which regsets are returned.  */
  regset *v;

  /* Its pointer.  */
  int n;

  /* Its size.  */
  int s;

  /* In VV we save all generated regsets so that, when destructing the
     pool, we can compare it with V and check that every regset was returned
     back to pool.  */
  regset *vv;

  /* The pointer of VV stack.  */
  int nn;

  /* Its size.  */
  int ss;

  /* The difference between allocated and returned regsets.  */
  int diff;
} regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
/* This represents the nop pool.  */
static struct
{
  /* The vector which holds previously emitted nops.  */
  insn_t *v;

  /* Its pointer.  */
  int n;

  /* Its size.  */
  int s;
} nop_pool = { NULL, 0, 0 };

/* The pool for basic block notes.  */
static rtx_vec_t bb_note_pool;

/* A NOP pattern used to emit placeholder insns.  */
rtx nop_pattern = NULL_RTX;
/* A special instruction that resides in EXIT_BLOCK.
   EXIT_INSN is successor of the insns that lead to EXIT_BLOCK.  */
rtx exit_insn = NULL_RTX;

/* TRUE if, while scheduling the current region (which is a loop), its
   preheader was removed.  */
bool preheader_removed = false;


/* Forward static declarations.  */
static void fence_clear (fence_t);

static void deps_init_id (idata_t, insn_t, bool);
static void init_id_from_df (idata_t, insn_t, bool);
static expr_t set_insn_init (expr_t, vinsn_t, int);

static void cfg_preds (basic_block, insn_t **, int *);
static void prepare_insn_expr (insn_t, int);
static void free_history_vect (VEC (expr_history_def, heap) **);

static void move_bb_info (basic_block, basic_block);
static void remove_empty_bb (basic_block, bool);
static void sel_merge_blocks (basic_block, basic_block);
static void sel_remove_loop_preheader (void);
static bool bb_has_removable_jump_to_p (basic_block, basic_block);

static bool insn_is_the_only_one_in_bb_p (insn_t);
static void create_initial_data_sets (basic_block);

static void free_av_set (basic_block);
static void invalidate_av_set (basic_block);
static void extend_insn_data (void);
static void sel_init_new_insn (insn_t, int);
static void finish_insns (void);

/* Various list functions.  */

/* Copy an instruction list L.  */
ilist_t
ilist_copy (ilist_t l)
{
  ilist_t head = NULL, *tailp = &head;

  while (l)
    {
      ilist_add (tailp, ILIST_INSN (l));
      tailp = &ILIST_NEXT (*tailp);
      l = ILIST_NEXT (l);
    }

  return head;
}

/* Invert an instruction list L.  */
ilist_t
ilist_invert (ilist_t l)
{
  ilist_t res = NULL;

  while (l)
    {
      ilist_add (&res, ILIST_INSN (l));
      l = ILIST_NEXT (l);
    }

  return res;
}

/* Add a new boundary to the LP list with parameters TO, PTR, and DC.  */
void
blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
{
  bnd_t bnd;

  _list_add (lp);
  bnd = BLIST_BND (*lp);

  BND_TO (bnd) = to;
  BND_PTR (bnd) = ptr;
  BND_AV (bnd) = NULL;
  BND_AV1 (bnd) = NULL;
  BND_DC (bnd) = dc;
}

/* Remove the list node pointed to by LP.  */
void
blist_remove (blist_t *lp)
{
  bnd_t b = BLIST_BND (*lp);

  av_set_clear (&BND_AV (b));
  av_set_clear (&BND_AV1 (b));
  ilist_clear (&BND_PTR (b));

  _list_remove (lp);
}

/* Init a fence tail L.  */
void
flist_tail_init (flist_tail_t l)
{
  FLIST_TAIL_HEAD (l) = NULL;
  FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
}

/* Try to find fence corresponding to INSN in L.  */
fence_t
flist_lookup (flist_t l, insn_t insn)
{
  while (l)
    {
      if (FENCE_INSN (FLIST_FENCE (l)) == insn)
        return FLIST_FENCE (l);

      l = FLIST_NEXT (l);
    }

  return NULL;
}
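/* A minimal usage sketch of the instruction list helpers above
   (illustrative only, not part of the pass; INSN1 and INSN2 are
   hypothetical insns).  */
#if 0
static void
ilist_example (insn_t insn1, insn_t insn2)
{
  ilist_t l = NULL, copy, inverted;

  /* Build the list INSN2, INSN1 by pushing at the head.  */
  ilist_add (&l, insn1);
  ilist_add (&l, insn2);

  copy = ilist_copy (l);        /* Same order as L.  */
  inverted = ilist_invert (l);  /* INSN1, INSN2.  */

  ilist_clear (&l);
  ilist_clear (&copy);
  ilist_clear (&inverted);
}
#endif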
/* Init the fields of F before running fill_insns.  */
static void
init_fence_for_scheduling (fence_t f)
{
  FENCE_BNDS (f) = NULL;
  FENCE_PROCESSED_P (f) = false;
  FENCE_SCHEDULED_P (f) = false;
}

/* Add new fence consisting of INSN and STATE to the list pointed to by LP.  */
static void
flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
           insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
           int *ready_ticks, int ready_ticks_size, insn_t sched_next,
           int cycle, int cycle_issued_insns, int issue_more,
           bool starts_cycle_p, bool after_stall_p)
{
  fence_t f;

  _list_add (lp);
  f = FLIST_FENCE (*lp);

  FENCE_INSN (f) = insn;

  gcc_assert (state != NULL);
  FENCE_STATE (f) = state;

  FENCE_CYCLE (f) = cycle;
  FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
  FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
  FENCE_AFTER_STALL_P (f) = after_stall_p;

  gcc_assert (dc != NULL);
  FENCE_DC (f) = dc;

  gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
  FENCE_TC (f) = tc;

  FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
  FENCE_ISSUE_MORE (f) = issue_more;
  FENCE_EXECUTING_INSNS (f) = executing_insns;
  FENCE_READY_TICKS (f) = ready_ticks;
  FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
  FENCE_SCHED_NEXT (f) = sched_next;

  init_fence_for_scheduling (f);
}

/* Remove the head node of the list pointed to by LP.  */
static void
flist_remove (flist_t *lp)
{
  if (FENCE_INSN (FLIST_FENCE (*lp)))
    fence_clear (FLIST_FENCE (*lp));
  _list_remove (lp);
}

/* Clear the fence list pointed to by LP.  */
void
flist_clear (flist_t *lp)
{
  while (*lp)
    flist_remove (lp);
}

/* Add ORIGINAL_INSN to the def list DL honoring CROSSES_CALL.  */
void
def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
{
  def_t d;

  _list_add (dl);
  d = DEF_LIST_DEF (*dl);

  d->orig_insn = original_insn;
  d->crosses_call = crosses_call;
}


/* Functions to work with target contexts.  */

/* Bulk target context.  It is convenient for debugging purposes to ensure
   that there are no uninitialized (null) target contexts.  */
static tc_t bulk_tc = (tc_t) 1;

/* Target hooks wrappers.  In the future we can provide some default
   implementations for them.  */

/* Allocate a store for the target context.  */
static tc_t
alloc_target_context (void)
{
  return (targetm.sched.alloc_sched_context
          ? targetm.sched.alloc_sched_context () : bulk_tc);
}

/* Init target context TC.
   If CLEAN_P is true, then make TC as if it were at the beginning of
   the scheduler.  Otherwise, copy the current backend context to TC.  */
static void
init_target_context (tc_t tc, bool clean_p)
{
  if (targetm.sched.init_sched_context)
    targetm.sched.init_sched_context (tc, clean_p);
}

/* Allocate and initialize a target context.  Meaning of CLEAN_P is the same
   as in init_target_context ().  */
tc_t
create_target_context (bool clean_p)
{
  tc_t tc = alloc_target_context ();

  init_target_context (tc, clean_p);
  return tc;
}

/* Copy TC to the current backend context.  */
void
set_target_context (tc_t tc)
{
  if (targetm.sched.set_sched_context)
    targetm.sched.set_sched_context (tc);
}
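/* A sketch of the intended life cycle of a target context, assuming the
   backend provides the sched context hooks (illustrative only;
   reset_target_context and delete_target_context are defined below).  */
#if 0
static void
target_context_example (void)
{
  /* Take a snapshot of the current backend scheduling state.  */
  tc_t tc = create_target_context (false);

  /* ... schedule something, possibly clobbering the backend state ... */

  /* Restore the snapshot, recycle the context as a clean one, free it.  */
  set_target_context (tc);
  reset_target_context (tc, true);
  delete_target_context (tc);
}
#endif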
/* TC is about to be destroyed.  Free any internal data.  */
static void
clear_target_context (tc_t tc)
{
  if (targetm.sched.clear_sched_context)
    targetm.sched.clear_sched_context (tc);
}

/* Clear and free it.  */
static void
delete_target_context (tc_t tc)
{
  clear_target_context (tc);

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (tc);
}

/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook.  */
static void
copy_target_context (tc_t to, tc_t from)
{
  tc_t tmp = create_target_context (false);

  set_target_context (from);
  init_target_context (to, false);

  set_target_context (tmp);
  delete_target_context (tmp);
}

/* Create a copy of TC.  */
static tc_t
create_copy_of_target_context (tc_t tc)
{
  tc_t copy = alloc_target_context ();

  copy_target_context (copy, tc);

  return copy;
}

/* Clear TC and initialize it according to CLEAN_P.  The meaning of CLEAN_P
   is the same as in init_target_context ().  */
void
reset_target_context (tc_t tc, bool clean_p)
{
  clear_target_context (tc);
  init_target_context (tc, clean_p);
}

/* Functions to work with dependence contexts.
   Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for
   dependence context.  It accumulates information about processed insns to
   decide if the current insn is dependent on the processed ones.  */

/* Make a copy of FROM in TO.  */
static void
copy_deps_context (deps_t to, deps_t from)
{
  init_deps (to, false);
  deps_join (to, from);
}

/* Allocate store for dep context.  */
static deps_t
alloc_deps_context (void)
{
  return XNEW (struct deps_desc);
}

/* Allocate and initialize dep context.  */
static deps_t
create_deps_context (void)
{
  deps_t dc = alloc_deps_context ();

  init_deps (dc, false);
  return dc;
}

/* Create a copy of FROM.  */
static deps_t
create_copy_of_deps_context (deps_t from)
{
  deps_t to = alloc_deps_context ();

  copy_deps_context (to, from);
  return to;
}

/* Clean up internal data of DC.  */
static void
clear_deps_context (deps_t dc)
{
  free_deps (dc);
}

/* Clear and free DC.  */
static void
delete_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  free (dc);
}

/* Clear and init DC.  */
static void
reset_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  init_deps (dc, false);
}

/* This structure describes the dependence analysis hooks for advancing
   dependence context.  */
static struct sched_deps_info_def advance_deps_context_sched_deps_info =
  {
    NULL,

    NULL, /* start_insn */
    NULL, /* finish_insn */
    NULL, /* start_lhs */
    NULL, /* finish_lhs */
    NULL, /* start_rhs */
    NULL, /* finish_rhs */
    haifa_note_reg_set,
    haifa_note_reg_clobber,
    haifa_note_reg_use,
    NULL, /* note_mem_dep */
    NULL, /* note_dep */

    0, 0, 0
  };

/* Process INSN and add its impact on DC.  */
void
advance_deps_context (deps_t dc, insn_t insn)
{
  sched_deps_info = &advance_deps_context_sched_deps_info;
  deps_analyze_insn (dc, insn);
}
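/* A sketch of how a dependence context is used: feed it the insns already
   processed, so it can later answer whether another insn depends on them
   (illustrative only; FIRST and LAST delimit a hypothetical insn range,
   and the caller must eventually call delete_deps_context on the result).  */
#if 0
static deps_t
deps_context_example (insn_t first, insn_t last)
{
  deps_t dc = create_deps_context ();
  insn_t insn;

  for (insn = first; insn != last; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      advance_deps_context (dc, insn);

  /* DC now summarizes FIRST..LAST.  */
  return dc;
}
#endif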
/* Functions to work with DFA states.  */

/* Allocate store for a DFA state.  */
static state_t
state_alloc (void)
{
  return xmalloc (dfa_state_size);
}

/* Allocate and initialize DFA state.  */
static state_t
state_create (void)
{
  state_t state = state_alloc ();

  state_reset (state);
  advance_state (state);
  return state;
}

/* Free DFA state.  */
static void
state_free (state_t state)
{
  free (state);
}

/* Make a copy of FROM in TO.  */
static void
state_copy (state_t to, state_t from)
{
  memcpy (to, from, dfa_state_size);
}

/* Create a copy of FROM.  */
static state_t
state_create_copy (state_t from)
{
  state_t to = state_alloc ();

  state_copy (to, from);
  return to;
}
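/* A sketch of the copy-and-probe pattern these helpers enable: duplicate a
   DFA state, advance the copy, and throw it away without disturbing the
   original (illustrative only).  */
#if 0
static void
state_probe_example (state_t orig)
{
  state_t probe = state_create_copy (orig);

  advance_state (probe);  /* Move the copy one cycle forward.  */
  /* ... query PROBE ... */
  state_free (probe);
}
#endif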
/* Functions to work with fences.  */

/* Clear the fence.  */
static void
fence_clear (fence_t f)
{
  state_t s = FENCE_STATE (f);
  deps_t dc = FENCE_DC (f);
  void *tc = FENCE_TC (f);

  ilist_clear (&FENCE_BNDS (f));

  gcc_assert ((s != NULL && dc != NULL && tc != NULL)
              || (s == NULL && dc == NULL && tc == NULL));

  free (s);

  if (dc != NULL)
    delete_deps_context (dc);

  if (tc != NULL)
    delete_target_context (tc);
  VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
  free (FENCE_READY_TICKS (f));
  FENCE_READY_TICKS (f) = NULL;
}

/* Init a list of fences with successors of OLD_FENCE.  */
void
init_fences (insn_t old_fence)
{
  insn_t succ;
  succ_iterator si;
  bool first = true;
  int ready_ticks_size = get_max_uid () + 1;

  FOR_EACH_SUCC_1 (succ, si, old_fence,
                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      if (first)
        first = false;
      else
        gcc_assert (flag_sel_sched_pipelining_outer_loops);

      flist_add (&fences, succ,
                 state_create (),
                 create_deps_context () /* dc */,
                 create_target_context (true) /* tc */,
                 NULL_RTX /* last_scheduled_insn */,
                 NULL, /* executing_insns */
                 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
                 ready_ticks_size,
                 NULL_RTX /* sched_next */,
                 1 /* cycle */, 0 /* cycle_issued_insns */,
                 issue_rate, /* issue_more */
                 1 /* starts_cycle_p */, 0 /* after_stall_p */);
    }
}

/* Merges two fences (filling fields of fence F with resulting values) by
   the following rules: 1) state, target context and last scheduled insn are
   propagated from the fallthrough edge if it is available;
   2) deps context and cycle are propagated from the more probable edge;
   3) all other fields are set to the corresponding constant values.

   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
   READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
   and AFTER_STALL_P are the corresponding fields of the second fence.  */
static void
merge_fences (fence_t f, insn_t insn,
              state_t state, deps_t dc, void *tc,
              rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
              int *ready_ticks, int ready_ticks_size,
              rtx sched_next, int cycle, int issue_more, bool after_stall_p)
{
  insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);

  gcc_assert (sel_bb_head_p (FENCE_INSN (f))
              && !sched_next && !FENCE_SCHED_NEXT (f));

  /* Check if we can decide which path the fences came from.
     If we can't (or don't want to), reset all.  */
  if (last_scheduled_insn == NULL
      || last_scheduled_insn_old == NULL
      /* This is a case when INSN is reachable on several paths from
         one insn (this can happen when pipelining of outer loops is on and
         there are two edges: one going around the inner loop and the other
         right through it; in such a case just reset everything).  */
      || last_scheduled_insn == last_scheduled_insn_old)
    {
      state_reset (FENCE_STATE (f));
      state_free (state);

      reset_deps_context (FENCE_DC (f));
      delete_deps_context (dc);

      reset_target_context (FENCE_TC (f), true);
      delete_target_context (tc);

      if (cycle > FENCE_CYCLE (f))
        FENCE_CYCLE (f) = cycle;

      FENCE_LAST_SCHEDULED_INSN (f) = NULL;
      FENCE_ISSUE_MORE (f) = issue_rate;
      VEC_free (rtx, gc, executing_insns);
      free (ready_ticks);
      if (FENCE_EXECUTING_INSNS (f))
        VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
                          VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
      if (FENCE_READY_TICKS (f))
        memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
    }
  else
    {
      edge edge_old = NULL, edge_new = NULL;
      edge candidate;
      succ_iterator si;
      insn_t succ;

      /* Find fallthrough edge.  */
      gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
      candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);

      if (!candidate
          || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
              && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
        {
          /* No fallthrough edge leading to basic block of INSN.  */
          state_reset (FENCE_STATE (f));
          state_free (state);

          reset_target_context (FENCE_TC (f), true);
          delete_target_context (tc);

          FENCE_LAST_SCHEDULED_INSN (f) = NULL;
          FENCE_ISSUE_MORE (f) = issue_rate;
        }
      else
        if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
          {
            /* Would be weird if same insn is successor of several fallthrough
               edges.  */
            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn_old));

            state_free (FENCE_STATE (f));
            FENCE_STATE (f) = state;

            delete_target_context (FENCE_TC (f));
            FENCE_TC (f) = tc;

            FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
            FENCE_ISSUE_MORE (f) = issue_more;
          }
        else
          {
            /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched.  */
            state_free (state);
            delete_target_context (tc);

            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn));
          }

      /* Find edge of first predecessor (last_scheduled_insn_old->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* No same successor allowed from several edges.  */
              gcc_assert (!edge_old);
              edge_old = si.e1;
            }
        }
      /* Find edge of second predecessor (last_scheduled_insn->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* No same successor allowed from several edges.  */
              gcc_assert (!edge_new);
              edge_new = si.e1;
            }
        }
      /* Check if we can choose the most probable predecessor.  */
      if (edge_old == NULL || edge_new == NULL)
        {
          reset_deps_context (FENCE_DC (f));
          delete_deps_context (dc);
          VEC_free (rtx, gc, executing_insns);
          free (ready_ticks);

          FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
          if (FENCE_EXECUTING_INSNS (f))
            VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
                              VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
          if (FENCE_READY_TICKS (f))
            memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
        }
      else
        if (edge_new->probability > edge_old->probability)
          {
            delete_deps_context (FENCE_DC (f));
            FENCE_DC (f) = dc;
            VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
            FENCE_EXECUTING_INSNS (f) = executing_insns;
            free (FENCE_READY_TICKS (f));
            FENCE_READY_TICKS (f) = ready_ticks;
            FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
            FENCE_CYCLE (f) = cycle;
          }
        else
          {
            /* Leave DC and CYCLE untouched.  */
            delete_deps_context (dc);
            VEC_free (rtx, gc, executing_insns);
            free (ready_ticks);
          }
    }

  /* Fill remaining invariant fields.  */
  if (after_stall_p)
    FENCE_AFTER_STALL_P (f) = 1;

  FENCE_ISSUED_INSNS (f) = 0;
  FENCE_STARTS_CYCLE_P (f) = 1;
  FENCE_SCHED_NEXT (f) = NULL;
}

/* Add a new fence to NEW_FENCES list, initializing it from all
   other parameters.  */
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
               state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
               VEC(rtx, gc) *executing_insns, int *ready_ticks,
               int ready_ticks_size, rtx sched_next, int cycle,
               int cycle_issued_insns, int issue_rate,
               bool starts_cycle_p, bool after_stall_p)
{
  fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);

  if (! f)
    {
      flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
                 last_scheduled_insn, executing_insns, ready_ticks,
                 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
                 issue_rate, starts_cycle_p, after_stall_p);

      FLIST_TAIL_TAILP (new_fences)
        = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
    }
  else
    {
      merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
                    executing_insns, ready_ticks, ready_ticks_size,
                    sched_next, cycle, issue_rate, after_stall_p);
    }
}

/* Move the first fence in the OLD_FENCES list to NEW_FENCES.  */
void
move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
{
  fence_t f, old;
  flist_t *tailp = FLIST_TAIL_TAILP (new_fences);

  old = FLIST_FENCE (old_fences);
  f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
                    FENCE_INSN (FLIST_FENCE (old_fences)));
  if (f)
    {
      merge_fences (f, old->insn, old->state, old->dc, old->tc,
                    old->last_scheduled_insn, old->executing_insns,
                    old->ready_ticks, old->ready_ticks_size,
                    old->sched_next, old->cycle, old->issue_more,
                    old->after_stall_p);
    }
  else
    {
      _list_add (tailp);
      FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
      *FLIST_FENCE (*tailp) = *old;
      init_fence_for_scheduling (FLIST_FENCE (*tailp));
    }
  FENCE_INSN (old) = NULL;
}
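/* A sketch of how the two lists interact while scheduling: fences for the
   current pass live in a plain FLIST, while fences for the next pass are
   accumulated through a FLIST_TAIL; move_fence_to_fences either merges a
   fence into an existing one for the same insn or appends it.  Illustrative
   only; OLD is a hypothetical list being drained, and the stack-allocated
   struct flist_tail_def is an assumption about sel-sched-ir.h.  */
#if 0
static void
fence_move_example (flist_t old)
{
  struct flist_tail_def tail_def;
  flist_tail_t new_fences = &tail_def;

  flist_tail_init (new_fences);

  while (old)
    {
      move_fence_to_fences (old, new_fences);
      old = FLIST_NEXT (old);
    }
}
#endif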
/* Add a new fence to the NEW_FENCES list and initialize most of its data
   as clean.  */
void
add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int ready_ticks_size = get_max_uid () + 1;

  add_to_fences (new_fences,
                 succ, state_create (), create_deps_context (),
                 create_target_context (true),
                 NULL_RTX, NULL,
                 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
                 NULL_RTX, FENCE_CYCLE (fence) + 1,
                 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
}

/* Add a new fence to the NEW_FENCES list and initialize all of its data
   from FENCE and SUCC.  */
void
add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int *new_ready_ticks
    = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));

  memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
          FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
  add_to_fences (new_fences,
                 succ, state_create_copy (FENCE_STATE (fence)),
                 create_copy_of_deps_context (FENCE_DC (fence)),
                 create_copy_of_target_context (FENCE_TC (fence)),
                 FENCE_LAST_SCHEDULED_INSN (fence),
                 VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
                 new_ready_ticks,
                 FENCE_READY_TICKS_SIZE (fence),
                 FENCE_SCHED_NEXT (fence),
                 FENCE_CYCLE (fence),
                 FENCE_ISSUED_INSNS (fence),
                 FENCE_ISSUE_MORE (fence),
                 FENCE_STARTS_CYCLE_P (fence),
                 FENCE_AFTER_STALL_P (fence));
}


/* Functions to work with regset and nop pools.  */

/* Return a new regset from the pool.  It might have some of the bits set
   from the previous usage.  */
regset
get_regset_from_pool (void)
{
  regset rs;

  if (regset_pool.n != 0)
    rs = regset_pool.v[--regset_pool.n];
  else
    /* We need to create the regset.  */
    {
      rs = ALLOC_REG_SET (&reg_obstack);

      if (regset_pool.nn == regset_pool.ss)
        regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
                                     (regset_pool.ss = 2 * regset_pool.ss + 1));
      regset_pool.vv[regset_pool.nn++] = rs;
    }

  regset_pool.diff++;

  return rs;
}

/* Same as above, but returns the empty regset.  */
regset
get_clear_regset_from_pool (void)
{
  regset rs = get_regset_from_pool ();

  CLEAR_REG_SET (rs);
  return rs;
}

/* Return regset RS to the pool for future use.  */
void
return_regset_to_pool (regset rs)
{
  gcc_assert (rs);
  regset_pool.diff--;

  if (regset_pool.n == regset_pool.s)
    regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
                                (regset_pool.s = 2 * regset_pool.s + 1));
  regset_pool.v[regset_pool.n++] = rs;
}
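/* A minimal sketch of the pool discipline checked via REGSET_POOL.DIFF:
   every regset taken from the pool must eventually be returned to it
   (illustrative only).  */
#if 0
static void
regset_pool_example (void)
{
  regset used = get_clear_regset_from_pool ();

  /* ... accumulate registers into USED ... */

  /* Returning the set keeps REGSET_POOL.DIFF balanced; a set that is
     never returned shows up as a leak in free_regset_pool below.  */
  return_regset_to_pool (used);
}
#endif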
#ifdef ENABLE_CHECKING
/* This is used as a qsort callback for sorting regset pool stacks.
   X and XX are addresses of two regsets.  They are never equal.  */
static int
cmp_v_in_regset_pool (const void *x, const void *xx)
{
  return *((const regset *) x) - *((const regset *) xx);
}
#endif

/* Free the regset pool possibly checking for memory leaks.  */
void
free_regset_pool (void)
{
#ifdef ENABLE_CHECKING
  {
    regset *v = regset_pool.v;
    int i = 0;
    int n = regset_pool.n;

    regset *vv = regset_pool.vv;
    int ii = 0;
    int nn = regset_pool.nn;

    int diff = 0;

    gcc_assert (n <= nn);

    /* Sort both vectors so it will be possible to compare them.  */
    qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
    qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);

    while (ii < nn)
      {
        if (v[i] == vv[ii])
          i++;
        else
          /* VV[II] was lost.  */
          diff++;

        ii++;
      }

    gcc_assert (diff == regset_pool.diff);
  }
#endif

  /* If this is not true, we have a memory leak.  */
  gcc_assert (regset_pool.diff == 0);

  while (regset_pool.n)
    {
      --regset_pool.n;
      FREE_REG_SET (regset_pool.v[regset_pool.n]);
    }

  free (regset_pool.v);
  regset_pool.v = NULL;
  regset_pool.s = 0;

  free (regset_pool.vv);
  regset_pool.vv = NULL;
  regset_pool.nn = 0;
  regset_pool.ss = 0;

  regset_pool.diff = 0;
}


/* Functions to work with nop pools.  NOP insns are used as temporary
   placeholders of the insns being scheduled to allow correct update of
   the data sets.  When the update is finished, NOPs are deleted.  */

/* A vinsn that is used to represent a nop.  This vinsn is shared among all
   nops sel-sched generates.  */
static vinsn_t nop_vinsn = NULL;

/* Emit a nop before INSN, taking it from the pool.  */
insn_t
get_nop_from_pool (insn_t insn)
{
  insn_t nop;
  bool old_p = nop_pool.n != 0;
  int flags;

  if (old_p)
    nop = nop_pool.v[--nop_pool.n];
  else
    nop = nop_pattern;

  nop = emit_insn_before (nop, insn);

  if (old_p)
    flags = INSN_INIT_TODO_SSID;
  else
    flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;

  set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
  sel_init_new_insn (nop, flags);

  return nop;
}

/* Remove NOP from the instruction stream and return it to the pool.  */
void
return_nop_to_pool (insn_t nop, bool full_tidying)
{
  gcc_assert (INSN_IN_STREAM_P (nop));
  sel_remove_insn (nop, false, full_tidying);

  if (nop_pool.n == nop_pool.s)
    nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
                             (nop_pool.s = 2 * nop_pool.s + 1));
  nop_pool.v[nop_pool.n++] = nop;
}

/* Free the nop pool.  */
void
free_nop_pool (void)
{
  nop_pool.n = 0;
  nop_pool.s = 0;
  free (nop_pool.v);
  nop_pool.v = NULL;
}
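/* A sketch of how the nop pool is used around data set updates: emit a
   placeholder before an insn, do the update, then return the nop
   (illustrative only).  */
#if 0
static void
nop_pool_example (insn_t insn)
{
  insn_t nop = get_nop_from_pool (insn);

  /* ... update the data sets while NOP holds INSN's place ... */

  return_nop_to_pool (nop, true);
}
#endif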
/* Skip unspec to support ia64 speculation.  Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY in case some needs to be skipped.  */
static int
skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
{
  const_rtx x = *xx;
  const_rtx y = *yy;

  if (GET_CODE (x) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (x)))
    {
      *nx = XVECEXP (x, 0, 0);
      *ny = CONST_CAST_RTX (y);
      return 1;
    }

  if (GET_CODE (y) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (y)))
    {
      *nx = CONST_CAST_RTX (x);
      *ny = XVECEXP (y, 0, 0);
      return 1;
    }

  return 0;
}

/* Callback, called from hash_rtx_cb.  Helps to hash UNSPEC rtx X in a correct
   way to support ia64 speculation.  When changes are needed, new rtx X and
   new mode NMODE are written, and the callback returns true.  */
static int
hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
                           rtx *nx, enum machine_mode* nmode)
{
  if (GET_CODE (x) == UNSPEC
      && targetm.sched.skip_rtx_p
      && targetm.sched.skip_rtx_p (x))
    {
      *nx = XVECEXP (x, 0, 0);
      *nmode = VOIDmode;
      return 1;
    }

  return 0;
}

/* Return true if LHS and RHS are ok to be scheduled separately.  */
static bool
lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
{
  if (lhs == NULL || rhs == NULL)
    return false;

  /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: no point
     in using a reg if a const can be used.  Moreover, scheduling const as
     rhs may lead to mode mismatch because consts don't have modes but they
     could be merged from branches where the same const is used in different
     modes.  */
  if (CONSTANT_P (rhs))
    return false;

  /* ??? Do not rename predicate registers to avoid ICEs in bundling.  */
  if (COMPARISON_P (rhs))
    return false;

  /* Do not allow single REG to be an rhs.  */
  if (REG_P (rhs))
    return false;

  /* See comment at find_used_regs_1 (*1) for explanation of this
     restriction.  */
  /* FIXME: remove this later.  */
  if (MEM_P (lhs))
    return false;

  /* This will filter all tricky things like ZERO_EXTRACT etc.
     For now we don't handle it.  */
  if (!REG_P (lhs) && !MEM_P (lhs))
    return false;

  return true;
}

/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When
   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This is
   used e.g. for insns from recovery blocks.  */
static void
vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
{
  hash_rtx_callback_function hrcf;
  int insn_class;

  VINSN_INSN_RTX (vi) = insn;
  VINSN_COUNT (vi) = 0;
  vi->cost = -1;

  if (INSN_NOP_P (insn))
    return;

  if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
    init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
  else
    deps_init_id (VINSN_ID (vi), insn, force_unique_p);

  /* Hash vinsn depending on whether it is separable or not.  */
  hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
  if (VINSN_SEPARABLE_P (vi))
    {
      rtx rhs = VINSN_RHS (vi);

      VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
                                         VOIDmode, NULL, NULL,
                                         false, hrcf);
    }
  else
    {
      VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
    }

  insn_class = haifa_classify_insn (insn);
  if (insn_class >= 2
      && (!targetm.sched.get_insn_spec_ds
          || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
              == 0)))
    VINSN_MAY_TRAP_P (vi) = true;
  else
    VINSN_MAY_TRAP_P (vi) = false;
}

/* Indicate that VI has become the part of an rtx object.  */
void
vinsn_attach (vinsn_t vi)
{
  /* Assert that VI is not pending for deletion.  */
  gcc_assert (VINSN_INSN_RTX (vi));

  VINSN_COUNT (vi)++;
}
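/* A sketch of the reference-counting discipline: every structure that
   stores a vinsn attaches to it, and detaching the last reference deletes
   it (illustrative only; vinsn_detach and create_vinsn_from_insn_rtx are
   defined later in this file).  */
#if 0
static void
vinsn_refcount_example (insn_t insn)
{
  vinsn_t vi = create_vinsn_from_insn_rtx (insn, false);

  vinsn_attach (vi);  /* First holder.  */
  vinsn_attach (vi);  /* Second holder.  */

  vinsn_detach (vi);  /* Still alive: count is 1.  */
  vinsn_detach (vi);  /* Count drops to 0: VI is deleted.  */
}
#endif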
/* Create and init VI from the INSN.  Use FORCE_UNIQUE_P for determining the
   correct VINSN_TYPE (VI).  */
static vinsn_t
vinsn_create (insn_t insn, bool force_unique_p)
{
  vinsn_t vi = XCNEW (struct vinsn_def);

  vinsn_init (vi, insn, force_unique_p);
  return vi;
}

/* Return a copy of VI.  When REATTACH_P is true, detach VI and attach
   the copy.  */
vinsn_t
vinsn_copy (vinsn_t vi, bool reattach_p)
{
  rtx copy;
  bool unique = VINSN_UNIQUE_P (vi);
  vinsn_t new_vi;

  copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
  new_vi = create_vinsn_from_insn_rtx (copy, unique);
  if (reattach_p)
    {
      vinsn_detach (vi);
      vinsn_attach (new_vi);
    }

  return new_vi;
}

/* Delete the VI vinsn and free its data.  */
static void
vinsn_delete (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) == 0);

  if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
    {
      return_regset_to_pool (VINSN_REG_SETS (vi));
      return_regset_to_pool (VINSN_REG_USES (vi));
      return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
    }

  free (vi);
}

/* Indicate that VI is no longer a part of some rtx object.
   Remove VI if it is no longer needed.  */
void
vinsn_detach (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) > 0);

  if (--VINSN_COUNT (vi) == 0)
    vinsn_delete (vi);
}

/* Returns TRUE if VI is a branch.  */
bool
vinsn_cond_branch_p (vinsn_t vi)
{
  insn_t insn;

  if (!VINSN_UNIQUE_P (vi))
    return false;

  insn = VINSN_INSN_RTX (vi);
  if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
    return false;

  return control_flow_insn_p (insn);
}

/* Return latency of INSN.  */
static int
sel_insn_rtx_cost (rtx insn)
{
  int cost;

  /* A USE insn, or something else we don't need to
     understand.  We can't pass these directly to
     result_ready_cost or insn_default_latency because it will
     trigger a fatal error for unrecognizable insns.  */
  if (recog_memoized (insn) < 0)
    cost = 0;
  else
    {
      cost = insn_default_latency (insn);

      if (cost < 0)
        cost = 0;
    }

  return cost;
}

/* Return the cost of the VI.
   !!! FIXME: Unify with haifa-sched.c: insn_cost ().  */
int
sel_vinsn_cost (vinsn_t vi)
{
  int cost = vi->cost;

  if (cost < 0)
    {
      cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
      vi->cost = cost;
    }

  return cost;
}


/* Functions for insn emitting.  */

/* Emit new insn after AFTER based on PATTERN and initialize its data from
   EXPR and SEQNO.  */
insn_t
sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
{
  insn_t new_insn;

  gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);

  new_insn = emit_insn_after (pattern, after);
  set_insn_init (expr, NULL, seqno);
  sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);

  return new_insn;
}

/* Force newly generated vinsns to be unique.  */
static bool init_insn_force_unique_p = false;
/* Emit new speculation recovery insn after AFTER based on PATTERN and
   initialize its data from EXPR and SEQNO.  */
insn_t
sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
                                      insn_t after)
{
  insn_t insn;

  gcc_assert (!init_insn_force_unique_p);

  init_insn_force_unique_p = true;
  insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
  CANT_MOVE (insn) = 1;
  init_insn_force_unique_p = false;

  return insn;
}

/* Emit new insn after AFTER based on EXPR and SEQNO.  If VINSN is not NULL,
   take it as a new vinsn instead of EXPR's vinsn.
   We simplify insns later, after scheduling region in
   simplify_changed_insns.  */
insn_t
sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
                              insn_t after)
{
  expr_t emit_expr;
  insn_t insn;
  int flags;

  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
                             seqno);
  insn = EXPR_INSN_RTX (emit_expr);
  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));

  flags = INSN_INIT_TODO_SSID;
  if (INSN_LUID (insn) == 0)
    flags |= INSN_INIT_TODO_LUID;
  sel_init_new_insn (insn, flags);

  return insn;
}

/* Move insn from EXPR after AFTER.  */
insn_t
sel_move_insn (expr_t expr, int seqno, insn_t after)
{
  insn_t insn = EXPR_INSN_RTX (expr);
  basic_block bb = BLOCK_FOR_INSN (after);
  insn_t next = NEXT_INSN (after);

  /* Assert that in move_op we disconnected this insn properly.  */
  gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
  PREV_INSN (insn) = after;
  NEXT_INSN (insn) = next;

  NEXT_INSN (after) = insn;
  PREV_INSN (next) = insn;

  /* Update links from insn to bb and vice versa.  */
  df_insn_change_bb (insn, bb);
  if (BB_END (bb) == after)
    BB_END (bb) = insn;

  prepare_insn_expr (insn, seqno);
  return insn;
}


/* Functions to work with right-hand sides.  */

/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
   VECT and return true when found.  Use NEW_VINSN for comparison only when
   COMPARE_VINSNS is true.  Write to INDP the index on which
   the search has stopped, such that inserting the new element at INDP will
   retain VECT's sort order.  */
static bool
find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
                        unsigned uid, vinsn_t new_vinsn,
                        bool compare_vinsns, int *indp)
{
  expr_history_def *arr;
  int i, j, len = VEC_length (expr_history_def, vect);

  if (len == 0)
    {
      *indp = 0;
      return false;
    }

  arr = VEC_address (expr_history_def, vect);
  i = 0, j = len - 1;

  while (i <= j)
    {
      unsigned auid = arr[i].uid;
      vinsn_t avinsn = arr[i].new_expr_vinsn;

      if (auid == uid
          /* When undoing transformation on a bookkeeping copy, the new vinsn
             may not be exactly equal to the one that is saved in the vector.
             This is because the insn whose copy we're checking was possibly
             substituted itself.  */
          && (! compare_vinsns
              || vinsn_equal_p (avinsn, new_vinsn)))
        {
          *indp = i;
          return true;
        }
      else if (auid > uid)
        break;
      i++;
    }

  *indp = i;
  return false;
}
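/* A sketch of how the history vector records a transformation and is
   queried later (illustrative only; UID, OLD_VI, NEW_VI and INSN are
   hypothetical, and the insert/find/free helpers are defined below).  */
#if 0
static void
history_vect_example (unsigned uid, vinsn_t old_vi, vinsn_t new_vi, rtx insn)
{
  VEC (expr_history_def, heap) *vect = NULL;

  /* Remember that the insn with UID was substituted: OLD_VI became NEW_VI.  */
  insert_in_history_vect (&vect, uid, TRANS_SUBSTITUTION,
                          old_vi, new_vi, 0);

  /* Later, check whether this transformation was seen for INSN.  */
  if (find_in_history_vect (vect, insn, new_vi, true) >= 0)
    {
      /* ... undo or validate the transformation ... */
    }

  free_history_vect (&vect);
}
#endif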
/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT.  Return
   the position found or -1, if no such value is in the vector.
   Search also for UIDs of insn's originators, if ORIGINATORS_P is true.  */
int
find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
                      vinsn_t new_vinsn, bool originators_p)
{
  int ind;

  if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
                              false, &ind))
    return ind;

  if (INSN_ORIGINATORS (insn) && originators_p)
    {
      unsigned uid;
      bitmap_iterator bi;

      EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
        if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
          return ind;
    }

  return -1;
}

/* Insert new element in a sorted history vector pointed to by PVECT,
   if it is not there already.  The element is searched using
   UID/NEW_EXPR_VINSN pair.  TYPE, OLD_EXPR_VINSN and SPEC_DS save
   the history of a transformation.  */
void
insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
                        unsigned uid, enum local_trans_type type,
                        vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
                        ds_t spec_ds)
{
  VEC(expr_history_def, heap) *vect = *pvect;
  expr_history_def temp;
  bool res;
  int ind;

  res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);

  if (res)
    {
      expr_history_def *phist = VEC_index (expr_history_def, vect, ind);

      /* It is possible that speculation types of expressions that were
         propagated through different paths will be different here.  In this
         case, merge the status to get the correct check later.  */
      if (phist->spec_ds != spec_ds)
        phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
      return;
    }

  temp.uid = uid;
  temp.old_expr_vinsn = old_expr_vinsn;
  temp.new_expr_vinsn = new_expr_vinsn;
  temp.spec_ds = spec_ds;
  temp.type = type;

  vinsn_attach (old_expr_vinsn);
  vinsn_attach (new_expr_vinsn);
  VEC_safe_insert (expr_history_def, heap, vect, ind, &temp);
  *pvect = vect;
}

/* Free history vector PVECT.  */
static void
free_history_vect (VEC (expr_history_def, heap) **pvect)
{
  unsigned i;
  expr_history_def *phist;

  if (! *pvect)
    return;

  for (i = 0;
       VEC_iterate (expr_history_def, *pvect, i, phist);
       i++)
    {
      vinsn_detach (phist->old_expr_vinsn);
      vinsn_detach (phist->new_expr_vinsn);
    }

  VEC_free (expr_history_def, heap, *pvect);
  *pvect = NULL;
}

/* Merge vector FROM to PVECT.  */
static void
merge_history_vect (VEC (expr_history_def, heap) **pvect,
                    VEC (expr_history_def, heap) *from)
{
  expr_history_def *phist;
  int i;

  /* We keep this vector sorted.  */
  for (i = 0; VEC_iterate (expr_history_def, from, i, phist); i++)
    insert_in_history_vect (pvect, phist->uid, phist->type,
                            phist->old_expr_vinsn, phist->new_expr_vinsn,
                            phist->spec_ds);
}
/* Compare two vinsns as rhses if possible and as vinsns otherwise.  */
bool
vinsn_equal_p (vinsn_t x, vinsn_t y)
{
  rtx_equal_p_callback_function repcf;

  if (x == y)
    return true;

  if (VINSN_TYPE (x) != VINSN_TYPE (y))
    return false;

  if (VINSN_HASH (x) != VINSN_HASH (y))
    return false;

  repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
  if (VINSN_SEPARABLE_P (x))
    {
      /* Compare RHSes of VINSNs.  */
      gcc_assert (VINSN_RHS (x));
      gcc_assert (VINSN_RHS (y));

      return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
    }

  return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
}


/* Functions for working with expressions.  */

/* Initialize EXPR.  */
static void
init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
           int sched_times, int orig_bb_index, ds_t spec_done_ds,
           ds_t spec_to_check_ds, int orig_sched_cycle,
           VEC(expr_history_def, heap) *history, signed char target_available,
           bool was_substituted, bool was_renamed, bool needs_spec_check_p,
           bool cant_move)
{
  vinsn_attach (vi);

  EXPR_VINSN (expr) = vi;
  EXPR_SPEC (expr) = spec;
  EXPR_USEFULNESS (expr) = use;
  EXPR_PRIORITY (expr) = priority;
  EXPR_PRIORITY_ADJ (expr) = 0;
  EXPR_SCHED_TIMES (expr) = sched_times;
  EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
  EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
  EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
  EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;

  if (history)
    EXPR_HISTORY_OF_CHANGES (expr) = history;
  else
    EXPR_HISTORY_OF_CHANGES (expr) = NULL;

  EXPR_TARGET_AVAILABLE (expr) = target_available;
  EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
  EXPR_WAS_RENAMED (expr) = was_renamed;
  EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
  EXPR_CANT_MOVE (expr) = cant_move;
}

/* Make a copy of the expr FROM into the expr TO.  */
void
copy_expr (expr_t to, expr_t from)
{
  VEC(expr_history_def, heap) *temp = NULL;

  if (EXPR_HISTORY_OF_CHANGES (from))
    {
      unsigned i;
      expr_history_def *phist;

      temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
      for (i = 0;
           VEC_iterate (expr_history_def, temp, i, phist);
           i++)
        {
          vinsn_attach (phist->old_expr_vinsn);
          vinsn_attach (phist->new_expr_vinsn);
        }
    }

  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
             EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
             EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
             EXPR_ORIG_SCHED_CYCLE (from), temp,
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}

/* Same, but the final expr will not ever be in av sets, so don't copy
   "uninteresting" data such as bitmap cache.  */
void
copy_expr_onside (expr_t to, expr_t from)
{
  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
             EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, NULL,
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}
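/* A sketch of the copy/clear pairing for expressions: copies attach to the
   underlying vinsns, so every copied expr must be cleared when it dies
   (illustrative only; clear_expr is defined below, and the expr_def
   stack-allocation idiom is an assumption about sel-sched-ir.h).  */
#if 0
static void
expr_copy_example (expr_t expr)
{
  expr_def _tmp, *tmp = &_tmp;

  copy_expr (tmp, expr);  /* Attaches to EXPR's vinsn and history.  */
  /* ... inspect or transform TMP ... */
  clear_expr (tmp);       /* Detach the vinsn, free the history vector.  */
}
#endif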
/* Prepare the expr of INSN for scheduling.  Used when moving insn and when
   initializing new insns.  */
static void
prepare_insn_expr (insn_t insn, int seqno)
{
  expr_t expr = INSN_EXPR (insn);
  ds_t ds;

  INSN_SEQNO (insn) = seqno;
  EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
  EXPR_SPEC (expr) = 0;
  EXPR_ORIG_SCHED_CYCLE (expr) = 0;
  EXPR_WAS_SUBSTITUTED (expr) = 0;
  EXPR_WAS_RENAMED (expr) = 0;
  EXPR_TARGET_AVAILABLE (expr) = 1;
  INSN_LIVE_VALID_P (insn) = false;

  /* ??? If this expression is speculative, make its dependence
     as weak as possible.  We can filter this expression later
     in process_spec_exprs, because we do not distinguish
     between the status we got during compute_av_set and the
     existing status.  To be fixed.  */
  ds = EXPR_SPEC_DONE_DS (expr);
  if (ds)
    EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);

  free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
}

/* Update target_available bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_target_availability (expr_t to, expr_t from, insn_t split_point)
{
  if (EXPR_TARGET_AVAILABLE (to) < 0
      || EXPR_TARGET_AVAILABLE (from) < 0)
    EXPR_TARGET_AVAILABLE (to) = -1;
  else
    {
      /* We try to detect the case when one of the expressions
         can only be reached through another one.  In this case,
         we can do better.  */
      if (split_point == NULL)
        {
          int toind, fromind;

          toind = EXPR_ORIG_BB_INDEX (to);
          fromind = EXPR_ORIG_BB_INDEX (from);

          if (toind && toind == fromind)
            /* Do nothing -- everything is done in
               merge_with_other_exprs.  */
            ;
          else
            EXPR_TARGET_AVAILABLE (to) = -1;
        }
      else if (EXPR_TARGET_AVAILABLE (from) == 0
               && EXPR_LHS (from)
               && REG_P (EXPR_LHS (from))
               && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from)))
        EXPR_TARGET_AVAILABLE (to) = -1;
      else
        EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
    }
}

/* Update speculation bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
{
  ds_t old_to_ds, old_from_ds;

  old_to_ds = EXPR_SPEC_DONE_DS (to);
  old_from_ds = EXPR_SPEC_DONE_DS (from);

  EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
  EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
  EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);

  /* When merging e.g. control & data speculative exprs, or a control
     speculative with a control&data speculative one, we really have
     to change vinsn too.  Also, when speculative status is changed,
     we also need to record this as a transformation in expr's history.  */
  if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
    {
      old_to_ds = ds_get_speculation_types (old_to_ds);
      old_from_ds = ds_get_speculation_types (old_from_ds);

      if (old_to_ds != old_from_ds)
        {
          ds_t record_ds;

          /* When both expressions are speculative, we need to change
             the vinsn first.  */
          if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
            {
              int res;

              res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
              gcc_assert (res >= 0);
            }

          if (split_point != NULL)
            {
              /* Record the change with proper status.  */
              record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
              record_ds &= ~(old_to_ds & SPECULATIVE);
              record_ds &= ~(old_from_ds & SPECULATIVE);

              insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
                                      INSN_UID (split_point), TRANS_SPECULATION,
                                      EXPR_VINSN (from), EXPR_VINSN (to),
                                      record_ds);
            }
        }
    }
}


/* Merge bits of FROM expr to TO expr.  When SPLIT_POINT is not NULL,
   this is done along different paths.  */
void
merge_expr_data (expr_t to, expr_t from, insn_t split_point)
{
  /* Choose the maximum of the specs of merged exprs.  This is required
     for correctness of bookkeeping.  */
  if (EXPR_SPEC (to) < EXPR_SPEC (from))
    EXPR_SPEC (to) = EXPR_SPEC (from);

  if (split_point)
    EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
  else
    EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
                                EXPR_USEFULNESS (from));

  if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
    EXPR_PRIORITY (to) = EXPR_PRIORITY (from);

  if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
    EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);

  if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
    EXPR_ORIG_BB_INDEX (to) = 0;

  EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
                                    EXPR_ORIG_SCHED_CYCLE (from));

  EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
  EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
  EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);

  merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
                      EXPR_HISTORY_OF_CHANGES (from));
  update_target_availability (to, from, split_point);
  update_speculative_bits (to, from, split_point);
}

/* Merge bits of FROM expr to TO expr.  Vinsns in the exprs should be equal
   in terms of vinsn_equal_p.  SPLIT_POINT is non-null when expressions
   are merged from different successors at a split point.  */
void
merge_expr (expr_t to, expr_t from, insn_t split_point)
{
  vinsn_t to_vi = EXPR_VINSN (to);
  vinsn_t from_vi = EXPR_VINSN (from);

  gcc_assert (vinsn_equal_p (to_vi, from_vi));

  /* Make sure that speculative pattern is propagated into exprs that
     have non-speculative one.  This will provide us with consistent
     speculative bits and speculative patterns inside expr.  */
  if (EXPR_SPEC_DONE_DS (to) == 0
      && EXPR_SPEC_DONE_DS (from) != 0)
    change_vinsn_in_expr (to, EXPR_VINSN (from));

  merge_expr_data (to, from, split_point);
  gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
}

/* Clear the information of this EXPR.  */
void
clear_expr (expr_t expr)
{
  vinsn_detach (EXPR_VINSN (expr));
  EXPR_VINSN (expr) = NULL;

  free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
}
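/* A sketch of merging availability data for the same expression arriving
   along two paths that join at SPLIT_POINT (illustrative only).  */
#if 0
static void
merge_expr_example (expr_t from_left, expr_t from_right, insn_t split_point)
{
  /* Both exprs must wrap equal vinsns in the sense of vinsn_equal_p.  */
  gcc_assert (vinsn_equal_p (EXPR_VINSN (from_left),
                             EXPR_VINSN (from_right)));

  /* FROM_LEFT accumulates the specs, priorities, usefulness and history
     of FROM_RIGHT; FROM_RIGHT itself is left untouched.  */
  merge_expr (from_left, from_right, split_point);
}
#endif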
/* For a given LV_SET, mark EXPR having unavailable target register.  */
static void
set_unavailable_target_for_expr (expr_t expr, regset lv_set)
{
  if (EXPR_SEPARABLE_P (expr))
    {
      if (REG_P (EXPR_LHS (expr))
          && register_unavailable_p (lv_set, EXPR_LHS (expr)))
        {
          /* If it's an insn like r1 = use (r1, ...), and it exists in
             different forms in each of the av_sets being merged, we can't say
             whether original destination register is available or not.
             However, this still works if destination register is not used
             in the original expression: if the branch at which LV_SET we're
             looking here is not actually 'other branch' in sense that same
             expression is available through it (but it can't be determined
             at computation stage because of transformations on one of the
             branches), it still won't affect the availability.
             Liveness of a register somewhere on a code motion path means
             it's either read somewhere on a code motion path, live on
             'other' branch, live at the point immediately following
             the original operation, or is read by the original operation.
             The latter case is filtered out in the condition below.
             It still doesn't cover the case when register is defined and used
             somewhere within the code motion path, and in this case we could
             miss a unifying code motion along both branches using a renamed
             register, but it won't affect code correctness since upon
             an actual code motion a bookkeeping code would be generated.  */
          if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
                                      EXPR_LHS (expr)))
            EXPR_TARGET_AVAILABLE (expr) = -1;
          else
            EXPR_TARGET_AVAILABLE (expr) = false;
        }
    }
  else
    {
      unsigned regno;
      reg_set_iterator rsi;

      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
                                 0, regno, rsi)
        if (bitmap_bit_p (lv_set, regno))
          {
            EXPR_TARGET_AVAILABLE (expr) = false;
            break;
          }

      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
                                 0, regno, rsi)
        if (bitmap_bit_p (lv_set, regno))
          {
            EXPR_TARGET_AVAILABLE (expr) = false;
            break;
          }
    }
}

/* Try to make EXPR speculative.  Return 1 when EXPR's pattern
   or dependence status have changed, 2 when also the target register
   became unavailable, 0 if nothing had to be changed.  */
int
speculate_expr (expr_t expr, ds_t ds)
{
  int res;
  rtx orig_insn_rtx;
  rtx spec_pat;
  ds_t target_ds, current_ds;

  /* Obtain the status we need to put on EXPR.  */
  target_ds = (ds & SPECULATIVE);
  current_ds = EXPR_SPEC_DONE_DS (expr);
  ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);

  orig_insn_rtx = EXPR_INSN_RTX (expr);

  res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);

  switch (res)
    {
    case 0:
      EXPR_SPEC_DONE_DS (expr) = ds;
      return current_ds != ds ? 1 : 0;

    case 1:
      {
        rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
        vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);

        change_vinsn_in_expr (expr, spec_vinsn);
        EXPR_SPEC_DONE_DS (expr) = ds;
        EXPR_NEEDS_SPEC_CHECK_P (expr) = true;

        /* Do not allow clobbering the address register of speculative
           insns.  */
*/ 1979 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), 1980 expr_dest_reg (expr))) 1981 { 1982 EXPR_TARGET_AVAILABLE (expr) = false; 1983 return 2; 1984 } 1985 1986 return 1; 1987 } 1988 1989 case -1: 1990 return -1; 1991 1992 default: 1993 gcc_unreachable (); 1994 return -1; 1995 } 1996 } 1997 1998 /* Return the destination register of EXPR, if any. */ 1999 rtx 2000 expr_dest_reg (expr_t expr) 2001 { 2002 rtx dest = VINSN_LHS (EXPR_VINSN (expr)); 2003 2004 if (dest != NULL_RTX && REG_P (dest)) 2005 return dest; 2006 2007 return NULL_RTX; 2008 } 2009 2010 /* Return the REGNO of EXPR's destination. */ 2011 unsigned 2012 expr_dest_regno (expr_t expr) 2013 { 2014 rtx dest = expr_dest_reg (expr); 2015 2016 gcc_assert (dest != NULL_RTX); 2017 return REGNO (dest); 2018 } 2019 2020 /* For a given LV_SET, mark all expressions in JOIN_SET that are not present 2021 in AV_SET as having an unavailable target register. */ 2022 void 2023 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set) 2024 { 2025 expr_t expr; 2026 av_set_iterator avi; 2027 2028 FOR_EACH_EXPR (expr, avi, join_set) 2029 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL) 2030 set_unavailable_target_for_expr (expr, lv_set); 2031 } 2032 2033 2034 /* Return true if REG (at least partially) is present in REGS. */ 2035 bool 2036 register_unavailable_p (regset regs, rtx reg) 2037 { 2038 unsigned regno, end_regno; 2039 2040 regno = REGNO (reg); 2041 if (bitmap_bit_p (regs, regno)) 2042 return true; 2043 2044 end_regno = END_REGNO (reg); 2045 2046 while (++regno < end_regno) 2047 if (bitmap_bit_p (regs, regno)) 2048 return true; 2049 2050 return false; 2051 } 2052 2053 /* Av set functions. */ 2054 2055 /* Add a new element to av set SETP. 2056 Return the element added. */ 2057 static av_set_t 2058 av_set_add_element (av_set_t *setp) 2059 { 2060 /* Insert at the beginning of the list. */ 2061 _list_add (setp); 2062 return *setp; 2063 } 2064 2065 /* Add EXPR to SETP. */ 2066 void 2067 av_set_add (av_set_t *setp, expr_t expr) 2068 { 2069 av_set_t elem; 2070 2071 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr))); 2072 elem = av_set_add_element (setp); 2073 copy_expr (_AV_SET_EXPR (elem), expr); 2074 } 2075 2076 /* Same, but do not copy EXPR. */ 2077 static void 2078 av_set_add_nocopy (av_set_t *setp, expr_t expr) 2079 { 2080 av_set_t elem; 2081 2082 elem = av_set_add_element (setp); 2083 *_AV_SET_EXPR (elem) = *expr; 2084 } 2085 2086 /* Remove the expr pointed to by IP from the av_set. */ 2087 void 2088 av_set_iter_remove (av_set_iterator *ip) 2089 { 2090 clear_expr (_AV_SET_EXPR (*ip->lp)); 2091 _list_iter_remove (ip); 2092 } 2093 2094 /* Search for an expr in SET such that it's equivalent to SOUGHT_VINSN in the 2095 sense of the vinsn_equal_p function. Return NULL if no such expr 2096 was found in SET. */ 2097 expr_t 2098 av_set_lookup (av_set_t set, vinsn_t sought_vinsn) 2099 { 2100 expr_t expr; 2101 av_set_iterator i; 2102 2103 FOR_EACH_EXPR (expr, i, set) 2104 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) 2105 return expr; 2106 return NULL; 2107 } 2108 2109 /* Same, but also remove the EXPR found.
*/ 2110 static expr_t 2111 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn) 2112 { 2113 expr_t expr; 2114 av_set_iterator i; 2115 2116 FOR_EACH_EXPR_1 (expr, i, setp) 2117 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) 2118 { 2119 _list_iter_remove_nofree (&i); 2120 return expr; 2121 } 2122 return NULL; 2123 } 2124 2125 /* Search for an expr in SET such that it's equivalent to EXPR in the 2126 sense of vinsn_equal_p on their vinsns, but is not EXPR itself. 2127 Return NULL if no such expr was found in SET. */ 2128 static expr_t 2129 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr) 2130 { 2131 expr_t cur_expr; 2132 av_set_iterator i; 2133 2134 FOR_EACH_EXPR (cur_expr, i, set) 2135 { 2136 if (cur_expr == expr) 2137 continue; 2138 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr))) 2139 return cur_expr; 2140 } 2141 2142 return NULL; 2143 } 2144 2145 /* If another expression equivalent to EXPR is already in AVP, merge the two and remove one of them. */ 2146 expr_t 2147 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr) 2148 { 2149 expr_t expr2; 2150 2151 expr2 = av_set_lookup_other_equiv_expr (*avp, expr); 2152 if (expr2 != NULL) 2153 { 2154 /* Reset target availability on merge, since taking it from only one 2155 of the exprs would be incorrect for the other. */ 2156 EXPR_TARGET_AVAILABLE (expr2) = -1; 2157 EXPR_USEFULNESS (expr2) = 0; 2158 2159 merge_expr (expr2, expr, NULL); 2160 2161 /* Fix up usefulness, as it should now be REG_BR_PROB_BASE. */ 2162 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE; 2163 2164 av_set_iter_remove (ip); 2165 return expr2; 2166 } 2167 2168 return expr; 2169 } 2170 2171 /* Return true if there is an expr that correlates to VI in SET. */ 2172 bool 2173 av_set_is_in_p (av_set_t set, vinsn_t vi) 2174 { 2175 return av_set_lookup (set, vi) != NULL; 2176 } 2177 2178 /* Return a copy of SET. */ 2179 av_set_t 2180 av_set_copy (av_set_t set) 2181 { 2182 expr_t expr; 2183 av_set_iterator i; 2184 av_set_t res = NULL; 2185 2186 FOR_EACH_EXPR (expr, i, set) 2187 av_set_add (&res, expr); 2188 2189 return res; 2190 } 2191 2192 /* Join two av sets that do not have common elements by attaching the second 2193 set (pointed to by FROMP) to the end of the first set (TO_TAILP must point 2194 to _AV_SET_NEXT of the first set's last element). */ 2195 static void 2196 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp) 2197 { 2198 gcc_assert (*to_tailp == NULL); 2199 *to_tailp = *fromp; 2200 *fromp = NULL; 2201 } 2202 2203 /* Make the set pointed to by TOP the union of TOP and FROMP. Clear the 2204 av_set pointed to by FROMP afterwards. */ 2205 void 2206 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn) 2207 { 2208 expr_t expr1; 2209 av_set_iterator i; 2210 2211 /* Delete from TOP all exprs that are present in FROMP. */ 2212 FOR_EACH_EXPR_1 (expr1, i, top) 2213 { 2214 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1)); 2215 2216 if (expr2) 2217 { 2218 merge_expr (expr2, expr1, insn); 2219 av_set_iter_remove (&i); 2220 } 2221 } 2222 2223 join_distinct_sets (i.lp, fromp); 2224 } 2225 2226 /* Same as above, but also update the availability of the target register in 2227 TOP judging by TO_LV_SET and FROM_LV_SET. */ 2228 void 2229 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set, 2230 regset from_lv_set, insn_t insn) 2231 { 2232 expr_t expr1; 2233 av_set_iterator i; 2234 av_set_t *to_tailp, in_both_set = NULL; 2235 2236 /* Delete from TOP all exprs that are present in FROMP.
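To illustrate with hypothetical elements: for TOP = {a, b} and FROMP = {b, c}, b is merged from both branches (with its target availability re-checked as described above), a is then checked against FROM_LV_SET, c against TO_LV_SET, and TOP ends up holding the union {a, b, c}.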
*/ 2237 FOR_EACH_EXPR_1 (expr1, i, top) 2238 { 2239 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1)); 2240 2241 if (expr2) 2242 { 2243 /* It may be that the expressions have different destination 2244 registers, in which case we need to check liveness here. */ 2245 if (EXPR_SEPARABLE_P (expr1)) 2246 { 2247 int regno1 = (REG_P (EXPR_LHS (expr1)) 2248 ? (int) expr_dest_regno (expr1) : -1); 2249 int regno2 = (REG_P (EXPR_LHS (expr2)) 2250 ? (int) expr_dest_regno (expr2) : -1); 2251 2252 /* ??? We don't have a way to check restrictions for the 2253 *other* register on the current path; we did it only 2254 for the current target register. Give up. */ 2255 if (regno1 != regno2) 2256 EXPR_TARGET_AVAILABLE (expr2) = -1; 2257 } 2258 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2)) 2259 EXPR_TARGET_AVAILABLE (expr2) = -1; 2260 2261 merge_expr (expr2, expr1, insn); 2262 av_set_add_nocopy (&in_both_set, expr2); 2263 av_set_iter_remove (&i); 2264 } 2265 else 2266 /* EXPR1 is present in TOP, but not in FROMP. Check it on 2267 FROM_LV_SET. */ 2268 set_unavailable_target_for_expr (expr1, from_lv_set); 2269 } 2270 to_tailp = i.lp; 2271 2272 /* These expressions are not present in TOP. Check liveness 2273 restrictions on TO_LV_SET. */ 2274 FOR_EACH_EXPR (expr1, i, *fromp) 2275 set_unavailable_target_for_expr (expr1, to_lv_set); 2276 2277 join_distinct_sets (i.lp, &in_both_set); 2278 join_distinct_sets (to_tailp, fromp); 2279 } 2280 2281 /* Clear the av_set pointed to by SETP. */ 2282 void 2283 av_set_clear (av_set_t *setp) 2284 { 2285 expr_t expr; 2286 av_set_iterator i; 2287 2288 FOR_EACH_EXPR_1 (expr, i, setp) 2289 av_set_iter_remove (&i); 2290 2291 gcc_assert (*setp == NULL); 2292 } 2293 2294 /* Leave only one non-speculative element in SETP. */ 2295 void 2296 av_set_leave_one_nonspec (av_set_t *setp) 2297 { 2298 expr_t expr; 2299 av_set_iterator i; 2300 bool has_one_nonspec = false; 2301 2302 /* Keep all speculative exprs, and leave one non-speculative 2303 (the first one). */ 2304 FOR_EACH_EXPR_1 (expr, i, setp) 2305 { 2306 if (!EXPR_SPEC_DONE_DS (expr)) 2307 { 2308 if (has_one_nonspec) 2309 av_set_iter_remove (&i); 2310 else 2311 has_one_nonspec = true; 2312 } 2313 } 2314 } 2315 2316 /* Return the N'th element of SET. */ 2317 expr_t 2318 av_set_element (av_set_t set, int n) 2319 { 2320 expr_t expr; 2321 av_set_iterator i; 2322 2323 FOR_EACH_EXPR (expr, i, set) 2324 if (n-- == 0) 2325 return expr; 2326 2327 gcc_unreachable (); 2328 return NULL; 2329 } 2330 2331 /* Delete all expressions from AVP that are conditional branches (IFs). */ 2332 void 2333 av_set_substract_cond_branches (av_set_t *avp) 2334 { 2335 av_set_iterator i; 2336 expr_t expr; 2337 2338 FOR_EACH_EXPR_1 (expr, i, avp) 2339 if (vinsn_cond_branch_p (EXPR_VINSN (expr))) 2340 av_set_iter_remove (&i); 2341 } 2342 2343 /* Multiply the usefulness attribute of each member of the av-set AV by 2344 PROB / ALL_PROB. */ 2345 void 2346 av_set_split_usefulness (av_set_t av, int prob, int all_prob) 2347 { 2348 av_set_iterator i; 2349 expr_t expr; 2350 2351 FOR_EACH_EXPR (expr, i, av) 2352 EXPR_USEFULNESS (expr) = (all_prob 2353 ? (EXPR_USEFULNESS (expr) * prob) / all_prob 2354 : 0); 2355 } 2356 2357 /* Leave in AVP only those expressions that are present in AV, 2358 merging history expressions.
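For instance, with AVP = {a, b, c} and AV = {b, c} (illustrative names only), a is dropped from AVP, while b and c remain and absorb the history of changes recorded for their counterparts in AV.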
*/ 2359 void 2360 av_set_code_motion_filter (av_set_t *avp, av_set_t av) 2361 { 2362 av_set_iterator i; 2363 expr_t expr, expr2; 2364 2365 FOR_EACH_EXPR_1 (expr, i, avp) 2366 if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL) 2367 av_set_iter_remove (&i); 2368 else 2369 /* When updating av sets in bookkeeping blocks, we can add more insns 2370 there which will be transformed but the upper av sets will not 2371 reflect those transformations. We then fail to undo those 2372 when searching for such insns. So merge the history saved 2373 in the av set of the block we are processing. */ 2374 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), 2375 EXPR_HISTORY_OF_CHANGES (expr2)); 2376 } 2377 2378 2379 2380 /* Dependence hooks to initialize insn data. */ 2381 2382 /* This is used in hooks callable from dependence analysis when initializing 2383 an instruction's data. */ 2384 static struct 2385 { 2386 /* Where the dependence was found (lhs/rhs). */ 2387 deps_where_t where; 2388 2389 /* The actual data object to initialize. */ 2390 idata_t id; 2391 2392 /* True when the insn should not be made clonable. */ 2393 bool force_unique_p; 2394 2395 /* True when the insn should be treated as of type USE, i.e. never renamed. */ 2396 bool force_use_p; 2397 } deps_init_id_data; 2398 2399 2400 /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be 2401 clonable. */ 2402 static void 2403 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p) 2404 { 2405 int type; 2406 2407 /* Determine whether INSN could be cloned and return the appropriate vinsn type. 2408 Clonable insns that can be separated into lhs and rhs have type SET. 2409 Other clonable insns have type USE. */ 2410 type = GET_CODE (insn); 2411 2412 /* Only regular insns can be cloned. */ 2413 if (type == INSN && !force_unique_p) 2414 type = SET; 2415 else if (type == JUMP_INSN && simplejump_p (insn)) 2416 type = PC; 2417 else if (type == DEBUG_INSN) 2418 type = !force_unique_p ? USE : INSN; 2419 2420 IDATA_TYPE (id) = type; 2421 IDATA_REG_SETS (id) = get_clear_regset_from_pool (); 2422 IDATA_REG_USES (id) = get_clear_regset_from_pool (); 2423 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool (); 2424 } 2425 2426 /* Start initializing insn data. */ 2427 static void 2428 deps_init_id_start_insn (insn_t insn) 2429 { 2430 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE); 2431 2432 setup_id_for_insn (deps_init_id_data.id, insn, 2433 deps_init_id_data.force_unique_p); 2434 deps_init_id_data.where = DEPS_IN_INSN; 2435 } 2436 2437 /* Start initializing lhs data. */ 2438 static void 2439 deps_init_id_start_lhs (rtx lhs) 2440 { 2441 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); 2442 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL); 2443 2444 if (IDATA_TYPE (deps_init_id_data.id) == SET) 2445 { 2446 IDATA_LHS (deps_init_id_data.id) = lhs; 2447 deps_init_id_data.where = DEPS_IN_LHS; 2448 } 2449 } 2450 2451 /* Finish initializing lhs data. */ 2452 static void 2453 deps_init_id_finish_lhs (void) 2454 { 2455 deps_init_id_data.where = DEPS_IN_INSN; 2456 } 2457 2458 /* Note a set of REGNO.
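For example (illustration only): when the RHS of an insn contains a post-increment address, the incremented register is noted as set while WHERE is still DEPS_IN_RHS; FORCE_USE_P is then set below so that deps_init_id_finish_insn downgrades the insn to type USE and it is never renamed.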
*/ 2459 static void 2460 deps_init_id_note_reg_set (int regno) 2461 { 2462 haifa_note_reg_set (regno); 2463 2464 if (deps_init_id_data.where == DEPS_IN_RHS) 2465 deps_init_id_data.force_use_p = true; 2466 2467 if (IDATA_TYPE (deps_init_id_data.id) != PC) 2468 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno); 2469 2470 #ifdef STACK_REGS 2471 /* Make instructions that set stack registers to be ineligible for 2472 renaming to avoid issues with find_used_regs. */ 2473 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) 2474 deps_init_id_data.force_use_p = true; 2475 #endif 2476 } 2477 2478 /* Note a clobber of REGNO. */ 2479 static void 2480 deps_init_id_note_reg_clobber (int regno) 2481 { 2482 haifa_note_reg_clobber (regno); 2483 2484 if (deps_init_id_data.where == DEPS_IN_RHS) 2485 deps_init_id_data.force_use_p = true; 2486 2487 if (IDATA_TYPE (deps_init_id_data.id) != PC) 2488 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno); 2489 } 2490 2491 /* Note a use of REGNO. */ 2492 static void 2493 deps_init_id_note_reg_use (int regno) 2494 { 2495 haifa_note_reg_use (regno); 2496 2497 if (IDATA_TYPE (deps_init_id_data.id) != PC) 2498 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno); 2499 } 2500 2501 /* Start initializing rhs data. */ 2502 static void 2503 deps_init_id_start_rhs (rtx rhs) 2504 { 2505 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); 2506 2507 /* And there was no sel_deps_reset_to_insn (). */ 2508 if (IDATA_LHS (deps_init_id_data.id) != NULL) 2509 { 2510 IDATA_RHS (deps_init_id_data.id) = rhs; 2511 deps_init_id_data.where = DEPS_IN_RHS; 2512 } 2513 } 2514 2515 /* Finish initializing rhs data. */ 2516 static void 2517 deps_init_id_finish_rhs (void) 2518 { 2519 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS 2520 || deps_init_id_data.where == DEPS_IN_INSN); 2521 deps_init_id_data.where = DEPS_IN_INSN; 2522 } 2523 2524 /* Finish initializing insn data. */ 2525 static void 2526 deps_init_id_finish_insn (void) 2527 { 2528 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); 2529 2530 if (IDATA_TYPE (deps_init_id_data.id) == SET) 2531 { 2532 rtx lhs = IDATA_LHS (deps_init_id_data.id); 2533 rtx rhs = IDATA_RHS (deps_init_id_data.id); 2534 2535 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs) 2536 || deps_init_id_data.force_use_p) 2537 { 2538 /* This should be a USE, as we don't want to schedule its RHS 2539 separately. However, we still want to have them recorded 2540 for the purposes of substitution. That's why we don't 2541 simply call downgrade_to_use () here. */ 2542 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET); 2543 gcc_assert (!lhs == !rhs); 2544 2545 IDATA_TYPE (deps_init_id_data.id) = USE; 2546 } 2547 } 2548 2549 deps_init_id_data.where = DEPS_IN_NOWHERE; 2550 } 2551 2552 /* This is dependence info used for initializing insn's data. */ 2553 static struct sched_deps_info_def deps_init_id_sched_deps_info; 2554 2555 /* This initializes most of the static part of the above structure. 
*/ 2556 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info = 2557 { 2558 NULL, 2559 2560 deps_init_id_start_insn, 2561 deps_init_id_finish_insn, 2562 deps_init_id_start_lhs, 2563 deps_init_id_finish_lhs, 2564 deps_init_id_start_rhs, 2565 deps_init_id_finish_rhs, 2566 deps_init_id_note_reg_set, 2567 deps_init_id_note_reg_clobber, 2568 deps_init_id_note_reg_use, 2569 NULL, /* note_mem_dep */ 2570 NULL, /* note_dep */ 2571 2572 0, /* use_cselib */ 2573 0, /* use_deps_list */ 2574 0 /* generate_spec_deps */ 2575 }; 2576 2577 /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true, 2578 we don't actually need information about lhs and rhs. */ 2579 static void 2580 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p) 2581 { 2582 rtx pat = PATTERN (insn); 2583 2584 if (NONJUMP_INSN_P (insn) 2585 && GET_CODE (pat) == SET 2586 && !force_unique_p) 2587 { 2588 IDATA_RHS (id) = SET_SRC (pat); 2589 IDATA_LHS (id) = SET_DEST (pat); 2590 } 2591 else 2592 IDATA_LHS (id) = IDATA_RHS (id) = NULL; 2593 } 2594 2595 /* Possibly downgrade INSN to USE. */ 2596 static void 2597 maybe_downgrade_id_to_use (idata_t id, insn_t insn) 2598 { 2599 bool must_be_use = false; 2600 unsigned uid = INSN_UID (insn); 2601 df_ref *rec; 2602 rtx lhs = IDATA_LHS (id); 2603 rtx rhs = IDATA_RHS (id); 2604 2605 /* We downgrade only SETs. */ 2606 if (IDATA_TYPE (id) != SET) 2607 return; 2608 2609 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs)) 2610 { 2611 IDATA_TYPE (id) = USE; 2612 return; 2613 } 2614 2615 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++) 2616 { 2617 df_ref def = *rec; 2618 2619 if (DF_REF_INSN (def) 2620 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY) 2621 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id))) 2622 { 2623 must_be_use = true; 2624 break; 2625 } 2626 2627 #ifdef STACK_REGS 2628 /* Make instructions that set stack registers to be ineligible for 2629 renaming to avoid issues with find_used_regs. */ 2630 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG)) 2631 { 2632 must_be_use = true; 2633 break; 2634 } 2635 #endif 2636 } 2637 2638 if (must_be_use) 2639 IDATA_TYPE (id) = USE; 2640 } 2641 2642 /* Setup register sets describing INSN in ID. */ 2643 static void 2644 setup_id_reg_sets (idata_t id, insn_t insn) 2645 { 2646 unsigned uid = INSN_UID (insn); 2647 df_ref *rec; 2648 regset tmp = get_clear_regset_from_pool (); 2649 2650 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++) 2651 { 2652 df_ref def = *rec; 2653 unsigned int regno = DF_REF_REGNO (def); 2654 2655 /* Post modifies are treated like clobbers by sched-deps.c. */ 2656 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER 2657 | DF_REF_PRE_POST_MODIFY))) 2658 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno); 2659 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER)) 2660 { 2661 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno); 2662 2663 #ifdef STACK_REGS 2664 /* For stack registers, treat writes to them as writes 2665 to the first one to be consistent with sched-deps.c. */ 2666 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) 2667 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG); 2668 #endif 2669 } 2670 /* Mark special refs that generate read/write def pair. 
*/ 2671 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL) 2672 || regno == STACK_POINTER_REGNUM) 2673 bitmap_set_bit (tmp, regno); 2674 } 2675 2676 for (rec = DF_INSN_UID_USES (uid); *rec; rec++) 2677 { 2678 df_ref use = *rec; 2679 unsigned int regno = DF_REF_REGNO (use); 2680 2681 /* When these refs are met for the first time, skip them, as 2682 these uses are just counterparts of some defs. */ 2683 if (bitmap_bit_p (tmp, regno)) 2684 bitmap_clear_bit (tmp, regno); 2685 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE)) 2686 { 2687 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno); 2688 2689 #ifdef STACK_REGS 2690 /* For stack registers, treat reads from them as reads from 2691 the first one to be consistent with sched-deps.c. */ 2692 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) 2693 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG); 2694 #endif 2695 } 2696 } 2697 2698 return_regset_to_pool (tmp); 2699 } 2700 2701 /* Initialize instruction data for INSN in ID using DF's data. */ 2702 static void 2703 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p) 2704 { 2705 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL); 2706 2707 setup_id_for_insn (id, insn, force_unique_p); 2708 setup_id_lhs_rhs (id, insn, force_unique_p); 2709 2710 if (INSN_NOP_P (insn)) 2711 return; 2712 2713 maybe_downgrade_id_to_use (id, insn); 2714 setup_id_reg_sets (id, insn); 2715 } 2716 2717 /* Initialize instruction data for INSN in ID. */ 2718 static void 2719 deps_init_id (idata_t id, insn_t insn, bool force_unique_p) 2720 { 2721 struct deps_desc _dc, *dc = &_dc; 2722 2723 deps_init_id_data.where = DEPS_IN_NOWHERE; 2724 deps_init_id_data.id = id; 2725 deps_init_id_data.force_unique_p = force_unique_p; 2726 deps_init_id_data.force_use_p = false; 2727 2728 init_deps (dc, false); 2729 2730 memcpy (&deps_init_id_sched_deps_info, 2731 &const_deps_init_id_sched_deps_info, 2732 sizeof (deps_init_id_sched_deps_info)); 2733 2734 if (spec_info != NULL) 2735 deps_init_id_sched_deps_info.generate_spec_deps = 1; 2736 2737 sched_deps_info = &deps_init_id_sched_deps_info; 2738 2739 deps_analyze_insn (dc, insn); 2740 2741 free_deps (dc); 2742 2743 deps_init_id_data.id = NULL; 2744 } 2745 2746 2747 struct sched_scan_info_def 2748 { 2749 /* This hook notifies the scheduler frontend to extend its internal 2750 per-basic-block data structures. It should be called once before a series 2751 of calls to bb_init (). */ 2752 void (*extend_bb) (void); 2753 2754 /* This hook makes the scheduler frontend initialize its internal data 2755 structures for the passed basic block. */ 2756 void (*init_bb) (basic_block); 2757 2758 /* This hook notifies the scheduler frontend to extend its internal per-insn 2759 data structures. It should be called once before a series of calls to 2760 insn_init (). */ 2761 void (*extend_insn) (void); 2762 2763 /* This hook makes the scheduler frontend initialize its internal data 2764 structures for the passed insn. */ 2765 void (*init_insn) (rtx); 2766 }; 2767 2768 /* A driver function to add a set of basic blocks (BBS) to the 2769 scheduling region.
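The hooks are invoked in a fixed order: extend_bb once, then init_bb for each block in BBS, then extend_insn once, then init_insn for every insn of every block. Unused hooks are left NULL, so a hypothetical caller would look like: const struct sched_scan_info_def ssi = { NULL, my_init_bb, NULL, my_init_insn }; sched_scan (&ssi, bbs); where my_init_bb and my_init_insn stand for the caller's own hooks (cf. sel_init_global_and_expr below for a real use).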
*/ 2770 static void 2771 sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs) 2772 { 2773 unsigned i; 2774 basic_block bb; 2775 2776 if (ssi->extend_bb) 2777 ssi->extend_bb (); 2778 2779 if (ssi->init_bb) 2780 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb) 2781 ssi->init_bb (bb); 2782 2783 if (ssi->extend_insn) 2784 ssi->extend_insn (); 2785 2786 if (ssi->init_insn) 2787 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb) 2788 { 2789 rtx insn; 2790 2791 FOR_BB_INSNS (bb, insn) 2792 ssi->init_insn (insn); 2793 } 2794 } 2795 2796 /* Implement hooks for collecting fundamental insn properties like whether an 2797 insn is an ASM or is within a SCHED_GROUP. */ 2798 2799 /* True when the "one-time init" data for INSN has not been initialized yet, i.e. this is the first time we see INSN. */ 2800 static bool 2801 first_time_insn_init (insn_t insn) 2802 { 2803 return INSN_LIVE (insn) == NULL; 2804 } 2805 2806 /* Hash an entry in a transformed_insns hashtable. */ 2807 static hashval_t 2808 hash_transformed_insns (const void *p) 2809 { 2810 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old); 2811 } 2812 2813 /* Compare the entries in a transformed_insns hashtable. */ 2814 static int 2815 eq_transformed_insns (const void *p, const void *q) 2816 { 2817 rtx i1 = VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old); 2818 rtx i2 = VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old); 2819 2820 if (INSN_UID (i1) == INSN_UID (i2)) 2821 return 1; 2822 return rtx_equal_p (PATTERN (i1), PATTERN (i2)); 2823 } 2824 2825 /* Free an entry in a transformed_insns hashtable. */ 2826 static void 2827 free_transformed_insns (void *p) 2828 { 2829 struct transformed_insns *pti = (struct transformed_insns *) p; 2830 2831 vinsn_detach (pti->vinsn_old); 2832 vinsn_detach (pti->vinsn_new); 2833 free (pti); 2834 } 2835 2836 /* Init the s_i_d data for INSN which should be inited just once, when 2837 we first see the insn. */ 2838 static void 2839 init_first_time_insn_data (insn_t insn) 2840 { 2841 /* This should not be set yet if this is the first time we init data for 2842 the insn. */ 2843 gcc_assert (first_time_insn_init (insn)); 2844 2845 /* These are needed for nops too. */ 2846 INSN_LIVE (insn) = get_regset_from_pool (); 2847 INSN_LIVE_VALID_P (insn) = false; 2848 2849 if (!INSN_NOP_P (insn)) 2850 { 2851 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL); 2852 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL); 2853 INSN_TRANSFORMED_INSNS (insn) 2854 = htab_create (16, hash_transformed_insns, 2855 eq_transformed_insns, free_transformed_insns); 2856 init_deps (&INSN_DEPS_CONTEXT (insn), true); 2857 } 2858 } 2859 2860 /* Free almost all of the above data for an INSN that has already been 2861 scheduled. Used for extra-large basic blocks. */ 2862 void 2863 free_data_for_scheduled_insn (insn_t insn) 2864 { 2865 gcc_assert (! first_time_insn_init (insn)); 2866 2867 if (! INSN_ANALYZED_DEPS (insn)) 2868 return; 2869 2870 BITMAP_FREE (INSN_ANALYZED_DEPS (insn)); 2871 BITMAP_FREE (INSN_FOUND_DEPS (insn)); 2872 htab_delete (INSN_TRANSFORMED_INSNS (insn)); 2873 2874 /* This is allocated only for bookkeeping insns. */ 2875 if (INSN_ORIGINATORS (insn)) 2876 BITMAP_FREE (INSN_ORIGINATORS (insn)); 2877 free_deps (&INSN_DEPS_CONTEXT (insn)); 2878 2879 INSN_ANALYZED_DEPS (insn) = NULL; 2880 2881 /* Clear the readonly flag so we would ICE when trying to recalculate 2882 the deps context (as we believe that it should not happen). */ 2883 (&INSN_DEPS_CONTEXT (insn))->readonly = 0; 2884 } 2885 2886 /* Free the same data as above for INSN.
*/ 2887 static void 2888 free_first_time_insn_data (insn_t insn) 2889 { 2890 gcc_assert (! first_time_insn_init (insn)); 2891 2892 free_data_for_scheduled_insn (insn); 2893 return_regset_to_pool (INSN_LIVE (insn)); 2894 INSN_LIVE (insn) = NULL; 2895 INSN_LIVE_VALID_P (insn) = false; 2896 } 2897 2898 /* Initialize region-scope data structures for basic blocks. */ 2899 static void 2900 init_global_and_expr_for_bb (basic_block bb) 2901 { 2902 if (sel_bb_empty_p (bb)) 2903 return; 2904 2905 invalidate_av_set (bb); 2906 } 2907 2908 /* Data for global dependency analysis (to initialize CANT_MOVE and 2909 SCHED_GROUP_P). */ 2910 static struct 2911 { 2912 /* Previous insn. */ 2913 insn_t prev_insn; 2914 } init_global_data; 2915 2916 /* Determine if INSN is in a sched_group, is an asm, or should not be 2917 cloned. After that, initialize its expr. */ 2918 static void 2919 init_global_and_expr_for_insn (insn_t insn) 2920 { 2921 if (LABEL_P (insn)) 2922 return; 2923 2924 if (NOTE_INSN_BASIC_BLOCK_P (insn)) 2925 { 2926 init_global_data.prev_insn = NULL_RTX; 2927 return; 2928 } 2929 2930 gcc_assert (INSN_P (insn)); 2931 2932 if (SCHED_GROUP_P (insn)) 2933 /* Setup a sched_group. */ 2934 { 2935 insn_t prev_insn = init_global_data.prev_insn; 2936 2937 if (prev_insn) 2938 INSN_SCHED_NEXT (prev_insn) = insn; 2939 2940 init_global_data.prev_insn = insn; 2941 } 2942 else 2943 init_global_data.prev_insn = NULL_RTX; 2944 2945 if (GET_CODE (PATTERN (insn)) == ASM_INPUT 2946 || asm_noperands (PATTERN (insn)) >= 0) 2947 /* Mark INSN as an asm. */ 2948 INSN_ASM_P (insn) = true; 2949 2950 { 2951 bool force_unique_p; 2952 ds_t spec_done_ds; 2953 2954 /* Certain instructions cannot be cloned, and frame-related insns and 2955 the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of 2956 their block. */ 2957 if (prologue_epilogue_contains (insn)) 2958 { 2959 if (RTX_FRAME_RELATED_P (insn)) 2960 CANT_MOVE (insn) = 1; 2961 else 2962 { 2963 rtx note; 2964 for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) 2965 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE 2966 && ((enum insn_note) INTVAL (XEXP (note, 0)) 2967 == NOTE_INSN_EPILOGUE_BEG)) 2968 { 2969 CANT_MOVE (insn) = 1; 2970 break; 2971 } 2972 } 2973 force_unique_p = true; 2974 } 2975 else 2976 if (CANT_MOVE (insn) 2977 || INSN_ASM_P (insn) 2978 || SCHED_GROUP_P (insn) 2979 || CALL_P (insn) 2980 /* Exception handling insns are always unique. */ 2981 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn)) 2982 /* TRAP_IF, though it has an INSN code, is control_flow_insn_p (). */ 2983 || control_flow_insn_p (insn) 2984 || volatile_insn_p (PATTERN (insn)) 2985 || (targetm.cannot_copy_insn_p 2986 && targetm.cannot_copy_insn_p (insn))) 2987 force_unique_p = true; 2988 else 2989 force_unique_p = false; 2990 2991 if (targetm.sched.get_insn_spec_ds) 2992 { 2993 spec_done_ds = targetm.sched.get_insn_spec_ds (insn); 2994 spec_done_ds = ds_get_max_dep_weak (spec_done_ds); 2995 } 2996 else 2997 spec_done_ds = 0; 2998 2999 /* Initialize INSN's expr. */ 3000 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0, 3001 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn), 3002 spec_done_ds, 0, 0, NULL, true, false, false, false, 3003 CANT_MOVE (insn)); 3004 } 3005 3006 init_first_time_insn_data (insn); 3007 } 3008 3009 /* Scan the region and initialize instruction data for basic blocks BBS. */ 3010 void 3011 sel_init_global_and_expr (bb_vec_t bbs) 3012 { 3013 /* ??? It would be nice to implement push / pop scheme for sched_infos.
*/ 3014 const struct sched_scan_info_def ssi = 3015 { 3016 NULL, /* extend_bb */ 3017 init_global_and_expr_for_bb, /* init_bb */ 3018 extend_insn_data, /* extend_insn */ 3019 init_global_and_expr_for_insn /* init_insn */ 3020 }; 3021 3022 sched_scan (&ssi, bbs); 3023 } 3024 3025 /* Finalize region-scope data structures for basic blocks. */ 3026 static void 3027 finish_global_and_expr_for_bb (basic_block bb) 3028 { 3029 av_set_clear (&BB_AV_SET (bb)); 3030 BB_AV_LEVEL (bb) = 0; 3031 } 3032 3033 /* Finalize INSN's data. */ 3034 static void 3035 finish_global_and_expr_insn (insn_t insn) 3036 { 3037 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn)) 3038 return; 3039 3040 gcc_assert (INSN_P (insn)); 3041 3042 if (INSN_LUID (insn) > 0) 3043 { 3044 free_first_time_insn_data (insn); 3045 INSN_WS_LEVEL (insn) = 0; 3046 CANT_MOVE (insn) = 0; 3047 3048 /* We can no longer assert this, as vinsns of this insn could be 3049 easily live in other insns' caches. This should be changed to 3050 a counter-like approach among all vinsns. */ 3051 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1); 3052 clear_expr (INSN_EXPR (insn)); 3053 } 3054 } 3055 3056 /* Finalize per-instruction data for the whole region. */ 3057 void 3058 sel_finish_global_and_expr (void) 3059 { 3060 { 3061 bb_vec_t bbs; 3062 int i; 3063 3064 bbs = VEC_alloc (basic_block, heap, current_nr_blocks); 3065 3066 for (i = 0; i < current_nr_blocks; i++) 3067 VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i))); 3068 3069 /* Clear AV_SETs and INSN_EXPRs. */ 3070 { 3071 const struct sched_scan_info_def ssi = 3072 { 3073 NULL, /* extend_bb */ 3074 finish_global_and_expr_for_bb, /* init_bb */ 3075 NULL, /* extend_insn */ 3076 finish_global_and_expr_insn /* init_insn */ 3077 }; 3078 3079 sched_scan (&ssi, bbs); 3080 } 3081 3082 VEC_free (basic_block, heap, bbs); 3083 } 3084 3085 finish_insns (); 3086 } 3087 3088 3089 /* In the below hooks, we merely calculate whether or not a dependence 3090 exists, and in what part of the insn. However, we will need more data 3091 when we start caching dependence requests. */ 3092 3093 /* Container to hold information for dependency analysis. */ 3094 static struct 3095 { 3096 deps_t dc; 3097 3098 /* A variable to track which part of rtx we are scanning in 3099 sched-deps.c: sched_analyze_insn (). */ 3100 deps_where_t where; 3101 3102 /* Current producer. */ 3103 insn_t pro; 3104 3105 /* Current consumer. */ 3106 vinsn_t con; 3107 3108 /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence. 3109 X is from { INSN, LHS, RHS }. */ 3110 ds_t has_dep_p[DEPS_IN_NOWHERE]; 3111 } has_dependence_data; 3112 3113 /* Start analyzing dependencies of INSN. */ 3114 static void 3115 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED) 3116 { 3117 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE); 3118 3119 has_dependence_data.where = DEPS_IN_INSN; 3120 } 3121 3122 /* Finish analyzing dependencies of an insn. */ 3123 static void 3124 has_dependence_finish_insn (void) 3125 { 3126 gcc_assert (has_dependence_data.where == DEPS_IN_INSN); 3127 3128 has_dependence_data.where = DEPS_IN_NOWHERE; 3129 } 3130 3131 /* Start analyzing dependencies of LHS. */ 3132 static void 3133 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED) 3134 { 3135 gcc_assert (has_dependence_data.where == DEPS_IN_INSN); 3136 3137 if (VINSN_LHS (has_dependence_data.con) != NULL) 3138 has_dependence_data.where = DEPS_IN_LHS; 3139 } 3140 3141 /* Finish analyzing dependencies of an lhs.
*/ 3142 static void 3143 has_dependence_finish_lhs (void) 3144 { 3145 has_dependence_data.where = DEPS_IN_INSN; 3146 } 3147 3148 /* Start analyzing dependencies of RHS. */ 3149 static void 3150 has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED) 3151 { 3152 gcc_assert (has_dependence_data.where == DEPS_IN_INSN); 3153 3154 if (VINSN_RHS (has_dependence_data.con) != NULL) 3155 has_dependence_data.where = DEPS_IN_RHS; 3156 } 3157 3158 /* Finish analyzing dependencies of an rhs. */ 3159 static void 3160 has_dependence_finish_rhs (void) 3161 { 3162 gcc_assert (has_dependence_data.where == DEPS_IN_RHS 3163 || has_dependence_data.where == DEPS_IN_INSN); 3164 3165 has_dependence_data.where = DEPS_IN_INSN; 3166 } 3167 3168 /* Note a set of REGNO. */ 3169 static void 3170 has_dependence_note_reg_set (int regno) 3171 { 3172 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; 3173 3174 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, 3175 VINSN_INSN_RTX 3176 (has_dependence_data.con))) 3177 { 3178 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; 3179 3180 if (reg_last->sets != NULL 3181 || reg_last->clobbers != NULL) 3182 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; 3183 3184 if (reg_last->uses) 3185 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; 3186 } 3187 } 3188 3189 /* Note a clobber of REGNO. */ 3190 static void 3191 has_dependence_note_reg_clobber (int regno) 3192 { 3193 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; 3194 3195 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, 3196 VINSN_INSN_RTX 3197 (has_dependence_data.con))) 3198 { 3199 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; 3200 3201 if (reg_last->sets) 3202 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; 3203 3204 if (reg_last->uses) 3205 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; 3206 } 3207 } 3208 3209 /* Note a use of REGNO. */ 3210 static void 3211 has_dependence_note_reg_use (int regno) 3212 { 3213 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; 3214 3215 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, 3216 VINSN_INSN_RTX 3217 (has_dependence_data.con))) 3218 { 3219 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; 3220 3221 if (reg_last->sets) 3222 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE; 3223 3224 if (reg_last->clobbers) 3225 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; 3226 3227 /* Handle BE_IN_SPEC. */ 3228 if (reg_last->uses) 3229 { 3230 ds_t pro_spec_checked_ds; 3231 3232 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro); 3233 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds); 3234 3235 if (pro_spec_checked_ds != 0 3236 && bitmap_bit_p (INSN_REG_SETS (has_dependence_data.pro), regno)) 3237 /* Merge BE_IN_SPEC bits into *DSP. */ 3238 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds, 3239 NULL_RTX, NULL_RTX); 3240 } 3241 } 3242 } 3243 3244 /* Note a memory dependence. */ 3245 static void 3246 has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED, 3247 rtx pending_mem ATTRIBUTE_UNUSED, 3248 insn_t pending_insn ATTRIBUTE_UNUSED, 3249 ds_t ds ATTRIBUTE_UNUSED) 3250 { 3251 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, 3252 VINSN_INSN_RTX (has_dependence_data.con))) 3253 { 3254 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; 3255 3256 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem); 3257 } 3258 } 3259 3260 /* Note a dependence.
*/ 3261 static void 3262 has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED, 3263 ds_t ds ATTRIBUTE_UNUSED) 3264 { 3265 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, 3266 VINSN_INSN_RTX (has_dependence_data.con))) 3267 { 3268 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; 3269 3270 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX); 3271 } 3272 } 3273 3274 /* Mark the insn as having a hard dependence that prevents speculation. */ 3275 void 3276 sel_mark_hard_insn (rtx insn) 3277 { 3278 int i; 3279 3280 /* This only works when we're in has_dependence_p mode. 3281 ??? This is a hack; this should actually be a hook. */ 3282 if (!has_dependence_data.dc || !has_dependence_data.pro) 3283 return; 3284 3285 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con)); 3286 gcc_assert (has_dependence_data.where == DEPS_IN_INSN); 3287 3288 for (i = 0; i < DEPS_IN_NOWHERE; i++) 3289 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE; 3290 } 3291 3292 /* This structure holds the hooks for the dependency analysis used when 3293 actually processing dependencies in the scheduler. */ 3294 static struct sched_deps_info_def has_dependence_sched_deps_info; 3295 3296 /* This initializes most of the fields of the above structure. */ 3297 static const struct sched_deps_info_def const_has_dependence_sched_deps_info = 3298 { 3299 NULL, 3300 3301 has_dependence_start_insn, 3302 has_dependence_finish_insn, 3303 has_dependence_start_lhs, 3304 has_dependence_finish_lhs, 3305 has_dependence_start_rhs, 3306 has_dependence_finish_rhs, 3307 has_dependence_note_reg_set, 3308 has_dependence_note_reg_clobber, 3309 has_dependence_note_reg_use, 3310 has_dependence_note_mem_dep, 3311 has_dependence_note_dep, 3312 3313 0, /* use_cselib */ 3314 0, /* use_deps_list */ 3315 0 /* generate_spec_deps */ 3316 }; 3317 3318 /* Initialize has_dependence_sched_deps_info with the extra spec field. */ 3319 static void 3320 setup_has_dependence_sched_deps_info (void) 3321 { 3322 memcpy (&has_dependence_sched_deps_info, 3323 &const_has_dependence_sched_deps_info, 3324 sizeof (has_dependence_sched_deps_info)); 3325 3326 if (spec_info != NULL) 3327 has_dependence_sched_deps_info.generate_spec_deps = 1; 3328 3329 sched_deps_info = &has_dependence_sched_deps_info; 3330 } 3331 3332 /* Remove all dependences found and recorded in the has_dependence_data 3333 array. */ 3334 void sel_clear_has_dependence (void) 3335 { 3336 int i; 3337 3338 for (i = 0; i < DEPS_IN_NOWHERE; i++) 3339 has_dependence_data.has_dep_p[i] = 0; 3340 } 3341 3342 /* Return nonzero if EXPR is dependent upon PRED. Return a pointer 3343 to the dependence information array in HAS_DEP_PP. */ 3344 ds_t 3345 has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp) 3346 { 3347 int i; 3348 ds_t ds; 3349 struct deps_desc *dc; 3350 3351 if (INSN_SIMPLEJUMP_P (pred)) 3352 /* An unconditional jump is just a transfer of control flow. 3353 Ignore it. */ 3354 return false; 3355 3356 dc = &INSN_DEPS_CONTEXT (pred); 3357 3358 /* We init this field lazily. */ 3359 if (dc->reg_last == NULL) 3360 init_deps_reg_last (dc); 3361 3362 if (!dc->readonly) 3363 { 3364 has_dependence_data.pro = NULL; 3365 /* Initialize the empty dep context with information about PRED.
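The context is marked readonly right after this, so subsequent queries against the same PRED reuse it as a cache instead of re-analyzing PRED each time.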
*/ 3366 advance_deps_context (dc, pred); 3367 dc->readonly = 1; 3368 } 3369 3370 has_dependence_data.where = DEPS_IN_NOWHERE; 3371 has_dependence_data.pro = pred; 3372 has_dependence_data.con = EXPR_VINSN (expr); 3373 has_dependence_data.dc = dc; 3374 3375 sel_clear_has_dependence (); 3376 3377 /* Now catch all dependencies that would be generated between PRED and 3378 INSN. */ 3379 setup_has_dependence_sched_deps_info (); 3380 deps_analyze_insn (dc, EXPR_INSN_RTX (expr)); 3381 has_dependence_data.dc = NULL; 3382 3383 /* When a barrier was found, set DEPS_IN_INSN bits. */ 3384 if (dc->last_reg_pending_barrier == TRUE_BARRIER) 3385 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE; 3386 else if (dc->last_reg_pending_barrier == MOVE_BARRIER) 3387 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI; 3388 3389 /* Do not allow stores to memory to move through checks. Currently 3390 we don't move this to sched-deps.c as the check doesn't have 3391 obvious places to which this dependence can be attached. 3392 FIXME: this should go into a hook. */ 3393 if (EXPR_LHS (expr) 3394 && MEM_P (EXPR_LHS (expr)) 3395 && sel_insn_is_speculation_check (pred)) 3396 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI; 3397 3398 *has_dep_pp = has_dependence_data.has_dep_p; 3399 ds = 0; 3400 for (i = 0; i < DEPS_IN_NOWHERE; i++) 3401 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i], 3402 NULL_RTX, NULL_RTX); 3403 3404 return ds; 3405 } 3406 3407 3408 /* Dependence hooks implementation that checks dependence latency constraints 3409 on the insns being scheduled. The entry point for these routines is 3410 the tick_check_p predicate. */ 3411 3412 static struct 3413 { 3414 /* An expr we are currently checking. */ 3415 expr_t expr; 3416 3417 /* A minimal cycle for its scheduling. */ 3418 int cycle; 3419 3420 /* Whether we have seen a true dependence while checking. */ 3421 bool seen_true_dep_p; 3422 } tick_check_data; 3423 3424 /* Update the minimal scheduling cycle for tick_check_insn given that it 3425 depends on PRO_INSN with status DS and weight DW. */ 3426 static void 3427 tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw) 3428 { 3429 expr_t con_expr = tick_check_data.expr; 3430 insn_t con_insn = EXPR_INSN_RTX (con_expr); 3431 3432 if (con_insn != pro_insn) 3433 { 3434 enum reg_note dt; 3435 int tick; 3436 3437 if (/* PROducer was removed from above due to pipelining. */ 3438 !INSN_IN_STREAM_P (pro_insn) 3439 /* Or PROducer was originally on the next iteration regarding the 3440 CONsumer. */ 3441 || (INSN_SCHED_TIMES (pro_insn) 3442 - EXPR_SCHED_TIMES (con_expr)) > 1) 3443 /* Don't count this dependence. */ 3444 return; 3445 3446 dt = ds_to_dt (ds); 3447 if (dt == REG_DEP_TRUE) 3448 tick_check_data.seen_true_dep_p = true; 3449 3450 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0); 3451 3452 { 3453 dep_def _dep, *dep = &_dep; 3454 3455 init_dep (dep, pro_insn, con_insn, dt); 3456 3457 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw); 3458 } 3459 3460 /* When there are several kinds of dependencies between pro and con, 3461 only REG_DEP_TRUE should be taken into account. */ 3462 if (tick > tick_check_data.cycle 3463 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p)) 3464 tick_check_data.cycle = tick; 3465 } 3466 } 3467 3468 /* An implementation of the note_dep hook. */ 3469 static void 3470 tick_check_note_dep (insn_t pro, ds_t ds) 3471 { 3472 tick_check_dep_with_dw (pro, ds, 0); 3473 } 3474 3475 /* An implementation of the note_mem_dep hook.
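Unlike note_dep above, this one also weighs the dependence: for a true dependence the likelihood that the two memory references actually conflict is estimated via estimate_dep_weak, while for other dependence kinds a zero weight is passed.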
*/ 3476 static void 3477 tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds) 3478 { 3479 dw_t dw; 3480 3481 dw = (ds_to_dt (ds) == REG_DEP_TRUE 3482 ? estimate_dep_weak (mem1, mem2) 3483 : 0); 3484 3485 tick_check_dep_with_dw (pro, ds, dw); 3486 } 3487 3488 /* This structure contains hooks for dependence analysis used when determining 3489 whether an insn is ready for scheduling. */ 3490 static struct sched_deps_info_def tick_check_sched_deps_info = 3491 { 3492 NULL, 3493 3494 NULL, 3495 NULL, 3496 NULL, 3497 NULL, 3498 NULL, 3499 NULL, 3500 haifa_note_reg_set, 3501 haifa_note_reg_clobber, 3502 haifa_note_reg_use, 3503 tick_check_note_mem_dep, 3504 tick_check_note_dep, 3505 3506 0, 0, 0 3507 }; 3508 3509 /* Estimate number of cycles from the current cycle of FENCE until EXPR can be 3510 scheduled. Return 0 if all data from producers in DC is ready. */ 3511 int 3512 tick_check_p (expr_t expr, deps_t dc, fence_t fence) 3513 { 3514 int cycles_left; 3515 /* Initialize variables. */ 3516 tick_check_data.expr = expr; 3517 tick_check_data.cycle = 0; 3518 tick_check_data.seen_true_dep_p = false; 3519 sched_deps_info = &tick_check_sched_deps_info; 3520 3521 gcc_assert (!dc->readonly); 3522 dc->readonly = 1; 3523 deps_analyze_insn (dc, EXPR_INSN_RTX (expr)); 3524 dc->readonly = 0; 3525 3526 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence); 3527 3528 return cycles_left >= 0 ? cycles_left : 0; 3529 } 3530 3531 3532 /* Functions to work with insns. */ 3533 3534 /* Returns true if LHS of INSN is the same as DEST of an insn 3535 being moved. */ 3536 bool 3537 lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest) 3538 { 3539 rtx lhs = INSN_LHS (insn); 3540 3541 if (lhs == NULL || dest == NULL) 3542 return false; 3543 3544 return rtx_equal_p (lhs, dest); 3545 } 3546 3547 /* Return s_i_d entry of INSN. Callable from debugger. */ 3548 sel_insn_data_def 3549 insn_sid (insn_t insn) 3550 { 3551 return *SID (insn); 3552 } 3553 3554 /* True when INSN is a speculative check. We can tell this by looking 3555 at the data structures of the selective scheduler, not by examining 3556 the pattern. */ 3557 bool 3558 sel_insn_is_speculation_check (rtx insn) 3559 { 3560 return s_i_d && !! INSN_SPEC_CHECKED_DS (insn); 3561 } 3562 3563 /* Extracts machine mode MODE and destination location DST_LOC 3564 for given INSN. */ 3565 void 3566 get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode) 3567 { 3568 rtx pat = PATTERN (insn); 3569 3570 gcc_assert (dst_loc); 3571 gcc_assert (GET_CODE (pat) == SET); 3572 3573 *dst_loc = SET_DEST (pat); 3574 3575 gcc_assert (*dst_loc); 3576 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc)); 3577 3578 if (mode) 3579 *mode = GET_MODE (*dst_loc); 3580 } 3581 3582 /* Returns true when moving through JUMP will result in bookkeeping 3583 creation. */ 3584 bool 3585 bookkeeping_can_be_created_if_moved_through_p (insn_t jump) 3586 { 3587 insn_t succ; 3588 succ_iterator si; 3589 3590 FOR_EACH_SUCC (succ, si, jump) 3591 if (sel_num_cfg_preds_gt_1 (succ)) 3592 return true; 3593 3594 return false; 3595 } 3596 3597 /* Return 'true' if INSN is the only one in its basic block. */ 3598 static bool 3599 insn_is_the_only_one_in_bb_p (insn_t insn) 3600 { 3601 return sel_bb_head_p (insn) && sel_bb_end_p (insn); 3602 } 3603 3604 #ifdef ENABLE_CHECKING 3605 /* Check that the region we're scheduling still has at most one 3606 backedge. 
*/ 3607 static void 3608 verify_backedges (void) 3609 { 3610 if (pipelining_p) 3611 { 3612 int i, n = 0; 3613 edge e; 3614 edge_iterator ei; 3615 3616 for (i = 0; i < current_nr_blocks; i++) 3617 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs) 3618 if (in_current_region_p (e->dest) 3619 && BLOCK_TO_BB (e->dest->index) < i) 3620 n++; 3621 3622 gcc_assert (n <= 1); 3623 } 3624 } 3625 #endif 3626 3627 3628 /* Functions to work with control flow. */ 3629 3630 /* Recompute BLOCK_TO_BB and BB_FOR_BLOCK for the current region so that blocks 3631 are sorted in topological order (it might have been invalidated by 3632 redirecting an edge). */ 3633 static void 3634 sel_recompute_toporder (void) 3635 { 3636 int i, n, rgn; 3637 int *postorder, n_blocks; 3638 3639 postorder = XALLOCAVEC (int, n_basic_blocks); 3640 n_blocks = post_order_compute (postorder, false, false); 3641 3642 rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); 3643 for (n = 0, i = n_blocks - 1; i >= 0; i--) 3644 if (CONTAINING_RGN (postorder[i]) == rgn) 3645 { 3646 BLOCK_TO_BB (postorder[i]) = n; 3647 BB_TO_BLOCK (n) = postorder[i]; 3648 n++; 3649 } 3650 3651 /* Assert that we updated info for all blocks. We may miss some blocks if 3652 this function is called when redirecting an edge made a block 3653 unreachable, but that block is not deleted yet. */ 3654 gcc_assert (n == RGN_NR_BLOCKS (rgn)); 3655 } 3656 3657 /* Tidy the possibly empty block BB. */ 3658 static bool 3659 maybe_tidy_empty_bb (basic_block bb) 3660 { 3661 basic_block succ_bb, pred_bb, note_bb; 3662 VEC (basic_block, heap) *dom_bbs; 3663 edge e; 3664 edge_iterator ei; 3665 bool rescan_p; 3666 3667 /* Keep an empty bb only if this block immediately precedes EXIT and 3668 has an incoming non-fallthrough edge, or it has no predecessors or 3669 successors. Otherwise remove it. */ 3670 if (!sel_bb_empty_p (bb) 3671 || (single_succ_p (bb) 3672 && single_succ (bb) == EXIT_BLOCK_PTR 3673 && (!single_pred_p (bb) 3674 || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU))) 3675 || EDGE_COUNT (bb->preds) == 0 3676 || EDGE_COUNT (bb->succs) == 0) 3677 return false; 3678 3679 /* Do not attempt to redirect complex edges. */ 3680 FOR_EACH_EDGE (e, ei, bb->preds) 3681 if (e->flags & EDGE_COMPLEX) 3682 return false; 3683 else if (e->flags & EDGE_FALLTHRU) 3684 { 3685 rtx note; 3686 /* If the previous bb ends with an asm goto, see if any of the 3687 ASM_OPERANDS_LABELs don't point to the fallthru 3688 label. Do not attempt to redirect it in that case. */ 3689 if (JUMP_P (BB_END (e->src)) 3690 && (note = extract_asm_operands (PATTERN (BB_END (e->src))))) 3691 { 3692 int i, n = ASM_OPERANDS_LABEL_LENGTH (note); 3693 3694 for (i = 0; i < n; ++i) 3695 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb)) 3696 return false; 3697 } 3698 } 3699 3700 free_data_sets (bb); 3701 3702 /* Do not delete BB if it has more than one successor. 3703 That can occur when we are moving a jump. */ 3704 if (!single_succ_p (bb)) 3705 { 3706 gcc_assert (can_merge_blocks_p (bb->prev_bb, bb)); 3707 sel_merge_blocks (bb->prev_bb, bb); 3708 return true; 3709 } 3710 3711 succ_bb = single_succ (bb); 3712 rescan_p = true; 3713 pred_bb = NULL; 3714 dom_bbs = NULL; 3715 3716 /* Save a pred/succ from the current region to attach the notes to. */ 3717 note_bb = NULL; 3718 FOR_EACH_EDGE (e, ei, bb->preds) 3719 if (in_current_region_p (e->src)) 3720 { 3721 note_bb = e->src; 3722 break; 3723 } 3724 if (note_bb == NULL) 3725 note_bb = succ_bb; 3726 3727 /* Redirect all non-fallthru edges to the next bb.
*/ 3728 while (rescan_p) 3729 { 3730 rescan_p = false; 3731 3732 FOR_EACH_EDGE (e, ei, bb->preds) 3733 { 3734 pred_bb = e->src; 3735 3736 if (!(e->flags & EDGE_FALLTHRU)) 3737 { 3738 /* We cannot invalidate the computed topological order by moving 3739 the edge destination block (E->SUCC) along a fallthru edge. 3740 3741 We will update dominators here only when we get 3742 an unreachable block while redirecting; otherwise 3743 sel_redirect_edge_and_branch will take care of it. */ 3744 if (e->dest != bb 3745 && single_pred_p (e->dest)) 3746 VEC_safe_push (basic_block, heap, dom_bbs, e->dest); 3747 sel_redirect_edge_and_branch (e, succ_bb); 3748 rescan_p = true; 3749 break; 3750 } 3751 /* If the edge is fallthru, but PRED_BB ends in a conditional jump 3752 to BB (so there is no non-fallthru edge from PRED_BB to BB), we 3753 still have to adjust it. */ 3754 else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb))) 3755 { 3756 /* If possible, try to remove the unneeded conditional jump. */ 3757 if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0 3758 && !IN_CURRENT_FENCE_P (BB_END (pred_bb))) 3759 { 3760 if (!sel_remove_insn (BB_END (pred_bb), false, false)) 3761 tidy_fallthru_edge (e); 3762 } 3763 else 3764 sel_redirect_edge_and_branch (e, succ_bb); 3765 rescan_p = true; 3766 break; 3767 } 3768 } 3769 } 3770 3771 if (can_merge_blocks_p (bb->prev_bb, bb)) 3772 sel_merge_blocks (bb->prev_bb, bb); 3773 else 3774 { 3775 /* This is a block without a fallthru predecessor. Just delete it. */ 3776 gcc_assert (note_bb); 3777 move_bb_info (note_bb, bb); 3778 remove_empty_bb (bb, true); 3779 } 3780 3781 if (!VEC_empty (basic_block, dom_bbs)) 3782 { 3783 VEC_safe_push (basic_block, heap, dom_bbs, succ_bb); 3784 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false); 3785 VEC_free (basic_block, heap, dom_bbs); 3786 } 3787 3788 return true; 3789 } 3790 3791 /* Tidy the control flow after we have removed the original insn from 3792 XBB. Return true if we have removed some blocks. When FULL_TIDYING 3793 is true, also try to optimize control flow on non-empty blocks. */ 3794 bool 3795 tidy_control_flow (basic_block xbb, bool full_tidying) 3796 { 3797 bool changed = true; 3798 insn_t first, last; 3799 3800 /* First check whether XBB is empty. */ 3801 changed = maybe_tidy_empty_bb (xbb); 3802 if (changed || !full_tidying) 3803 return changed; 3804 3805 /* Check if an unnecessary jump is left after removing the insn. */ 3806 if (bb_has_removable_jump_to_p (xbb, xbb->next_bb) 3807 && INSN_SCHED_TIMES (BB_END (xbb)) == 0 3808 && !IN_CURRENT_FENCE_P (BB_END (xbb))) 3809 { 3810 if (sel_remove_insn (BB_END (xbb), false, false)) 3811 return true; 3812 tidy_fallthru_edge (EDGE_SUCC (xbb, 0)); 3813 } 3814 3815 first = sel_bb_head (xbb); 3816 last = sel_bb_end (xbb); 3817 if (MAY_HAVE_DEBUG_INSNS) 3818 { 3819 if (first != last && DEBUG_INSN_P (first)) 3820 do 3821 first = NEXT_INSN (first); 3822 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first))); 3823 3824 if (first != last && DEBUG_INSN_P (last)) 3825 do 3826 last = PREV_INSN (last); 3827 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last))); 3828 } 3829 /* Check if an unnecessary jump in the previous basic block, leading to the 3830 next basic block, is left after removing INSN from the stream. 3831 If so, remove that jump and redirect the edge to the current 3832 basic block (where INSN was before deletion).
This way, 3833 when the NOP is deleted several instructions later together with its 3834 basic block, we will not get a jump to the next instruction, which 3835 can be harmful. */ 3836 if (first == last 3837 && !sel_bb_empty_p (xbb) 3838 && INSN_NOP_P (last) 3839 /* Flow goes fallthru from current block to the next. */ 3840 && EDGE_COUNT (xbb->succs) == 1 3841 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU) 3842 /* When the successor is an EXIT block, it may not be the next block. */ 3843 && single_succ (xbb) != EXIT_BLOCK_PTR 3844 /* And an unconditional jump in the previous basic block leads to 3845 the next basic block of XBB and this jump can be safely removed. */ 3846 && in_current_region_p (xbb->prev_bb) 3847 && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb) 3848 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0 3849 /* Also this jump is not at the scheduling boundary. */ 3850 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb))) 3851 { 3852 bool recompute_toporder_p; 3853 /* Clear the data structures of the jump - the jump itself will be 3854 removed by sel_redirect_edge_and_branch. */ 3855 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb))); 3856 recompute_toporder_p 3857 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb); 3858 3859 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU); 3860 3861 /* It can turn out that, after removing the unused jump, the basic 3862 block that contained it becomes empty too. In such a case 3863 remove it as well. */ 3864 if (sel_bb_empty_p (xbb->prev_bb)) 3865 changed = maybe_tidy_empty_bb (xbb->prev_bb); 3866 if (recompute_toporder_p) 3867 sel_recompute_toporder (); 3868 } 3869 3870 #ifdef ENABLE_CHECKING 3871 verify_backedges (); 3872 verify_dominators (CDI_DOMINATORS); 3873 #endif 3874 3875 return changed; 3876 } 3877 3878 /* Purge meaningless empty blocks in the middle of a region. */ 3879 void 3880 purge_empty_blocks (void) 3881 { 3882 int i; 3883 3884 /* Do not attempt to delete the first basic block in the region. */ 3885 for (i = 1; i < current_nr_blocks; ) 3886 { 3887 basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i)); 3888 3889 if (maybe_tidy_empty_bb (b)) 3890 continue; 3891 3892 i++; 3893 } 3894 } 3895 3896 /* Rip INSN off the insn stream. When ONLY_DISCONNECT is true, 3897 do not delete the insn's data, because it will be re-emitted later. 3898 Return true if we have removed some blocks afterwards. */ 3899 bool 3900 sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying) 3901 { 3902 basic_block bb = BLOCK_FOR_INSN (insn); 3903 3904 gcc_assert (INSN_IN_STREAM_P (insn)); 3905 3906 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb)) 3907 { 3908 expr_t expr; 3909 av_set_iterator i; 3910 3911 /* When we remove a debug insn that is the head of a BB, it remains 3912 in the AV_SET of the block, but it shouldn't. */ 3913 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb)) 3914 if (EXPR_INSN_RTX (expr) == insn) 3915 { 3916 av_set_iter_remove (&i); 3917 break; 3918 } 3919 } 3920 3921 if (only_disconnect) 3922 { 3923 insn_t prev = PREV_INSN (insn); 3924 insn_t next = NEXT_INSN (insn); 3925 basic_block bb = BLOCK_FOR_INSN (insn); 3926 3927 NEXT_INSN (prev) = next; 3928 PREV_INSN (next) = prev; 3929 3930 if (BB_HEAD (bb) == insn) 3931 { 3932 gcc_assert (BLOCK_FOR_INSN (prev) == bb); 3933 BB_HEAD (bb) = prev; 3934 } 3935 if (BB_END (bb) == insn) 3936 BB_END (bb) = prev; 3937 } 3938 else 3939 { 3940 remove_insn (insn); 3941 clear_expr (INSN_EXPR (insn)); 3942 } 3943 3944 /* It is necessary to null these fields before calling add_insn ().
*/ 3945 PREV_INSN (insn) = NULL_RTX; 3946 NEXT_INSN (insn) = NULL_RTX; 3947 3948 return tidy_control_flow (bb, full_tidying); 3949 } 3950 3951 /* Estimate number of the insns in BB. */ 3952 static int 3953 sel_estimate_number_of_insns (basic_block bb) 3954 { 3955 int res = 0; 3956 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb)); 3957 3958 for (; insn != next_tail; insn = NEXT_INSN (insn)) 3959 if (NONDEBUG_INSN_P (insn)) 3960 res++; 3961 3962 return res; 3963 } 3964 3965 /* We don't need separate luids for notes or labels. */ 3966 static int 3967 sel_luid_for_non_insn (rtx x) 3968 { 3969 gcc_assert (NOTE_P (x) || LABEL_P (x)); 3970 3971 return -1; 3972 } 3973 3974 /* Find the proper seqno for inserting at INSN by successors. 3975 Return -1 if no successors with positive seqno exist. */ 3976 static int 3977 get_seqno_by_succs (rtx insn) 3978 { 3979 basic_block bb = BLOCK_FOR_INSN (insn); 3980 rtx tmp = insn, end = BB_END (bb); 3981 int seqno; 3982 insn_t succ = NULL; 3983 succ_iterator si; 3984 3985 while (tmp != end) 3986 { 3987 tmp = NEXT_INSN (tmp); 3988 if (INSN_P (tmp)) 3989 return INSN_SEQNO (tmp); 3990 } 3991 3992 seqno = INT_MAX; 3993 3994 FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL) 3995 if (INSN_SEQNO (succ) > 0) 3996 seqno = MIN (seqno, INSN_SEQNO (succ)); 3997 3998 if (seqno == INT_MAX) 3999 return -1; 4000 4001 return seqno; 4002 } 4003 4004 /* Compute seqno for INSN by its preds or succs. */ 4005 static int 4006 get_seqno_for_a_jump (insn_t insn) 4007 { 4008 int seqno; 4009 4010 gcc_assert (INSN_SIMPLEJUMP_P (insn)); 4011 4012 if (!sel_bb_head_p (insn)) 4013 seqno = INSN_SEQNO (PREV_INSN (insn)); 4014 else 4015 { 4016 basic_block bb = BLOCK_FOR_INSN (insn); 4017 4018 if (single_pred_p (bb) 4019 && !in_current_region_p (single_pred (bb))) 4020 { 4021 /* We can have preds outside a region when splitting edges 4022 for pipelining of an outer loop. Use succ instead. 4023 There should be only one of them. */ 4024 insn_t succ = NULL; 4025 succ_iterator si; 4026 bool first = true; 4027 4028 gcc_assert (flag_sel_sched_pipelining_outer_loops 4029 && current_loop_nest); 4030 FOR_EACH_SUCC_1 (succ, si, insn, 4031 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) 4032 { 4033 gcc_assert (first); 4034 first = false; 4035 } 4036 4037 gcc_assert (succ != NULL); 4038 seqno = INSN_SEQNO (succ); 4039 } 4040 else 4041 { 4042 insn_t *preds; 4043 int n; 4044 4045 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n); 4046 4047 gcc_assert (n > 0); 4048 /* For one predecessor, use simple method. */ 4049 if (n == 1) 4050 seqno = INSN_SEQNO (preds[0]); 4051 else 4052 seqno = get_seqno_by_preds (insn); 4053 4054 free (preds); 4055 } 4056 } 4057 4058 /* We were unable to find a good seqno among preds. */ 4059 if (seqno < 0) 4060 seqno = get_seqno_by_succs (insn); 4061 4062 gcc_assert (seqno >= 0); 4063 4064 return seqno; 4065 } 4066 4067 /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors 4068 with positive seqno exist. */ 4069 int 4070 get_seqno_by_preds (rtx insn) 4071 { 4072 basic_block bb = BLOCK_FOR_INSN (insn); 4073 rtx tmp = insn, head = BB_HEAD (bb); 4074 insn_t *preds; 4075 int n, i, seqno; 4076 4077 while (tmp != head) 4078 { 4079 tmp = PREV_INSN (tmp); 4080 if (INSN_P (tmp)) 4081 return INSN_SEQNO (tmp); 4082 } 4083 4084 cfg_preds (bb, &preds, &n); 4085 for (i = 0, seqno = -1; i < n; i++) 4086 seqno = MAX (seqno, INSN_SEQNO (preds[i])); 4087 4088 return seqno; 4089 } 4090 4091 4092 4093 /* Extend pass-scope data structures for basic blocks. 
*/ 4094 void 4095 sel_extend_global_bb_info (void) 4096 { 4097 VEC_safe_grow_cleared (sel_global_bb_info_def, heap, sel_global_bb_info, 4098 last_basic_block); 4099 } 4100 4101 /* Extend region-scope data structures for basic blocks. */ 4102 static void 4103 extend_region_bb_info (void) 4104 { 4105 VEC_safe_grow_cleared (sel_region_bb_info_def, heap, sel_region_bb_info, 4106 last_basic_block); 4107 } 4108 4109 /* Extend all data structures to fit for all basic blocks. */ 4110 static void 4111 extend_bb_info (void) 4112 { 4113 sel_extend_global_bb_info (); 4114 extend_region_bb_info (); 4115 } 4116 4117 /* Finalize pass-scope data structures for basic blocks. */ 4118 void 4119 sel_finish_global_bb_info (void) 4120 { 4121 VEC_free (sel_global_bb_info_def, heap, sel_global_bb_info); 4122 } 4123 4124 /* Finalize region-scope data structures for basic blocks. */ 4125 static void 4126 finish_region_bb_info (void) 4127 { 4128 VEC_free (sel_region_bb_info_def, heap, sel_region_bb_info); 4129 } 4130 4131 4132 /* Data for each insn in current region. */ 4133 VEC (sel_insn_data_def, heap) *s_i_d = NULL; 4134 4135 /* Extend data structures for insns from current region. */ 4136 static void 4137 extend_insn_data (void) 4138 { 4139 int reserve; 4140 4141 sched_extend_target (); 4142 sched_deps_init (false); 4143 4144 /* Extend data structures for insns from current region. */ 4145 reserve = (sched_max_luid + 1 4146 - VEC_length (sel_insn_data_def, s_i_d)); 4147 if (reserve > 0 4148 && ! VEC_space (sel_insn_data_def, s_i_d, reserve)) 4149 { 4150 int size; 4151 4152 if (sched_max_luid / 2 > 1024) 4153 size = sched_max_luid + 1024; 4154 else 4155 size = 3 * sched_max_luid / 2; 4156 4157 4158 VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size); 4159 } 4160 } 4161 4162 /* Finalize data structures for insns from current region. */ 4163 static void 4164 finish_insns (void) 4165 { 4166 unsigned i; 4167 4168 /* Clear here all dependence contexts that may have left from insns that were 4169 removed during the scheduling. */ 4170 for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++) 4171 { 4172 sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i); 4173 4174 if (sid_entry->live) 4175 return_regset_to_pool (sid_entry->live); 4176 if (sid_entry->analyzed_deps) 4177 { 4178 BITMAP_FREE (sid_entry->analyzed_deps); 4179 BITMAP_FREE (sid_entry->found_deps); 4180 htab_delete (sid_entry->transformed_insns); 4181 free_deps (&sid_entry->deps_context); 4182 } 4183 if (EXPR_VINSN (&sid_entry->expr)) 4184 { 4185 clear_expr (&sid_entry->expr); 4186 4187 /* Also, clear CANT_MOVE bit here, because we really don't want it 4188 to be passed to the next region. */ 4189 CANT_MOVE_BY_LUID (i) = 0; 4190 } 4191 } 4192 4193 VEC_free (sel_insn_data_def, heap, s_i_d); 4194 } 4195 4196 /* A proxy to pass initialization data to init_insn (). */ 4197 static sel_insn_data_def _insn_init_ssid; 4198 static sel_insn_data_t insn_init_ssid = &_insn_init_ssid; 4199 4200 /* If true create a new vinsn. Otherwise use the one from EXPR. */ 4201 static bool insn_init_create_new_vinsn_p; 4202 4203 /* Set all necessary data for initialization of the new insn[s]. 
*/ 4204 static expr_t 4205 set_insn_init (expr_t expr, vinsn_t vi, int seqno) 4206 { 4207 expr_t x = &insn_init_ssid->expr; 4208 4209 copy_expr_onside (x, expr); 4210 if (vi != NULL) 4211 { 4212 insn_init_create_new_vinsn_p = false; 4213 change_vinsn_in_expr (x, vi); 4214 } 4215 else 4216 insn_init_create_new_vinsn_p = true; 4217 4218 insn_init_ssid->seqno = seqno; 4219 return x; 4220 } 4221 4222 /* Init data for INSN. */ 4223 static void 4224 init_insn_data (insn_t insn) 4225 { 4226 expr_t expr; 4227 sel_insn_data_t ssid = insn_init_ssid; 4228 4229 /* The fields mentioned below are special and hence are not being 4230 propagated to the new insns. */ 4231 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL 4232 && !ssid->after_stall_p && ssid->sched_cycle == 0); 4233 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0); 4234 4235 expr = INSN_EXPR (insn); 4236 copy_expr (expr, &ssid->expr); 4237 prepare_insn_expr (insn, ssid->seqno); 4238 4239 if (insn_init_create_new_vinsn_p) 4240 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p)); 4241 4242 if (first_time_insn_init (insn)) 4243 init_first_time_insn_data (insn); 4244 } 4245 4246 /* This is used to initialize spurious jumps generated by 4247 sel_redirect_edge (). */ 4248 static void 4249 init_simplejump_data (insn_t insn) 4250 { 4251 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0, 4252 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false, 4253 false, true); 4254 INSN_SEQNO (insn) = get_seqno_for_a_jump (insn); 4255 init_first_time_insn_data (insn); 4256 } 4257 4258 /* Perform deferred initialization of insns. This is used to process 4259 a new jump that may be created by redirect_edge. */ 4260 void 4261 sel_init_new_insn (insn_t insn, int flags) 4262 { 4263 /* We create data structures for bb when the first insn is emitted in it. */ 4264 if (INSN_P (insn) 4265 && INSN_IN_STREAM_P (insn) 4266 && insn_is_the_only_one_in_bb_p (insn)) 4267 { 4268 extend_bb_info (); 4269 create_initial_data_sets (BLOCK_FOR_INSN (insn)); 4270 } 4271 4272 if (flags & INSN_INIT_TODO_LUID) 4273 { 4274 sched_extend_luids (); 4275 sched_init_insn_luid (insn); 4276 } 4277 4278 if (flags & INSN_INIT_TODO_SSID) 4279 { 4280 extend_insn_data (); 4281 init_insn_data (insn); 4282 clear_expr (&insn_init_ssid->expr); 4283 } 4284 4285 if (flags & INSN_INIT_TODO_SIMPLEJUMP) 4286 { 4287 extend_insn_data (); 4288 init_simplejump_data (insn); 4289 } 4290 4291 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn)) 4292 == CONTAINING_RGN (BB_TO_BLOCK (0))); 4293 } 4294 4295 4296 /* Functions to init/finish work with lv sets. */ 4297 4298 /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */ 4299 static void 4300 init_lv_set (basic_block bb) 4301 { 4302 gcc_assert (!BB_LV_SET_VALID_P (bb)); 4303 4304 BB_LV_SET (bb) = get_regset_from_pool (); 4305 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb)); 4306 BB_LV_SET_VALID_P (bb) = true; 4307 } 4308 4309 /* Copy liveness information to BB from FROM_BB. */ 4310 static void 4311 copy_lv_set_from (basic_block bb, basic_block from_bb) 4312 { 4313 gcc_assert (!BB_LV_SET_VALID_P (bb)); 4314 4315 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb)); 4316 BB_LV_SET_VALID_P (bb) = true; 4317 } 4318 4319 /* Initialize lv set of all bb headers. */ 4320 void 4321 init_lv_sets (void) 4322 { 4323 basic_block bb; 4324 4325 /* Initialize of LV sets. */ 4326 FOR_EACH_BB (bb) 4327 init_lv_set (bb); 4328 4329 /* Don't forget EXIT_BLOCK. */ 4330 init_lv_set (EXIT_BLOCK_PTR); 4331 } 4332 4333 /* Release lv set of HEAD. 
*/ 4334 static void 4335 free_lv_set (basic_block bb) 4336 { 4337 gcc_assert (BB_LV_SET (bb) != NULL); 4338 4339 return_regset_to_pool (BB_LV_SET (bb)); 4340 BB_LV_SET (bb) = NULL; 4341 BB_LV_SET_VALID_P (bb) = false; 4342 } 4343 4344 /* Finalize lv sets of all bb headers. */ 4345 void 4346 free_lv_sets (void) 4347 { 4348 basic_block bb; 4349 4350 /* Don't forget EXIT_BLOCK. */ 4351 free_lv_set (EXIT_BLOCK_PTR); 4352 4353 /* Free LV sets. */ 4354 FOR_EACH_BB (bb) 4355 if (BB_LV_SET (bb)) 4356 free_lv_set (bb); 4357 } 4358 4359 /* Mark AV_SET for BB as invalid, so this set will be updated the next time 4360 compute_av() processes BB. This function is called when creating new basic 4361 blocks, as well as for blocks (either new or existing) where new jumps are 4362 created when the control flow is being updated. */ 4363 static void 4364 invalidate_av_set (basic_block bb) 4365 { 4366 BB_AV_LEVEL (bb) = -1; 4367 } 4368 4369 /* Create initial data sets for BB (they will be invalid). */ 4370 static void 4371 create_initial_data_sets (basic_block bb) 4372 { 4373 if (BB_LV_SET (bb)) 4374 BB_LV_SET_VALID_P (bb) = false; 4375 else 4376 BB_LV_SET (bb) = get_regset_from_pool (); 4377 invalidate_av_set (bb); 4378 } 4379 4380 /* Free av set of BB. */ 4381 static void 4382 free_av_set (basic_block bb) 4383 { 4384 av_set_clear (&BB_AV_SET (bb)); 4385 BB_AV_LEVEL (bb) = 0; 4386 } 4387 4388 /* Free data sets of BB. */ 4389 void 4390 free_data_sets (basic_block bb) 4391 { 4392 free_lv_set (bb); 4393 free_av_set (bb); 4394 } 4395 4396 /* Exchange lv sets of TO and FROM. */ 4397 static void 4398 exchange_lv_sets (basic_block to, basic_block from) 4399 { 4400 { 4401 regset to_lv_set = BB_LV_SET (to); 4402 4403 BB_LV_SET (to) = BB_LV_SET (from); 4404 BB_LV_SET (from) = to_lv_set; 4405 } 4406 4407 { 4408 bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to); 4409 4410 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from); 4411 BB_LV_SET_VALID_P (from) = to_lv_set_valid_p; 4412 } 4413 } 4414 4415 4416 /* Exchange av sets of TO and FROM. */ 4417 static void 4418 exchange_av_sets (basic_block to, basic_block from) 4419 { 4420 { 4421 av_set_t to_av_set = BB_AV_SET (to); 4422 4423 BB_AV_SET (to) = BB_AV_SET (from); 4424 BB_AV_SET (from) = to_av_set; 4425 } 4426 4427 { 4428 int to_av_level = BB_AV_LEVEL (to); 4429 4430 BB_AV_LEVEL (to) = BB_AV_LEVEL (from); 4431 BB_AV_LEVEL (from) = to_av_level; 4432 } 4433 } 4434 4435 /* Exchange data sets of TO and FROM. */ 4436 void 4437 exchange_data_sets (basic_block to, basic_block from) 4438 { 4439 exchange_lv_sets (to, from); 4440 exchange_av_sets (to, from); 4441 } 4442 4443 /* Copy data sets of FROM to TO. */ 4444 void 4445 copy_data_sets (basic_block to, basic_block from) 4446 { 4447 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to)); 4448 gcc_assert (BB_AV_SET (to) == NULL); 4449 4450 BB_AV_LEVEL (to) = BB_AV_LEVEL (from); 4451 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from); 4452 4453 if (BB_AV_SET_VALID_P (from)) 4454 { 4455 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from)); 4456 } 4457 if (BB_LV_SET_VALID_P (from)) 4458 { 4459 gcc_assert (BB_LV_SET (to) != NULL); 4460 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from)); 4461 } 4462 } 4463 4464 /* Return an av set for INSN, if any. 
*/
av_set_t
get_av_set (insn_t insn)
{
  av_set_t av_set;

  gcc_assert (AV_SET_VALID_P (insn));

  if (sel_bb_head_p (insn))
    av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
  else
    av_set = NULL;

  return av_set;
}

/* Implementation of AV_LEVEL () macro.  Return AV_LEVEL () of INSN.  */
int
get_av_level (insn_t insn)
{
  int av_level;

  gcc_assert (INSN_P (insn));

  if (sel_bb_head_p (insn))
    av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
  else
    av_level = INSN_WS_LEVEL (insn);

  return av_level;
}



/* Variables to work with control-flow graph.  */

/* The basic blocks that have already been processed by sched_data_update (),
   but haven't been passed to sel_add_bb () yet.  */
static VEC (basic_block, heap) *last_added_blocks = NULL;

/* A pool for allocating successor infos.  */
static struct
{
  /* A stack for saving succs_info structures.  */
  struct succs_info *stack;

  /* Its size.  */
  int size;

  /* Top of the stack.  */
  int top;

  /* Maximal value of the top.  */
  int max_top;
}  succs_info_pool;

/* Functions to work with control-flow graph.  */

/* Return the head insn of BB, skipping its basic block note; NULL if
   BB has no real insns.  */
insn_t
sel_bb_head (basic_block bb)
{
  insn_t head;

  if (bb == EXIT_BLOCK_PTR)
    {
      gcc_assert (exit_insn != NULL_RTX);
      head = exit_insn;
    }
  else
    {
      insn_t note;

      note = bb_note (bb);
      head = next_nonnote_insn (note);

      if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
	head = NULL_RTX;
    }

  return head;
}

/* Return true if INSN is a basic block header.  */
bool
sel_bb_head_p (insn_t insn)
{
  return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
}

/* Return last insn of BB.  */
insn_t
sel_bb_end (basic_block bb)
{
  if (sel_bb_empty_p (bb))
    return NULL_RTX;

  gcc_assert (bb != EXIT_BLOCK_PTR);

  return BB_END (bb);
}

/* Return true if INSN is the last insn in its basic block.  */
bool
sel_bb_end_p (insn_t insn)
{
  return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
}

/* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK.  */
bool
sel_bb_empty_p (basic_block bb)
{
  return sel_bb_head (bb) == NULL;
}

/* True when BB belongs to the current scheduling region.  */
bool
in_current_region_p (basic_block bb)
{
  if (bb->index < NUM_FIXED_BLOCKS)
    return false;

  return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
}

/* Return the block which is a fallthru bb of a conditional jump JUMP.  */
basic_block
fallthru_bb_of_jump (rtx jump)
{
  if (!JUMP_P (jump))
    return NULL;

  if (!any_condjump_p (jump))
    return NULL;

  /* A basic block that ends with a conditional jump may still have one
     successor (and be followed by a barrier); we are not interested in
     such blocks.  */
  if (single_succ_p (BLOCK_FOR_INSN (jump)))
    return NULL;

  return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
}

/* Remove all notes from BB.
*/ 4609 static void 4610 init_bb (basic_block bb) 4611 { 4612 remove_notes (bb_note (bb), BB_END (bb)); 4613 BB_NOTE_LIST (bb) = note_list; 4614 } 4615 4616 void 4617 sel_init_bbs (bb_vec_t bbs) 4618 { 4619 const struct sched_scan_info_def ssi = 4620 { 4621 extend_bb_info, /* extend_bb */ 4622 init_bb, /* init_bb */ 4623 NULL, /* extend_insn */ 4624 NULL /* init_insn */ 4625 }; 4626 4627 sched_scan (&ssi, bbs); 4628 } 4629 4630 /* Restore notes for the whole region. */ 4631 static void 4632 sel_restore_notes (void) 4633 { 4634 int bb; 4635 insn_t insn; 4636 4637 for (bb = 0; bb < current_nr_blocks; bb++) 4638 { 4639 basic_block first, last; 4640 4641 first = EBB_FIRST_BB (bb); 4642 last = EBB_LAST_BB (bb)->next_bb; 4643 4644 do 4645 { 4646 note_list = BB_NOTE_LIST (first); 4647 restore_other_notes (NULL, first); 4648 BB_NOTE_LIST (first) = NULL_RTX; 4649 4650 FOR_BB_INSNS (first, insn) 4651 if (NONDEBUG_INSN_P (insn)) 4652 reemit_notes (insn); 4653 4654 first = first->next_bb; 4655 } 4656 while (first != last); 4657 } 4658 } 4659 4660 /* Free per-bb data structures. */ 4661 void 4662 sel_finish_bbs (void) 4663 { 4664 sel_restore_notes (); 4665 4666 /* Remove current loop preheader from this loop. */ 4667 if (current_loop_nest) 4668 sel_remove_loop_preheader (); 4669 4670 finish_region_bb_info (); 4671 } 4672 4673 /* Return true if INSN has a single successor of type FLAGS. */ 4674 bool 4675 sel_insn_has_single_succ_p (insn_t insn, int flags) 4676 { 4677 insn_t succ; 4678 succ_iterator si; 4679 bool first_p = true; 4680 4681 FOR_EACH_SUCC_1 (succ, si, insn, flags) 4682 { 4683 if (first_p) 4684 first_p = false; 4685 else 4686 return false; 4687 } 4688 4689 return true; 4690 } 4691 4692 /* Allocate successor's info. */ 4693 static struct succs_info * 4694 alloc_succs_info (void) 4695 { 4696 if (succs_info_pool.top == succs_info_pool.max_top) 4697 { 4698 int i; 4699 4700 if (++succs_info_pool.max_top >= succs_info_pool.size) 4701 gcc_unreachable (); 4702 4703 i = ++succs_info_pool.top; 4704 succs_info_pool.stack[i].succs_ok = VEC_alloc (rtx, heap, 10); 4705 succs_info_pool.stack[i].succs_other = VEC_alloc (rtx, heap, 10); 4706 succs_info_pool.stack[i].probs_ok = VEC_alloc (int, heap, 10); 4707 } 4708 else 4709 succs_info_pool.top++; 4710 4711 return &succs_info_pool.stack[succs_info_pool.top]; 4712 } 4713 4714 /* Free successor's info. */ 4715 void 4716 free_succs_info (struct succs_info * sinfo) 4717 { 4718 gcc_assert (succs_info_pool.top >= 0 4719 && &succs_info_pool.stack[succs_info_pool.top] == sinfo); 4720 succs_info_pool.top--; 4721 4722 /* Clear stale info. */ 4723 VEC_block_remove (rtx, sinfo->succs_ok, 4724 0, VEC_length (rtx, sinfo->succs_ok)); 4725 VEC_block_remove (rtx, sinfo->succs_other, 4726 0, VEC_length (rtx, sinfo->succs_other)); 4727 VEC_block_remove (int, sinfo->probs_ok, 4728 0, VEC_length (int, sinfo->probs_ok)); 4729 sinfo->all_prob = 0; 4730 sinfo->succs_ok_n = 0; 4731 sinfo->all_succs_n = 0; 4732 } 4733 4734 /* Compute successor info for INSN. FLAGS are the flags passed 4735 to the FOR_EACH_SUCC_1 iterator. */ 4736 struct succs_info * 4737 compute_succs_info (insn_t insn, short flags) 4738 { 4739 succ_iterator si; 4740 insn_t succ; 4741 struct succs_info *sinfo = alloc_succs_info (); 4742 4743 /* Traverse *all* successors and decide what to do with each. */ 4744 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL) 4745 { 4746 /* FIXME: this doesn't work for skipping to loop exits, as we don't 4747 perform code motion through inner loops. 
*/ 4748 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS; 4749 4750 if (current_flags & flags) 4751 { 4752 VEC_safe_push (rtx, heap, sinfo->succs_ok, succ); 4753 VEC_safe_push (int, heap, sinfo->probs_ok, 4754 /* FIXME: Improve calculation when skipping 4755 inner loop to exits. */ 4756 (si.bb_end 4757 ? si.e1->probability 4758 : REG_BR_PROB_BASE)); 4759 sinfo->succs_ok_n++; 4760 } 4761 else 4762 VEC_safe_push (rtx, heap, sinfo->succs_other, succ); 4763 4764 /* Compute all_prob. */ 4765 if (!si.bb_end) 4766 sinfo->all_prob = REG_BR_PROB_BASE; 4767 else 4768 sinfo->all_prob += si.e1->probability; 4769 4770 sinfo->all_succs_n++; 4771 } 4772 4773 return sinfo; 4774 } 4775 4776 /* Return the predecessors of BB in PREDS and their number in N. 4777 Empty blocks are skipped. SIZE is used to allocate PREDS. */ 4778 static void 4779 cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size) 4780 { 4781 edge e; 4782 edge_iterator ei; 4783 4784 gcc_assert (BLOCK_TO_BB (bb->index) != 0); 4785 4786 FOR_EACH_EDGE (e, ei, bb->preds) 4787 { 4788 basic_block pred_bb = e->src; 4789 insn_t bb_end = BB_END (pred_bb); 4790 4791 if (!in_current_region_p (pred_bb)) 4792 { 4793 gcc_assert (flag_sel_sched_pipelining_outer_loops 4794 && current_loop_nest); 4795 continue; 4796 } 4797 4798 if (sel_bb_empty_p (pred_bb)) 4799 cfg_preds_1 (pred_bb, preds, n, size); 4800 else 4801 { 4802 if (*n == *size) 4803 *preds = XRESIZEVEC (insn_t, *preds, 4804 (*size = 2 * *size + 1)); 4805 (*preds)[(*n)++] = bb_end; 4806 } 4807 } 4808 4809 gcc_assert (*n != 0 4810 || (flag_sel_sched_pipelining_outer_loops 4811 && current_loop_nest)); 4812 } 4813 4814 /* Find all predecessors of BB and record them in PREDS and their number 4815 in N. Empty blocks are skipped, and only normal (forward in-region) 4816 edges are processed. */ 4817 static void 4818 cfg_preds (basic_block bb, insn_t **preds, int *n) 4819 { 4820 int size = 0; 4821 4822 *preds = NULL; 4823 *n = 0; 4824 cfg_preds_1 (bb, preds, n, &size); 4825 } 4826 4827 /* Returns true if we are moving INSN through join point. */ 4828 bool 4829 sel_num_cfg_preds_gt_1 (insn_t insn) 4830 { 4831 basic_block bb; 4832 4833 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0) 4834 return false; 4835 4836 bb = BLOCK_FOR_INSN (insn); 4837 4838 while (1) 4839 { 4840 if (EDGE_COUNT (bb->preds) > 1) 4841 return true; 4842 4843 gcc_assert (EDGE_PRED (bb, 0)->dest == bb); 4844 bb = EDGE_PRED (bb, 0)->src; 4845 4846 if (!sel_bb_empty_p (bb)) 4847 break; 4848 } 4849 4850 return false; 4851 } 4852 4853 /* Returns true when BB should be the end of an ebb. Adapted from the 4854 code in sched-ebb.c. */ 4855 bool 4856 bb_ends_ebb_p (basic_block bb) 4857 { 4858 basic_block next_bb = bb_next_bb (bb); 4859 edge e; 4860 4861 if (next_bb == EXIT_BLOCK_PTR 4862 || bitmap_bit_p (forced_ebb_heads, next_bb->index) 4863 || (LABEL_P (BB_HEAD (next_bb)) 4864 /* NB: LABEL_NUSES () is not maintained outside of jump.c. 4865 Work around that. */ 4866 && !single_pred_p (next_bb))) 4867 return true; 4868 4869 if (!in_current_region_p (next_bb)) 4870 return true; 4871 4872 e = find_fallthru_edge (bb->succs); 4873 if (e) 4874 { 4875 gcc_assert (e->dest == next_bb); 4876 4877 return false; 4878 } 4879 4880 return true; 4881 } 4882 4883 /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a 4884 successor of INSN. 
*/
bool
in_same_ebb_p (insn_t insn, insn_t succ)
{
  basic_block ptr = BLOCK_FOR_INSN (insn);

  for (;;)
    {
      if (ptr == BLOCK_FOR_INSN (succ))
        return true;

      if (bb_ends_ebb_p (ptr))
        return false;

      ptr = bb_next_bb (ptr);
    }

  gcc_unreachable ();
  return false;
}

/* Recomputes the reverse topological order for the function and
   saves it in REV_TOP_ORDER_INDEX.  REV_TOP_ORDER_INDEX_LEN is also
   modified appropriately.  */
static void
recompute_rev_top_order (void)
{
  int *postorder;
  int n_blocks, i;

  if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
    {
      rev_top_order_index_len = last_basic_block;
      rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
                                        rev_top_order_index_len);
    }

  postorder = XNEWVEC (int, n_basic_blocks);

  n_blocks = post_order_compute (postorder, true, false);
  gcc_assert (n_basic_blocks == n_blocks);

  /* Build reverse function: for each basic block with BB->INDEX == K
     rev_top_order_index[K] is its reverse topological sort number.  */
  for (i = 0; i < n_blocks; i++)
    {
      gcc_assert (postorder[i] < rev_top_order_index_len);
      rev_top_order_index[postorder[i]] = i;
    }

  free (postorder);
}

/* Clear all flags from insns in BB that could spoil its rescheduling.  */
void
clear_outdated_rtx_info (basic_block bb)
{
  rtx insn;

  FOR_BB_INSNS (bb, insn)
    if (INSN_P (insn))
      {
	SCHED_GROUP_P (insn) = 0;
	INSN_AFTER_STALL_P (insn) = 0;
	INSN_SCHED_TIMES (insn) = 0;
	EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;

	/* We cannot use the changed caches, as previously we could ignore
	   the LHS dependence due to enabled renaming and transform
	   the expression, and currently we'll be unable to do this.  */
	htab_empty (INSN_TRANSFORMED_INSNS (insn));
      }
}

/* Add BB_NOTE to the pool of available basic block notes.  */
static void
return_bb_to_pool (basic_block bb)
{
  rtx note = bb_note (bb);

  gcc_assert (NOTE_BASIC_BLOCK (note) == bb
	      && bb->aux == NULL);

  /* It turns out that current cfg infrastructure does not support
     reuse of basic blocks.  Don't bother for now.  */
  /*VEC_safe_push (rtx, heap, bb_note_pool, note);*/
}

/* Get a bb_note from pool or return NULL_RTX if pool is empty.  */
static rtx
get_bb_note_from_pool (void)
{
  if (VEC_empty (rtx, bb_note_pool))
    return NULL_RTX;
  else
    {
      rtx note = VEC_pop (rtx, bb_note_pool);

      PREV_INSN (note) = NULL_RTX;
      NEXT_INSN (note) = NULL_RTX;

      return note;
    }
}

/* Free bb_note_pool.  */
void
free_bb_note_pool (void)
{
  VEC_free (rtx, heap, bb_note_pool);
}

/* Setup the scheduler pools and the successor structure.  */
void
alloc_sched_pools (void)
{
  int succs_size;

  succs_size = MAX_WS + 1;
  succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
  succs_info_pool.size = succs_size;
  succs_info_pool.top = -1;
  succs_info_pool.max_top = -1;

  sched_lists_pool = create_alloc_pool ("sel-sched-lists",
                                        sizeof (struct _list_node), 500);
}

/* Free the pools.  */
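/* A minimal usage sketch of the pool discipline set up above.  This is
   illustrative only, hence not compiled; the helper name is hypothetical
   and INSN is assumed to be an insn inside an initialized region.  Each
   compute_succs_info () borrows a slot from the stack in SUCCS_INFO_POOL,
   and free_succs_info () must return slots in LIFO order (it asserts that
   SINFO is the current top of the stack).  */
#if 0
static void
succs_info_pool_usage_example (insn_t insn)
{
  struct succs_info *sinfo;

  alloc_sched_pools ();

  /* Borrow a slot and fill it with INSN's successors.  */
  sinfo = compute_succs_info (insn, SUCCS_NORMAL);
  /* ... inspect sinfo->succs_ok, sinfo->probs_ok, sinfo->all_prob ...  */
  free_succs_info (sinfo);

  free_sched_pools ();
}
#endif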
void
free_sched_pools (void)
{
  int i;

  free_alloc_pool (sched_lists_pool);
  gcc_assert (succs_info_pool.top == -1);
  for (i = 0; i < succs_info_pool.max_top; i++)
    {
      VEC_free (rtx, heap, succs_info_pool.stack[i].succs_ok);
      VEC_free (rtx, heap, succs_info_pool.stack[i].succs_other);
      VEC_free (int, heap, succs_info_pool.stack[i].probs_ok);
    }
  free (succs_info_pool.stack);
}


/* Returns a position in RGN where BB can be inserted retaining
   topological order.  */
static int
find_place_to_insert_bb (basic_block bb, int rgn)
{
  bool has_preds_outside_rgn = false;
  edge e;
  edge_iterator ei;

  /* Find whether we have preds outside the region.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!in_current_region_p (e->src))
      {
        has_preds_outside_rgn = true;
        break;
      }

  /* Recompute the top order -- needed when we have > 1 pred
     and in case we don't have preds outside.  */
  if (flag_sel_sched_pipelining_outer_loops
      && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
    {
      int i, bbi = bb->index, cur_bbi;

      recompute_rev_top_order ();
      for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
	{
	  cur_bbi = BB_TO_BLOCK (i);
	  if (rev_top_order_index[bbi]
	      < rev_top_order_index[cur_bbi])
	    break;
	}

      /* The loop went one block past the right place, so increase I.
         The caller adds one more (see add_block_to_current_region),
         so compensate by decreasing I again.  */
      return (i + 1) - 1;
    }
  else if (has_preds_outside_rgn)
    {
      /* This is the case when we generate an extra empty block
         to serve as region head during pipelining.  */
      e = EDGE_SUCC (bb, 0);
      gcc_assert (EDGE_COUNT (bb->succs) == 1
		  && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
		  && (BLOCK_TO_BB (e->dest->index) == 0));
      return -1;
    }

  /* We don't have preds outside the region.  We should have
     the only pred, because the multiple preds case comes from
     the pipelining of outer loops, and that is handled above.
     Just take the bbi of this single pred.  */
  if (EDGE_COUNT (bb->succs) > 0)
    {
      int pred_bbi;

      gcc_assert (EDGE_COUNT (bb->preds) == 1);

      pred_bbi = EDGE_PRED (bb, 0)->src->index;
      return BLOCK_TO_BB (pred_bbi);
    }
  else
    /* BB has no successors.  It is safe to put it in the end.  */
    return current_nr_blocks - 1;
}

/* Deletes an empty basic block freeing its data.  */
static void
delete_and_free_basic_block (basic_block bb)
{
  gcc_assert (sel_bb_empty_p (bb));

  if (BB_LV_SET (bb))
    free_lv_set (bb);

  bitmap_clear_bit (blocks_to_reschedule, bb->index);

  /* Can't assert av_set properties because we use sel_remove_bb
     when removing loop preheader from the region.  At the point of
     removing the preheader we already have deallocated sel_region_bb_info.  */
  gcc_assert (BB_LV_SET (bb) == NULL
	      && !BB_LV_SET_VALID_P (bb)
	      && BB_AV_LEVEL (bb) == 0
	      && BB_AV_SET (bb) == NULL);

  delete_basic_block (bb);
}

/* Add BB to the current region and update the region data.  */
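/* Illustration (hypothetical numbers): if the region's part of
   RGN_BB_TABLE is { 7, 3, 9 } and find_place_to_insert_bb () returns 0
   for a new block with index 5, then BBI becomes 1 and the entries from
   that position onwards are shifted to give { 7, 5, 3, 9 }; BLOCK_TO_BB
   of blocks 3 and 9, and RGN_BLOCKS of every following region, each
   grow by one.  */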
static void
add_block_to_current_region (basic_block bb)
{
  int i, pos, bbi = -2, rgn;

  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  bbi = find_place_to_insert_bb (bb, rgn);
  bbi += 1;
  pos = RGN_BLOCKS (rgn) + bbi;

  gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
              && ebb_head[bbi] == pos);

  /* Make a place for the new block.  */
  extend_regions ();

  for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
    BLOCK_TO_BB (rgn_bb_table[i])++;

  memmove (rgn_bb_table + pos + 1,
           rgn_bb_table + pos,
           (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));

  /* Initialize data for BB.  */
  rgn_bb_table[pos] = bb->index;
  BLOCK_TO_BB (bb->index) = bbi;
  CONTAINING_RGN (bb->index) = rgn;

  RGN_NR_BLOCKS (rgn)++;

  for (i = rgn + 1; i <= nr_regions; i++)
    RGN_BLOCKS (i)++;
}

/* Remove BB from the current region and update the region data.  */
static void
remove_bb_from_region (basic_block bb)
{
  int i, pos, bbi = -2, rgn;

  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  bbi = BLOCK_TO_BB (bb->index);
  pos = RGN_BLOCKS (rgn) + bbi;

  gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
              && ebb_head[bbi] == pos);

  for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
    BLOCK_TO_BB (rgn_bb_table[i])--;

  memmove (rgn_bb_table + pos,
           rgn_bb_table + pos + 1,
           (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));

  RGN_NR_BLOCKS (rgn)--;
  for (i = rgn + 1; i <= nr_regions; i++)
    RGN_BLOCKS (i)--;
}

/* Add BB to the current region and update all data.  If BB is NULL, add all
   blocks from the last_added_blocks vector.  */
static void
sel_add_bb (basic_block bb)
{
  /* Extend luids so that new notes will receive zero luids.  */
  sched_extend_luids ();
  sched_init_bbs ();
  sel_init_bbs (last_added_blocks);

  /* When BB is passed explicitly, the vector should contain
     exactly one element equal to BB; otherwise, the vector
     should not be NULL.  */
  gcc_assert (last_added_blocks != NULL);

  if (bb != NULL)
    {
      gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
                  && VEC_index (basic_block,
                                last_added_blocks, 0) == bb);
      add_block_to_current_region (bb);

      /* We associate creating/deleting data sets with the first insn
         appearing / disappearing in the bb.  */
      if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
	create_initial_data_sets (bb);

      VEC_free (basic_block, heap, last_added_blocks);
    }
  else
    /* BB is NULL - process LAST_ADDED_BLOCKS instead.  */
    {
      int i;
      basic_block temp_bb = NULL;

      for (i = 0;
           VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
        {
          add_block_to_current_region (bb);
          temp_bb = bb;
        }

      /* We need to fetch at least one bb so we know the region
         to update.  */
      gcc_assert (temp_bb != NULL);
      bb = temp_bb;

      VEC_free (basic_block, heap, last_added_blocks);
    }

  rgn_setup_region (CONTAINING_RGN (bb->index));
}

/* Remove BB from the current region and update all data.
   If REMOVE_FROM_CFG_P is true, also remove the block from the CFG.
*/ 5233 static void 5234 sel_remove_bb (basic_block bb, bool remove_from_cfg_p) 5235 { 5236 unsigned idx = bb->index; 5237 5238 gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX); 5239 5240 remove_bb_from_region (bb); 5241 return_bb_to_pool (bb); 5242 bitmap_clear_bit (blocks_to_reschedule, idx); 5243 5244 if (remove_from_cfg_p) 5245 { 5246 basic_block succ = single_succ (bb); 5247 delete_and_free_basic_block (bb); 5248 set_immediate_dominator (CDI_DOMINATORS, succ, 5249 recompute_dominator (CDI_DOMINATORS, succ)); 5250 } 5251 5252 rgn_setup_region (CONTAINING_RGN (idx)); 5253 } 5254 5255 /* Concatenate info of EMPTY_BB to info of MERGE_BB. */ 5256 static void 5257 move_bb_info (basic_block merge_bb, basic_block empty_bb) 5258 { 5259 if (in_current_region_p (merge_bb)) 5260 concat_note_lists (BB_NOTE_LIST (empty_bb), 5261 &BB_NOTE_LIST (merge_bb)); 5262 BB_NOTE_LIST (empty_bb) = NULL_RTX; 5263 5264 } 5265 5266 /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from 5267 region, but keep it in CFG. */ 5268 static void 5269 remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p) 5270 { 5271 /* The block should contain just a note or a label. 5272 We try to check whether it is unused below. */ 5273 gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb) 5274 || LABEL_P (BB_HEAD (empty_bb))); 5275 5276 /* If basic block has predecessors or successors, redirect them. */ 5277 if (remove_from_cfg_p 5278 && (EDGE_COUNT (empty_bb->preds) > 0 5279 || EDGE_COUNT (empty_bb->succs) > 0)) 5280 { 5281 basic_block pred; 5282 basic_block succ; 5283 5284 /* We need to init PRED and SUCC before redirecting edges. */ 5285 if (EDGE_COUNT (empty_bb->preds) > 0) 5286 { 5287 edge e; 5288 5289 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1); 5290 5291 e = EDGE_PRED (empty_bb, 0); 5292 gcc_assert (e->src == empty_bb->prev_bb 5293 && (e->flags & EDGE_FALLTHRU)); 5294 5295 pred = empty_bb->prev_bb; 5296 } 5297 else 5298 pred = NULL; 5299 5300 if (EDGE_COUNT (empty_bb->succs) > 0) 5301 { 5302 /* We do not check fallthruness here as above, because 5303 after removing a jump the edge may actually be not fallthru. */ 5304 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1); 5305 succ = EDGE_SUCC (empty_bb, 0)->dest; 5306 } 5307 else 5308 succ = NULL; 5309 5310 if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL) 5311 { 5312 edge e = EDGE_PRED (empty_bb, 0); 5313 5314 if (e->flags & EDGE_FALLTHRU) 5315 redirect_edge_succ_nodup (e, succ); 5316 else 5317 sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ); 5318 } 5319 5320 if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL) 5321 { 5322 edge e = EDGE_SUCC (empty_bb, 0); 5323 5324 if (find_edge (pred, e->dest) == NULL) 5325 redirect_edge_pred (e, pred); 5326 } 5327 } 5328 5329 /* Finish removing. */ 5330 sel_remove_bb (empty_bb, remove_from_cfg_p); 5331 } 5332 5333 /* An implementation of create_basic_block hook, which additionally updates 5334 per-bb data structures. 
*/
static basic_block
sel_create_basic_block (void *headp, void *endp, basic_block after)
{
  basic_block new_bb;
  insn_t new_bb_note;

  gcc_assert (flag_sel_sched_pipelining_outer_loops
              || last_added_blocks == NULL);

  new_bb_note = get_bb_note_from_pool ();

  if (new_bb_note == NULL_RTX)
    new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
  else
    {
      new_bb = create_basic_block_structure ((rtx) headp, (rtx) endp,
					     new_bb_note, after);
      new_bb->aux = NULL;
    }

  VEC_safe_push (basic_block, heap, last_added_blocks, new_bb);

  return new_bb;
}

/* Implement sched_init_only_bb ().  */
static void
sel_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (after == NULL);

  extend_regions ();
  rgn_make_new_region_out_of_new_block (bb);
}

/* Update the latch when we've split or merged it from FROM block to TO.
   This should be checked for all outer loops, too.  */
static void
change_loops_latches (basic_block from, basic_block to)
{
  gcc_assert (from != to);

  if (current_loop_nest)
    {
      struct loop *loop;

      for (loop = current_loop_nest; loop; loop = loop_outer (loop))
        if (considered_for_pipelining_p (loop) && loop->latch == from)
          {
            gcc_assert (loop == current_loop_nest);
            loop->latch = to;
            gcc_assert (loop_latch_edge (loop));
          }
    }
}

/* Splits BB into two basic blocks, adding it to the region and extending
   per-bb data structures.  Returns the newly created bb.  */
static basic_block
sel_split_block (basic_block bb, rtx after)
{
  basic_block new_bb;
  insn_t insn;

  new_bb = sched_split_block_1 (bb, after);
  sel_add_bb (new_bb);

  /* This should be called after sel_add_bb, because this uses
     CONTAINING_RGN for the new block, which is not yet initialized.
     FIXME: this function may be a no-op now.  */
  change_loops_latches (bb, new_bb);

  /* Update ORIG_BB_INDEX for insns moved into the new block.  */
  FOR_BB_INSNS (new_bb, insn)
    if (INSN_P (insn))
      EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;

  if (sel_bb_empty_p (bb))
    {
      gcc_assert (!sel_bb_empty_p (new_bb));

      /* NEW_BB has data sets that need to be updated and BB holds
         data sets that should be removed.  Exchange these data sets
         so that we won't lose BB's valid data sets.  */
      exchange_data_sets (new_bb, bb);
      free_data_sets (bb);
    }

  if (!sel_bb_empty_p (new_bb)
      && bitmap_bit_p (blocks_to_reschedule, bb->index))
    bitmap_set_bit (blocks_to_reschedule, new_bb->index);

  return new_bb;
}

/* If BB ends with a jump insn whose UID is at least PREV_MAX_UID,
   return it.  Otherwise return NULL.  */
static rtx
check_for_new_jump (basic_block bb, int prev_max_uid)
{
  rtx end;

  end = sel_bb_end (bb);
  if (end && INSN_UID (end) >= prev_max_uid)
    return end;
  return NULL;
}

/* Look for a new jump either in FROM block or in the newly created JUMP_BB
   block.  New means having a UID of at least PREV_MAX_UID.  */
static rtx
find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
{
  rtx jump;

  /* Return immediately if no new insns were emitted.  */
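  /* Insn UIDs only grow, so an unchanged maximum UID means no insn was
     emitted since the caller sampled PREV_MAX_UID; conversely, any insn
     emitted after that point has a UID of at least PREV_MAX_UID.  */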
  if (get_max_uid () == prev_max_uid)
    return NULL;

  /* Now check both blocks for new jumps.  There will be at most one.  */
  if ((jump = check_for_new_jump (from, prev_max_uid)))
    return jump;

  if (jump_bb != NULL
      && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
    return jump;
  return NULL;
}

/* Splits E and adds the newly created basic block to the current region.
   Returns this basic block.  */
basic_block
sel_split_edge (edge e)
{
  basic_block new_bb, src, other_bb = NULL;
  int prev_max_uid;
  rtx jump;

  src = e->src;
  prev_max_uid = get_max_uid ();
  new_bb = split_edge (e);

  if (flag_sel_sched_pipelining_outer_loops
      && current_loop_nest)
    {
      int i;
      basic_block bb;

      /* Some of the basic blocks might not have been added to the loop.
         Add them here, until this is fixed in force_fallthru.  */
      for (i = 0;
           VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
	if (!bb->loop_father)
	  {
	    add_bb_to_loop (bb, e->dest->loop_father);

	    gcc_assert (!other_bb && (new_bb->index != bb->index));
	    other_bb = bb;
	  }
    }

  /* Add all last_added_blocks to the region.  */
  sel_add_bb (NULL);

  jump = find_new_jump (src, new_bb, prev_max_uid);
  if (jump)
    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);

  /* Put the correct lv set on this block.  */
  if (other_bb && !sel_bb_empty_p (other_bb))
    compute_live (sel_bb_head (other_bb));

  return new_bb;
}

/* Implement sched_create_empty_bb ().  */
static basic_block
sel_create_empty_bb (basic_block after)
{
  basic_block new_bb;

  new_bb = sched_create_empty_bb_1 (after);

  /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
     later.  */
  gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
	      && VEC_index (basic_block, last_added_blocks, 0) == new_bb);

  VEC_free (basic_block, heap, last_added_blocks);
  return new_bb;
}

/* Implement sched_create_recovery_block.  ORIG_INSN is where the block
   will be split to insert a check.  */
basic_block
sel_create_recovery_block (insn_t orig_insn)
{
  basic_block first_bb, second_bb, recovery_block;
  basic_block before_recovery = NULL;
  rtx jump;

  first_bb = BLOCK_FOR_INSN (orig_insn);
  if (sel_bb_end_p (orig_insn))
    {
      /* Avoid introducing an empty block while splitting.  */
      gcc_assert (single_succ_p (first_bb));
      second_bb = single_succ (first_bb);
    }
  else
    second_bb = sched_split_block (first_bb, orig_insn);

  recovery_block = sched_create_recovery_block (&before_recovery);
  if (before_recovery)
    copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);

  gcc_assert (sel_bb_empty_p (recovery_block));
  sched_create_recovery_edges (first_bb, recovery_block, second_bb);
  if (current_loops != NULL)
    add_bb_to_loop (recovery_block, first_bb->loop_father);

  sel_add_bb (recovery_block);

  jump = BB_END (recovery_block);
  gcc_assert (sel_bb_head (recovery_block) == jump);
  sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);

  return recovery_block;
}

/* Merge basic block B into basic block A.  */
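/* The callers guarantee the shape asserted below: B is empty, and its
   only incoming edge comes from A == B->prev_bb, e.g.

       A: ...insns...   --fallthru-->   B: (only a bb note)

   B's note list is first moved to A and B is dropped from the region,
   then the blocks are merged and any loop latch that pointed to B is
   retargeted to A.  */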
static void
sel_merge_blocks (basic_block a, basic_block b)
{
  gcc_assert (sel_bb_empty_p (b)
              && EDGE_COUNT (b->preds) == 1
              && EDGE_PRED (b, 0)->src == b->prev_bb);

  move_bb_info (b->prev_bb, b);
  remove_empty_bb (b, false);
  merge_blocks (a, b);
  change_loops_latches (b, a);
}

/* A wrapper for redirect_edge_and_branch_force, which also initializes
   data structures for a possibly created bb and insns.  */
void
sel_redirect_edge_and_branch_force (edge e, basic_block to)
{
  basic_block jump_bb, src, orig_dest = e->dest;
  int prev_max_uid;
  rtx jump;

  /* This function is now used only for bookkeeping code creation, where
     we'll never get the single pred of orig_dest block and thus will not
     hit unreachable blocks when updating dominator info.  */
  gcc_assert (!sel_bb_empty_p (e->src)
              && !single_pred_p (orig_dest));
  src = e->src;
  prev_max_uid = get_max_uid ();
  jump_bb = redirect_edge_and_branch_force (e, to);

  if (jump_bb != NULL)
    sel_add_bb (jump_bb);

  /* As of now this function cannot be used to spoil the loop structure,
     so there is nothing to update.  But check it to be sure.  */
  if (current_loop_nest
      && pipelining_p)
    gcc_assert (loop_latch_edge (current_loop_nest));

  jump = find_new_jump (src, jump_bb, prev_max_uid);
  if (jump)
    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
  set_immediate_dominator (CDI_DOMINATORS, to,
			   recompute_dominator (CDI_DOMINATORS, to));
  set_immediate_dominator (CDI_DOMINATORS, orig_dest,
			   recompute_dominator (CDI_DOMINATORS, orig_dest));
}

/* A wrapper for redirect_edge_and_branch.  Return TRUE if blocks connected by
   the redirected edge are in reverse topological order.  */
bool
sel_redirect_edge_and_branch (edge e, basic_block to)
{
  bool latch_edge_p;
  basic_block src, orig_dest = e->dest;
  int prev_max_uid;
  rtx jump;
  edge redirected;
  bool recompute_toporder_p = false;
  bool maybe_unreachable = single_pred_p (orig_dest);

  latch_edge_p = (pipelining_p
                  && current_loop_nest
                  && e == loop_latch_edge (current_loop_nest));

  src = e->src;
  prev_max_uid = get_max_uid ();

  redirected = redirect_edge_and_branch (e, to);

  gcc_assert (redirected && last_added_blocks == NULL);

  /* When we've redirected a latch edge, update the header.  */
  if (latch_edge_p)
    {
      current_loop_nest->header = to;
      gcc_assert (loop_latch_edge (current_loop_nest));
    }

  /* In rare situations, the topological relation between the blocks connected
     by the redirected edge can change (see PR42245 for an example).  Update
     block_to_bb/bb_to_block.  */
  if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
      && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
    recompute_toporder_p = true;

  jump = find_new_jump (src, NULL, prev_max_uid);
  if (jump)
    sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);

  /* Only update dominator info when we don't have unreachable blocks.
     Otherwise we'll update in maybe_tidy_empty_bb.
*/ 5659 if (!maybe_unreachable) 5660 { 5661 set_immediate_dominator (CDI_DOMINATORS, to, 5662 recompute_dominator (CDI_DOMINATORS, to)); 5663 set_immediate_dominator (CDI_DOMINATORS, orig_dest, 5664 recompute_dominator (CDI_DOMINATORS, orig_dest)); 5665 } 5666 return recompute_toporder_p; 5667 } 5668 5669 /* This variable holds the cfg hooks used by the selective scheduler. */ 5670 static struct cfg_hooks sel_cfg_hooks; 5671 5672 /* Register sel-sched cfg hooks. */ 5673 void 5674 sel_register_cfg_hooks (void) 5675 { 5676 sched_split_block = sel_split_block; 5677 5678 orig_cfg_hooks = get_cfg_hooks (); 5679 sel_cfg_hooks = orig_cfg_hooks; 5680 5681 sel_cfg_hooks.create_basic_block = sel_create_basic_block; 5682 5683 set_cfg_hooks (sel_cfg_hooks); 5684 5685 sched_init_only_bb = sel_init_only_bb; 5686 sched_split_block = sel_split_block; 5687 sched_create_empty_bb = sel_create_empty_bb; 5688 } 5689 5690 /* Unregister sel-sched cfg hooks. */ 5691 void 5692 sel_unregister_cfg_hooks (void) 5693 { 5694 sched_create_empty_bb = NULL; 5695 sched_split_block = NULL; 5696 sched_init_only_bb = NULL; 5697 5698 set_cfg_hooks (orig_cfg_hooks); 5699 } 5700 5701 5702 /* Emit an insn rtx based on PATTERN. If a jump insn is wanted, 5703 LABEL is where this jump should be directed. */ 5704 rtx 5705 create_insn_rtx_from_pattern (rtx pattern, rtx label) 5706 { 5707 rtx insn_rtx; 5708 5709 gcc_assert (!INSN_P (pattern)); 5710 5711 start_sequence (); 5712 5713 if (label == NULL_RTX) 5714 insn_rtx = emit_insn (pattern); 5715 else if (DEBUG_INSN_P (label)) 5716 insn_rtx = emit_debug_insn (pattern); 5717 else 5718 { 5719 insn_rtx = emit_jump_insn (pattern); 5720 JUMP_LABEL (insn_rtx) = label; 5721 ++LABEL_NUSES (label); 5722 } 5723 5724 end_sequence (); 5725 5726 sched_extend_luids (); 5727 sched_extend_target (); 5728 sched_deps_init (false); 5729 5730 /* Initialize INSN_CODE now. */ 5731 recog_memoized (insn_rtx); 5732 return insn_rtx; 5733 } 5734 5735 /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn 5736 must not be clonable. */ 5737 vinsn_t 5738 create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p) 5739 { 5740 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx)); 5741 5742 /* If VINSN_TYPE is not USE, retain its uniqueness. */ 5743 return vinsn_create (insn_rtx, force_unique_p); 5744 } 5745 5746 /* Create a copy of INSN_RTX. */ 5747 rtx 5748 create_copy_of_insn_rtx (rtx insn_rtx) 5749 { 5750 rtx res, link; 5751 5752 if (DEBUG_INSN_P (insn_rtx)) 5753 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), 5754 insn_rtx); 5755 5756 gcc_assert (NONJUMP_INSN_P (insn_rtx)); 5757 5758 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), 5759 NULL_RTX); 5760 5761 /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND 5762 since mark_jump_label will make them. REG_LABEL_TARGETs are created 5763 there too, but are supposed to be sticky, so we copy them. */ 5764 for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1)) 5765 if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND 5766 && REG_NOTE_KIND (link) != REG_EQUAL 5767 && REG_NOTE_KIND (link) != REG_EQUIV) 5768 { 5769 if (GET_CODE (link) == EXPR_LIST) 5770 add_reg_note (res, REG_NOTE_KIND (link), 5771 copy_insn_1 (XEXP (link, 0))); 5772 else 5773 add_reg_note (res, REG_NOTE_KIND (link), XEXP (link, 0)); 5774 } 5775 5776 return res; 5777 } 5778 5779 /* Change vinsn field of EXPR to hold NEW_VINSN. 
*/ 5780 void 5781 change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn) 5782 { 5783 vinsn_detach (EXPR_VINSN (expr)); 5784 5785 EXPR_VINSN (expr) = new_vinsn; 5786 vinsn_attach (new_vinsn); 5787 } 5788 5789 /* Helpers for global init. */ 5790 /* This structure is used to be able to call existing bundling mechanism 5791 and calculate insn priorities. */ 5792 static struct haifa_sched_info sched_sel_haifa_sched_info = 5793 { 5794 NULL, /* init_ready_list */ 5795 NULL, /* can_schedule_ready_p */ 5796 NULL, /* schedule_more_p */ 5797 NULL, /* new_ready */ 5798 NULL, /* rgn_rank */ 5799 sel_print_insn, /* rgn_print_insn */ 5800 contributes_to_priority, 5801 NULL, /* insn_finishes_block_p */ 5802 5803 NULL, NULL, 5804 NULL, NULL, 5805 0, 0, 5806 5807 NULL, /* add_remove_insn */ 5808 NULL, /* begin_schedule_ready */ 5809 NULL, /* begin_move_insn */ 5810 NULL, /* advance_target_bb */ 5811 5812 NULL, 5813 NULL, 5814 5815 SEL_SCHED | NEW_BBS 5816 }; 5817 5818 /* Setup special insns used in the scheduler. */ 5819 void 5820 setup_nop_and_exit_insns (void) 5821 { 5822 gcc_assert (nop_pattern == NULL_RTX 5823 && exit_insn == NULL_RTX); 5824 5825 nop_pattern = constm1_rtx; 5826 5827 start_sequence (); 5828 emit_insn (nop_pattern); 5829 exit_insn = get_insns (); 5830 end_sequence (); 5831 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR); 5832 } 5833 5834 /* Free special insns used in the scheduler. */ 5835 void 5836 free_nop_and_exit_insns (void) 5837 { 5838 exit_insn = NULL_RTX; 5839 nop_pattern = NULL_RTX; 5840 } 5841 5842 /* Setup a special vinsn used in new insns initialization. */ 5843 void 5844 setup_nop_vinsn (void) 5845 { 5846 nop_vinsn = vinsn_create (exit_insn, false); 5847 vinsn_attach (nop_vinsn); 5848 } 5849 5850 /* Free a special vinsn used in new insns initialization. */ 5851 void 5852 free_nop_vinsn (void) 5853 { 5854 gcc_assert (VINSN_COUNT (nop_vinsn) == 1); 5855 vinsn_detach (nop_vinsn); 5856 nop_vinsn = NULL; 5857 } 5858 5859 /* Call a set_sched_flags hook. */ 5860 void 5861 sel_set_sched_flags (void) 5862 { 5863 /* ??? This means that set_sched_flags were called, and we decided to 5864 support speculation. However, set_sched_flags also modifies flags 5865 on current_sched_info, doing this only at global init. And we 5866 sometimes change c_s_i later. So put the correct flags again. */ 5867 if (spec_info && targetm.sched.set_sched_flags) 5868 targetm.sched.set_sched_flags (spec_info); 5869 } 5870 5871 /* Setup pointers to global sched info structures. */ 5872 void 5873 sel_setup_sched_infos (void) 5874 { 5875 rgn_setup_common_sched_info (); 5876 5877 memcpy (&sel_common_sched_info, common_sched_info, 5878 sizeof (sel_common_sched_info)); 5879 5880 sel_common_sched_info.fix_recovery_cfg = NULL; 5881 sel_common_sched_info.add_block = NULL; 5882 sel_common_sched_info.estimate_number_of_insns 5883 = sel_estimate_number_of_insns; 5884 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn; 5885 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS; 5886 5887 common_sched_info = &sel_common_sched_info; 5888 5889 current_sched_info = &sched_sel_haifa_sched_info; 5890 current_sched_info->sched_max_insns_priority = 5891 get_rgn_sched_max_insns_priority (); 5892 5893 sel_set_sched_flags (); 5894 } 5895 5896 5897 /* Adds basic block BB to region RGN at the position *BB_ORD_INDEX, 5898 *BB_ORD_INDEX after that is increased. 
*/
static void
sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
{
  RGN_NR_BLOCKS (rgn) += 1;
  RGN_DONT_CALC_DEPS (rgn) = 0;
  RGN_HAS_REAL_EBB (rgn) = 0;
  CONTAINING_RGN (bb->index) = rgn;
  BLOCK_TO_BB (bb->index) = *bb_ord_index;
  rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
  (*bb_ord_index)++;

  /* FIXME: it is true only when not scheduling ebbs.  */
  RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
}

/* Functions to support pipelining of outer loops.  */

/* Creates a new empty region and returns its number.  */
static int
sel_create_new_region (void)
{
  int new_rgn_number = nr_regions;

  RGN_NR_BLOCKS (new_rgn_number) = 0;

  /* FIXME: This will work only when EBBs are not created.  */
  if (new_rgn_number != 0)
    RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
      RGN_NR_BLOCKS (new_rgn_number - 1);
  else
    RGN_BLOCKS (new_rgn_number) = 0;

  /* Set the blocks of the next region so the other functions may
     calculate the number of blocks in the region.  */
  RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
    RGN_NR_BLOCKS (new_rgn_number);

  nr_regions++;

  return new_rgn_number;
}

/* If X has a smaller topological sort number than Y, returns -1;
   if greater, returns 1.  */
static int
bb_top_order_comparator (const void *x, const void *y)
{
  basic_block bb1 = *(const basic_block *) x;
  basic_block bb2 = *(const basic_block *) y;

  gcc_assert (bb1 == bb2
	      || rev_top_order_index[bb1->index]
		 != rev_top_order_index[bb2->index]);

  /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
     bbs with greater number should go earlier.  */
  if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
    return -1;
  else
    return 1;
}

/* Create a region for LOOP and return its number.  If we don't want
   to pipeline LOOP, return -1.  */
static int
make_region_from_loop (struct loop *loop)
{
  unsigned int i;
  int new_rgn_number = -1;
  struct loop *inner;

  /* Basic block index, to be assigned to BLOCK_TO_BB.  */
  int bb_ord_index = 0;
  basic_block *loop_blocks;
  basic_block preheader_block;

  if (loop->num_nodes
      > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
    return -1;

  /* Don't pipeline loops whose latch belongs to some of its inner loops.  */
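  /* Illustration (hypothetical nest): in

         outer: { bb1  inner: { bb2  bb3 } }

     with outer->latch == bb3, the latch lies inside INNER, so OUTER is
     rejected by the check below.  */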
  for (inner = loop->inner; inner; inner = inner->inner)
    if (flow_bb_inside_loop_p (inner, loop->latch))
      return -1;

  loop->ninsns = num_loop_insns (loop);
  if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
    return -1;

  loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);

  for (i = 0; i < loop->num_nodes; i++)
    if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
      {
        free (loop_blocks);
        return -1;
      }

  preheader_block = loop_preheader_edge (loop)->src;
  gcc_assert (preheader_block);
  gcc_assert (loop_blocks[0] == loop->header);

  new_rgn_number = sel_create_new_region ();

  sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
  SET_BIT (bbs_in_loop_rgns, preheader_block->index);

  for (i = 0; i < loop->num_nodes; i++)
    {
      /* Add only those blocks that haven't been scheduled in the inner loop.
         The exception is the basic blocks with bookkeeping code - they should
         be added to the region (and they actually don't belong to the loop
         body, but to the region containing that loop body).  */

      gcc_assert (new_rgn_number >= 0);

      if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index))
        {
          sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
                                   new_rgn_number);
          SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index);
        }
    }

  free (loop_blocks);
  MARK_LOOP_FOR_PIPELINING (loop);

  return new_rgn_number;
}

/* Create a new region from preheader blocks LOOP_BLOCKS.  */
void
make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks)
{
  unsigned int i;
  int new_rgn_number = -1;
  basic_block bb;

  /* Basic block index, to be assigned to BLOCK_TO_BB.  */
  int bb_ord_index = 0;

  new_rgn_number = sel_create_new_region ();

  FOR_EACH_VEC_ELT (basic_block, *loop_blocks, i, bb)
    {
      gcc_assert (new_rgn_number >= 0);

      sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
    }

  VEC_free (basic_block, heap, *loop_blocks);
  gcc_assert (*loop_blocks == NULL);
}


/* Create region(s) from loop nest LOOP, such that inner loops will be
   pipelined before outer loops.  Returns true when a region for LOOP
   is created.  */
static bool
make_regions_from_loop_nest (struct loop *loop)
{
  struct loop *cur_loop;
  int rgn_number;

  /* Traverse all inner nodes of the loop.  */
  for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
    if (! TEST_BIT (bbs_in_loop_rgns, cur_loop->header->index))
      return false;

  /* At this moment all regular inner loops should have been pipelined.
     Try to create a region from this loop.  */
  rgn_number = make_region_from_loop (loop);

  if (rgn_number < 0)
    return false;

  VEC_safe_push (loop_p, heap, loop_nests, loop);
  return true;
}

/* Initialize the data structures needed for pipelining of outer loops.  */
void
sel_init_pipelining (void)
{
  /* Collect loop information to be used in outer loops pipelining.  */
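  /* Preheaders (fallthru ones included), recorded exits and marked
     irreducible regions are requested explicitly because the
     region-building code relies on them; see the uses of
     loop_preheader_edge () and BB_IRREDUCIBLE_LOOP above.  */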
/* Initialize the data structures needed for pipelining.  */
void
sel_init_pipelining (void)
{
  /* Collect loop information to be used in outer loops pipelining.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS
                       | LOOPS_HAVE_FALLTHRU_PREHEADERS
                       | LOOPS_HAVE_RECORDED_EXITS
                       | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
  current_loop_nest = NULL;

  bbs_in_loop_rgns = sbitmap_alloc (last_basic_block);
  sbitmap_zero (bbs_in_loop_rgns);

  recompute_rev_top_order ();
}

/* Returns a struct loop for region RGN.  */
loop_p
get_loop_nest_for_rgn (unsigned int rgn)
{
  /* Regions created with extend_rgns don't have corresponding loop nests,
     because they don't represent loops.  */
  if (rgn < VEC_length (loop_p, loop_nests))
    return VEC_index (loop_p, loop_nests, rgn);
  else
    return NULL;
}

/* True when LOOP was included into pipelining regions.  */
bool
considered_for_pipelining_p (struct loop *loop)
{
  if (loop_depth (loop) == 0)
    return false;

  /* Now, the loop could be too large or irreducible.  Check whether its
     region is in LOOP_NESTS.
     We determine the region number of LOOP as the region number of its
     latch.  We can't use the header here, because it could be the
     preheader that was just removed, which would give us the wrong region
     number.  The latch is safe to use, because make_region_from_loop
     refuses to pipeline a loop whose latch belongs to an inner loop.  */
  if (LOOP_MARKED_FOR_PIPELINING_P (loop))
    {
      int rgn = CONTAINING_RGN (loop->latch->index);

      gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
      return true;
    }

  return false;
}
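/* An illustrative note on the invariant used above, inferred from this
   file: regions for pipelined loops are created before any others, in the
   same order in which the loops are pushed onto LOOP_NESTS, so for such a
   loop the region number doubles as a vector index:

     int rgn = CONTAINING_RGN (loop->latch->index);

   makes get_loop_nest_for_rgn (rgn) return LOOP itself.  Regions created
   later (e.g. by make_regions_from_the_rest) fall past the end of
   LOOP_NESTS, and get_loop_nest_for_rgn returns NULL for them.  */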
/* Makes regions from the rest of the blocks, after loops are chosen
   for pipelining.  */
static void
make_regions_from_the_rest (void)
{
  int cur_rgn_blocks;
  int *loop_hdr;
  int i;

  basic_block bb;
  edge e;
  edge_iterator ei;
  int *degree;

  /* Index in rgn_bb_table where to start allocating new regions.  */
  cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;

  /* Make regions from all the remaining basic blocks -- those that don't
     belong to any loop or belong to irreducible loops.  Prepare the data
     structures for extend_rgns.  */

  /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
     LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the
     same loop.  */
  loop_hdr = XNEWVEC (int, last_basic_block);
  degree = XCNEWVEC (int, last_basic_block);

  /* For each basic block that belongs to some loop assign the number
     of the innermost loop it belongs to.  */
  for (i = 0; i < last_basic_block; i++)
    loop_hdr[i] = -1;

  FOR_EACH_BB (bb)
    {
      if (bb->loop_father && bb->loop_father->num != 0
          && !(bb->flags & BB_IRREDUCIBLE_LOOP))
        loop_hdr[bb->index] = bb->loop_father->num;
    }

  /* For each basic block the degree is calculated as the number of
     incoming edges that come from bbs that are not yet scheduled.
     Blocks that already belong to a loop region get degree -1, so that
     extend_rgns skips them, and their outgoing edges are not counted.  */
  FOR_EACH_BB (bb)
    {
      degree[bb->index] = 0;

      if (!TEST_BIT (bbs_in_loop_rgns, bb->index))
        {
          FOR_EACH_EDGE (e, ei, bb->preds)
            if (!TEST_BIT (bbs_in_loop_rgns, e->src->index))
              degree[bb->index]++;
        }
      else
        degree[bb->index] = -1;
    }

  extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);

  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB (bb)
    if (degree[bb->index] >= 0)
      {
        rgn_bb_table[cur_rgn_blocks] = bb->index;
        RGN_NR_BLOCKS (nr_regions) = 1;
        RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;
        CONTAINING_RGN (bb->index) = nr_regions++;
        BLOCK_TO_BB (bb->index) = 0;
      }

  free (degree);
  free (loop_hdr);
}
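/* An illustrative sketch with made-up block indices: for a diamond

        bb2
       /   \
     bb3   bb4
       \   /
        bb5

   where no block sits in a loop region, the degree computation above
   yields degree[3] == degree[4] == 1 (one edge each from bb2),
   degree[5] == 2, and degree[2] counts only the edge from ENTRY.  Had bb3
   been part of a loop region, it would get degree[3] == -1 and its edge
   would not be counted towards degree[5].  Whatever extend_rgns then
   leaves at a nonnegative degree becomes a single-block region in the
   last loop of the function above.  */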
/* Free data structures used in pipelining of loops.  */
void
sel_finish_pipelining (void)
{
  loop_iterator li;
  struct loop *loop;

  /* Release aux fields so we don't free them later by mistake.  */
  FOR_EACH_LOOP (li, loop, 0)
    loop->aux = NULL;

  loop_optimizer_finalize ();

  VEC_free (loop_p, heap, loop_nests);

  free (rev_top_order_index);
  rev_top_order_index = NULL;
}

/* This function replaces find_rgns when
   FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set.  */
void
sel_find_rgns (void)
{
  sel_init_pipelining ();
  extend_regions ();

  if (current_loops)
    {
      loop_p loop;
      loop_iterator li;

      FOR_EACH_LOOP (li, loop, (flag_sel_sched_pipelining_outer_loops
                                ? LI_FROM_INNERMOST
                                : LI_ONLY_INNERMOST))
        make_regions_from_loop_nest (loop);
    }

  /* Make regions from all the remaining basic blocks and schedule them.
     These blocks include blocks that don't belong to any loop or belong
     to irreducible loops.  */
  make_regions_from_the_rest ();

  /* We don't need bbs_in_loop_rgns anymore.  */
  sbitmap_free (bbs_in_loop_rgns);
  bbs_in_loop_rgns = NULL;
}

/* Add the preheader blocks from the previous loop to the current region,
   taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record
   them in *BBS.  This function is only used with
   -fsel-sched-pipelining-outer-loops.  */
void
sel_add_loop_preheaders (bb_vec_t *bbs)
{
  int i;
  basic_block bb;
  VEC(basic_block, heap) *preheader_blocks
    = LOOP_PREHEADER_BLOCKS (current_loop_nest);

  for (i = 0;
       VEC_iterate (basic_block, preheader_blocks, i, bb);
       i++)
    {
      VEC_safe_push (basic_block, heap, *bbs, bb);
      VEC_safe_push (basic_block, heap, last_added_blocks, bb);
      sel_add_bb (bb);
    }

  VEC_free (basic_block, heap, preheader_blocks);
}

/* While pipelining outer loops, returns TRUE if BB is a loop preheader.
   Note that the function must also work when pipelining_p is false,
   because it is used when deciding whether we should or should not
   reschedule pipelined code.  */
bool
sel_is_loop_preheader_p (basic_block bb)
{
  if (current_loop_nest)
    {
      struct loop *outer;

      if (preheader_removed)
        return false;

      /* Preheader is the first block in the region.  */
      if (BLOCK_TO_BB (bb->index) == 0)
        return true;

      /* We used to find a preheader with the topological information.
         Check that the above code is equivalent to what we did before.  */
      if (in_current_region_p (current_loop_nest->header))
        gcc_assert (!(BLOCK_TO_BB (bb->index)
                      < BLOCK_TO_BB (current_loop_nest->header->index)));

      /* Check that the latch block of an outer loop never ends up here:
         such a block cannot be a preheader of the current region.  */
      for (outer = loop_outer (current_loop_nest);
           outer;
           outer = loop_outer (outer))
        if (considered_for_pipelining_p (outer) && outer->latch == bb)
          gcc_unreachable ();
    }

  return false;
}

/* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
   can be removed, making the corresponding edge fallthrough (assuming that
   all basic blocks between JUMP_BB and DEST_BB are empty).  */
static bool
bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
{
  if (!onlyjump_p (BB_END (jump_bb))
      || tablejump_p (BB_END (jump_bb), NULL, NULL))
    return false;

  /* Several outgoing edges, an abnormal edge, or a jump destination other
     than DEST_BB all prevent removal.  */
  if (EDGE_COUNT (jump_bb->succs) != 1
      || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
      || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
    return false;

  /* Otherwise, the jump is removable.  */
  return true;
}
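/* A usage sketch (see sel_remove_loop_preheader below): once an empty
   preheader between PREV_BB and NEXT_BB has been deleted,

     if (bb_has_removable_jump_to_p (prev_bb, next_bb))
       redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);

   turns the now-redundant unconditional jump at the end of PREV_BB into a
   fallthrough.  A conditional jump, a tablejump, or an abnormal/crossing
   edge all make the predicate return false, and such jumps are kept.  */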
/* Removes the loop preheader from the current region and saves it in
   PREHEADER_BLOCKS of the father loop, so the blocks will be added later
   to the region that represents an outer loop.  */
static void
sel_remove_loop_preheader (void)
{
  int i, old_len;
  int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  basic_block bb;
  bool all_empty_p = true;
  VEC(basic_block, heap) *preheader_blocks;

  /* Assert before dereferencing CURRENT_LOOP_NEST, not after.  */
  gcc_assert (current_loop_nest);
  preheader_blocks = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));

  old_len = VEC_length (basic_block, preheader_blocks);

  /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS.  */
  for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
    {
      bb = BASIC_BLOCK (BB_TO_BLOCK (i));

      /* If the basic block belongs to the region, but doesn't belong to
         the corresponding loop, then it should be a preheader.  */
      if (sel_is_loop_preheader_p (bb))
        {
          VEC_safe_push (basic_block, heap, preheader_blocks, bb);
          if (BB_END (bb) != bb_note (bb))
            all_empty_p = false;
        }
    }

  /* Remove these blocks only after iterating over the whole region.  */
  for (i = VEC_length (basic_block, preheader_blocks) - 1;
       i >= old_len;
       i--)
    {
      bb = VEC_index (basic_block, preheader_blocks, i);
      sel_remove_bb (bb, false);
    }

  if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
    {
      if (!all_empty_p)
        /* Immediately create a new region from the preheader.  */
        make_region_from_loop_preheader (&preheader_blocks);
      else
        {
          /* If all preheader blocks are empty, don't create a new empty
             region.  Instead, remove them completely.  */
          FOR_EACH_VEC_ELT (basic_block, preheader_blocks, i, bb)
            {
              edge e;
              edge_iterator ei;
              basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;

              /* Redirect all incoming edges to the next basic block.  */
              for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
                {
                  if (! (e->flags & EDGE_FALLTHRU))
                    redirect_edge_and_branch (e, bb->next_bb);
                  else
                    redirect_edge_succ (e, bb->next_bb);
                }
              gcc_assert (BB_NOTE_LIST (bb) == NULL);
              delete_and_free_basic_block (bb);

              /* Check if, after deleting the preheader, there is an
                 unconditional jump in PREV_BB that leads to the next basic
                 block NEXT_BB.  If so, delete this jump and clear the data
                 sets of its basic block if it becomes empty.  */
              if (next_bb->prev_bb == prev_bb
                  && prev_bb != ENTRY_BLOCK_PTR
                  && bb_has_removable_jump_to_p (prev_bb, next_bb))
                {
                  redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
                  if (BB_END (prev_bb) == bb_note (prev_bb))
                    free_data_sets (prev_bb);
                }

              set_immediate_dominator (CDI_DOMINATORS, next_bb,
                                       recompute_dominator (CDI_DOMINATORS,
                                                            next_bb));
            }
        }
      VEC_free (basic_block, heap, preheader_blocks);
    }
  else
    /* Store preheader within the father's loop structure.  */
    SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
                               preheader_blocks);
}
#endif