/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "target.h"
#include "timevar.h"
#include "tree-pass.h"
#include "sched-int.h"
#include "ggc.h"
#include "tree.h"
#include "vec.h"
#include "langhooks.h"
#include "rtlhooks-def.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */

#ifdef INSN_SCHEDULING
#include "sel-sched-ir.h"
/* We don't have to use it except for sel_print_insn.  */
#include "sel-sched-dump.h"

/* A vector holding bb info for the whole scheduling pass.  */
VEC(sel_global_bb_info_def, heap) *sel_global_bb_info = NULL;

/* A vector holding bb info.  */
VEC(sel_region_bb_info_def, heap) *sel_region_bb_info = NULL;

/* A pool for allocating all lists.  */
alloc_pool sched_lists_pool;

/* This contains information about successors for compute_av_set.  */
struct succs_info current_succs;

/* Data structure to describe interaction with the generic scheduler utils.  */
static struct common_sched_info_def sel_common_sched_info;

/* The loop nest being pipelined.  */
struct loop *current_loop_nest;

/* LOOP_NESTS is a vector containing the corresponding loop nest for
   each region.  */
static VEC(loop_p, heap) *loop_nests = NULL;

/* Saves blocks already in loop regions, indexed by bb->index.  */
static sbitmap bbs_in_loop_rgns = NULL;

/* CFG hooks that are saved before changing the create_basic_block hook.  */
static struct cfg_hooks orig_cfg_hooks;


/* Array containing reverse topological index of function basic blocks,
   indexed by BB->INDEX.  */
static int *rev_top_order_index = NULL;

/* Length of the above array.  */
static int rev_top_order_index_len = -1;

/* A regset pool structure.  */
static struct
{
  /* The stack to which regsets are returned.  */
  regset *v;

  /* Its pointer, i.e. the number of regsets currently in V.  */
  int n;

  /* Its allocated size.  */
  int s;

  /* In VV we save all generated regsets so that, when destructing the
     pool, we can compare it with V and check that every regset was
     returned back to the pool.  */
  regset *vv;

  /* The pointer of the VV stack.  */
  int nn;

  /* Its size.  */
  int ss;

  /* The difference between allocated and returned regsets.  */
  int diff;
} regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
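
/* A minimal usage sketch for the regset pool (illustrative only, not part
   of the pass itself).  Every regset obtained from the pool must
   eventually be returned to it, or free_regset_pool () will assert on the
   leak via REGSET_POOL.DIFF:

     regset used = get_clear_regset_from_pool ();
     ...collect registers into USED...
     return_regset_to_pool (used);

   get_regset_from_pool () is cheaper but may hand back a regset with
   stale bits from its previous user.  */
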
/* This represents the nop pool.  */
static struct
{
  /* The vector which holds previously emitted nops.  */
  insn_t *v;

  /* Its pointer.  */
  int n;

  /* Its size.  */
  int s;
} nop_pool = { NULL, 0, 0 };

/* The pool for basic block notes.  */
static rtx_vec_t bb_note_pool;

/* A NOP pattern used to emit placeholder insns.  */
rtx nop_pattern = NULL_RTX;
/* A special instruction that resides in EXIT_BLOCK.
   EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK.  */
rtx exit_insn = NULL_RTX;

/* TRUE if, while scheduling the current region, which is a loop, its
   preheader was removed.  */
bool preheader_removed = false;


/* Forward static declarations.  */
static void fence_clear (fence_t);

static void deps_init_id (idata_t, insn_t, bool);
static void init_id_from_df (idata_t, insn_t, bool);
static expr_t set_insn_init (expr_t, vinsn_t, int);

static void cfg_preds (basic_block, insn_t **, int *);
static void prepare_insn_expr (insn_t, int);
static void free_history_vect (VEC (expr_history_def, heap) **);

static void move_bb_info (basic_block, basic_block);
static void remove_empty_bb (basic_block, bool);
static void sel_merge_blocks (basic_block, basic_block);
static void sel_remove_loop_preheader (void);
static bool bb_has_removable_jump_to_p (basic_block, basic_block);

static bool insn_is_the_only_one_in_bb_p (insn_t);
static void create_initial_data_sets (basic_block);

static void free_av_set (basic_block);
static void invalidate_av_set (basic_block);
static void extend_insn_data (void);
static void sel_init_new_insn (insn_t, int);
static void finish_insns (void);

/* Various list functions.  */

/* Copy an instruction list L.  */
ilist_t
ilist_copy (ilist_t l)
{
  ilist_t head = NULL, *tailp = &head;

  while (l)
    {
      ilist_add (tailp, ILIST_INSN (l));
      tailp = &ILIST_NEXT (*tailp);
      l = ILIST_NEXT (l);
    }

  return head;
}

/* Invert an instruction list L.  */
ilist_t
ilist_invert (ilist_t l)
{
  ilist_t res = NULL;

  while (l)
    {
      ilist_add (&res, ILIST_INSN (l));
      l = ILIST_NEXT (l);
    }

  return res;
}

/* Add a new boundary to the LP list with parameters TO, PTR, and DC.  */
void
blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
{
  bnd_t bnd;

  _list_add (lp);
  bnd = BLIST_BND (*lp);

  BND_TO (bnd) = to;
  BND_PTR (bnd) = ptr;
  BND_AV (bnd) = NULL;
  BND_AV1 (bnd) = NULL;
  BND_DC (bnd) = dc;
}

/* Remove the boundary pointed to by LP from the list.  */
void
blist_remove (blist_t *lp)
{
  bnd_t b = BLIST_BND (*lp);

  av_set_clear (&BND_AV (b));
  av_set_clear (&BND_AV1 (b));
  ilist_clear (&BND_PTR (b));

  _list_remove (lp);
}

/* Init a fence tail L.  */
void
flist_tail_init (flist_tail_t l)
{
  FLIST_TAIL_HEAD (l) = NULL;
  FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
}

/* Try to find a fence corresponding to INSN in L.  */
fence_t
flist_lookup (flist_t l, insn_t insn)
{
  while (l)
    {
      if (FENCE_INSN (FLIST_FENCE (l)) == insn)
        return FLIST_FENCE (l);

      l = FLIST_NEXT (l);
    }

  return NULL;
}
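
/* An illustrative usage sketch for the instruction list helpers above
   (ilist_copy / ilist_invert; not part of the pass itself):

     ilist_t copy = ilist_copy (orig);     -- same order as ORIG
     ilist_t rev  = ilist_invert (orig);   -- reversed order
     ilist_clear (&copy);
     ilist_clear (&rev);

   ilist_invert is effectively ilist_copy with insertion always at the
   head of the result, which is what reverses the order.  */
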
/* Init the fields of F before running fill_insns.  */
static void
init_fence_for_scheduling (fence_t f)
{
  FENCE_BNDS (f) = NULL;
  FENCE_PROCESSED_P (f) = false;
  FENCE_SCHEDULED_P (f) = false;
}

/* Add a new fence consisting of INSN and STATE to the list pointed to by LP.  */
static void
flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
           insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
           int *ready_ticks, int ready_ticks_size, insn_t sched_next,
           int cycle, int cycle_issued_insns, int issue_more,
           bool starts_cycle_p, bool after_stall_p)
{
  fence_t f;

  _list_add (lp);
  f = FLIST_FENCE (*lp);

  FENCE_INSN (f) = insn;

  gcc_assert (state != NULL);
  FENCE_STATE (f) = state;

  FENCE_CYCLE (f) = cycle;
  FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
  FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
  FENCE_AFTER_STALL_P (f) = after_stall_p;

  gcc_assert (dc != NULL);
  FENCE_DC (f) = dc;

  gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
  FENCE_TC (f) = tc;

  FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
  FENCE_ISSUE_MORE (f) = issue_more;
  FENCE_EXECUTING_INSNS (f) = executing_insns;
  FENCE_READY_TICKS (f) = ready_ticks;
  FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
  FENCE_SCHED_NEXT (f) = sched_next;

  init_fence_for_scheduling (f);
}

/* Remove the head node of the list pointed to by LP.  */
static void
flist_remove (flist_t *lp)
{
  if (FENCE_INSN (FLIST_FENCE (*lp)))
    fence_clear (FLIST_FENCE (*lp));
  _list_remove (lp);
}

/* Clear the fence list pointed to by LP.  */
void
flist_clear (flist_t *lp)
{
  while (*lp)
    flist_remove (lp);
}

/* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL.  */
void
def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
{
  def_t d;

  _list_add (dl);
  d = DEF_LIST_DEF (*dl);

  d->orig_insn = original_insn;
  d->crosses_call = crosses_call;
}


/* Functions to work with target contexts.  */

/* Bulk target context.  It is convenient for debugging purposes to ensure
   that there are no uninitialized (null) target contexts.  */
static tc_t bulk_tc = (tc_t) 1;

/* Target hooks wrappers.  In the future we can provide some default
   implementations for them.  */

/* Allocate a store for the target context.  */
static tc_t
alloc_target_context (void)
{
  return (targetm.sched.alloc_sched_context
          ? targetm.sched.alloc_sched_context () : bulk_tc);
}

/* Init target context TC.
   If CLEAN_P is true, then initialize TC as if at the beginning of the
   scheduler.  Otherwise, copy the current backend context to TC.  */
static void
init_target_context (tc_t tc, bool clean_p)
{
  if (targetm.sched.init_sched_context)
    targetm.sched.init_sched_context (tc, clean_p);
}

/* Allocate and initialize a target context.  Meaning of CLEAN_P is the
   same as in init_target_context ().  */
tc_t
create_target_context (bool clean_p)
{
  tc_t tc = alloc_target_context ();

  init_target_context (tc, clean_p);
  return tc;
}

/* Copy TC to the current backend context.  */
void
set_target_context (tc_t tc)
{
  if (targetm.sched.set_sched_context)
    targetm.sched.set_sched_context (tc);
}
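
/* A usage sketch for the target context wrappers (illustrative only).
   The typical lifecycle mirrors what the fence code below does:

     tc_t tc = create_target_context (true);   -- fresh context
     ...
     set_target_context (tc);                  -- make it current
     reset_target_context (tc, true);          -- reuse as a clean one
     delete_target_context (tc);               -- clear and free

   All of these degrade to no-ops (or to BULK_TC) when the target does
   not provide the corresponding scheduling hooks.  */
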
/* TC is about to be destroyed.  Free any internal data.  */
static void
clear_target_context (tc_t tc)
{
  if (targetm.sched.clear_sched_context)
    targetm.sched.clear_sched_context (tc);
}

/* Clear and free it.  */
static void
delete_target_context (tc_t tc)
{
  clear_target_context (tc);

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (tc);
}

/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook.  */
static void
copy_target_context (tc_t to, tc_t from)
{
  tc_t tmp = create_target_context (false);

  set_target_context (from);
  init_target_context (to, false);

  set_target_context (tmp);
  delete_target_context (tmp);
}

/* Create a copy of TC.  */
static tc_t
create_copy_of_target_context (tc_t tc)
{
  tc_t copy = alloc_target_context ();

  copy_target_context (copy, tc);

  return copy;
}

/* Clear TC and initialize it according to CLEAN_P.  The meaning of CLEAN_P
   is the same as in init_target_context ().  */
void
reset_target_context (tc_t tc, bool clean_p)
{
  clear_target_context (tc);
  init_target_context (tc, clean_p);
}

/* Functions to work with dependence contexts.
   Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for
   dependence context.  It accumulates information about processed insns to
   decide if the current insn is dependent on the processed ones.  */

/* Make a copy of FROM in TO.  */
static void
copy_deps_context (deps_t to, deps_t from)
{
  init_deps (to, false);
  deps_join (to, from);
}

/* Allocate store for a dependence context.  */
static deps_t
alloc_deps_context (void)
{
  return XNEW (struct deps_desc);
}

/* Allocate and initialize a dependence context.  */
static deps_t
create_deps_context (void)
{
  deps_t dc = alloc_deps_context ();

  init_deps (dc, false);
  return dc;
}

/* Create a copy of FROM.  */
static deps_t
create_copy_of_deps_context (deps_t from)
{
  deps_t to = alloc_deps_context ();

  copy_deps_context (to, from);
  return to;
}

/* Clean up internal data of DC.  */
static void
clear_deps_context (deps_t dc)
{
  free_deps (dc);
}

/* Clear and free DC.  */
static void
delete_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  free (dc);
}

/* Clear and init DC.  */
static void
reset_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  init_deps (dc, false);
}

/* This structure describes the dependence analysis hooks for advancing
   dependence context.  */
static struct sched_deps_info_def advance_deps_context_sched_deps_info =
  {
    NULL,

    NULL, /* start_insn */
    NULL, /* finish_insn */
    NULL, /* start_lhs */
    NULL, /* finish_lhs */
    NULL, /* start_rhs */
    NULL, /* finish_rhs */
    haifa_note_reg_set,
    haifa_note_reg_clobber,
    haifa_note_reg_use,
    NULL, /* note_mem_dep */
    NULL, /* note_dep */

    0, 0, 0
  };

/* Process INSN and add its impact on DC.  */
void
advance_deps_context (deps_t dc, insn_t insn)
{
  sched_deps_info = &advance_deps_context_sched_deps_info;
  deps_analyze_insn (dc, insn);
}
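
/* An illustrative sketch of how a dependence context is typically used
   (not part of the pass itself): create one, feed it the already-processed
   insns, then query dependencies against it.

     deps_t dc = create_deps_context ();
     for (insn = first; insn != after_last; insn = NEXT_INSN (insn))
       advance_deps_context (dc, insn);
     ...
     delete_deps_context (dc);

   This is the discipline the fences below follow: each fence owns a deps
   context that summarizes everything scheduled before it.  */
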
/* Functions to work with DFA states.  */

/* Allocate store for a DFA state.  */
static state_t
state_alloc (void)
{
  return xmalloc (dfa_state_size);
}

/* Allocate and initialize a DFA state.  */
static state_t
state_create (void)
{
  state_t state = state_alloc ();

  state_reset (state);
  advance_state (state);
  return state;
}

/* Free DFA state.  */
static void
state_free (state_t state)
{
  free (state);
}

/* Make a copy of FROM in TO.  */
static void
state_copy (state_t to, state_t from)
{
  memcpy (to, from, dfa_state_size);
}

/* Create a copy of FROM.  */
static state_t
state_create_copy (state_t from)
{
  state_t to = state_alloc ();

  state_copy (to, from);
  return to;
}


/* Functions to work with fences.  */

/* Clear the fence.  */
static void
fence_clear (fence_t f)
{
  state_t s = FENCE_STATE (f);
  deps_t dc = FENCE_DC (f);
  void *tc = FENCE_TC (f);

  ilist_clear (&FENCE_BNDS (f));

  gcc_assert ((s != NULL && dc != NULL && tc != NULL)
              || (s == NULL && dc == NULL && tc == NULL));

  free (s);

  if (dc != NULL)
    delete_deps_context (dc);

  if (tc != NULL)
    delete_target_context (tc);
  VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
  free (FENCE_READY_TICKS (f));
  FENCE_READY_TICKS (f) = NULL;
}

/* Init a list of fences with successors of OLD_FENCE.  */
void
init_fences (insn_t old_fence)
{
  insn_t succ;
  succ_iterator si;
  bool first = true;
  int ready_ticks_size = get_max_uid () + 1;

  FOR_EACH_SUCC_1 (succ, si, old_fence,
                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      if (first)
        first = false;
      else
        gcc_assert (flag_sel_sched_pipelining_outer_loops);

      flist_add (&fences, succ,
                 state_create (),
                 create_deps_context () /* dc */,
                 create_target_context (true) /* tc */,
                 NULL_RTX /* last_scheduled_insn */,
                 NULL, /* executing_insns */
                 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
                 ready_ticks_size,
                 NULL_RTX /* sched_next */,
                 1 /* cycle */, 0 /* cycle_issued_insns */,
                 issue_rate, /* issue_more */
                 1 /* starts_cycle_p */, 0 /* after_stall_p */);
    }
}

/* Merge two fences (filling the fields of fence F with resulting values)
   according to the following rules: 1) state, target context and last
   scheduled insn are propagated from the fallthrough edge if it is
   available; 2) deps context and cycle are propagated from the more
   probable edge; 3) all other fields are set to the corresponding
   constant values.

   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
   READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
   and AFTER_STALL_P are the corresponding fields of the second fence.  */
static void
merge_fences (fence_t f, insn_t insn,
              state_t state, deps_t dc, void *tc,
              rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
              int *ready_ticks, int ready_ticks_size,
              rtx sched_next, int cycle, int issue_more, bool after_stall_p)
{
  insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);

  gcc_assert (sel_bb_head_p (FENCE_INSN (f))
              && !sched_next && !FENCE_SCHED_NEXT (f));

  /* Check if we can decide which path the fences came from.
     If we can't (or don't want to), reset everything.  */
  if (last_scheduled_insn == NULL
      || last_scheduled_insn_old == NULL
      /* This is a case when INSN is reachable on several paths from
         one insn (this can happen when pipelining of outer loops is on and
         there are two edges: one going around the inner loop and the other
         right through it; in such a case, just reset everything).  */
      || last_scheduled_insn == last_scheduled_insn_old)
    {
      state_reset (FENCE_STATE (f));
      state_free (state);

      reset_deps_context (FENCE_DC (f));
      delete_deps_context (dc);

      reset_target_context (FENCE_TC (f), true);
      delete_target_context (tc);

      if (cycle > FENCE_CYCLE (f))
        FENCE_CYCLE (f) = cycle;

      FENCE_LAST_SCHEDULED_INSN (f) = NULL;
      FENCE_ISSUE_MORE (f) = issue_rate;
      VEC_free (rtx, gc, executing_insns);
      free (ready_ticks);
      if (FENCE_EXECUTING_INSNS (f))
        VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
                          VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
      if (FENCE_READY_TICKS (f))
        memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
    }
  else
    {
      edge edge_old = NULL, edge_new = NULL;
      edge candidate;
      succ_iterator si;
      insn_t succ;

      /* Find the fallthrough edge.  */
      gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
      candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);

      if (!candidate
          || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
              && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
        {
          /* No fallthrough edge leading to the basic block of INSN.  */
          state_reset (FENCE_STATE (f));
          state_free (state);

          reset_target_context (FENCE_TC (f), true);
          delete_target_context (tc);

          FENCE_LAST_SCHEDULED_INSN (f) = NULL;
          FENCE_ISSUE_MORE (f) = issue_rate;
        }
      else
        if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
          {
            /* It would be strange if the same insn were the successor of
               several fallthrough edges.  */
            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn_old));

            state_free (FENCE_STATE (f));
            FENCE_STATE (f) = state;

            delete_target_context (FENCE_TC (f));
            FENCE_TC (f) = tc;

            FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
            FENCE_ISSUE_MORE (f) = issue_more;
          }
        else
          {
            /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched.  */
            state_free (state);
            delete_target_context (tc);

            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn));
          }

      /* Find the edge of the first predecessor
         (last_scheduled_insn_old->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* The same successor is not allowed from several edges.  */
              gcc_assert (!edge_old);
              edge_old = si.e1;
            }
        }
      /* Find the edge of the second predecessor
         (last_scheduled_insn->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* The same successor is not allowed from several edges.  */
              gcc_assert (!edge_new);
              edge_new = si.e1;
            }
        }

      /* Check if we can choose the most probable predecessor.  */
      if (edge_old == NULL || edge_new == NULL)
        {
          reset_deps_context (FENCE_DC (f));
          delete_deps_context (dc);
          VEC_free (rtx, gc, executing_insns);
          free (ready_ticks);

          FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
          if (FENCE_EXECUTING_INSNS (f))
            VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
                              VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
          if (FENCE_READY_TICKS (f))
            memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
        }
      else
        if (edge_new->probability > edge_old->probability)
          {
            delete_deps_context (FENCE_DC (f));
            FENCE_DC (f) = dc;
            VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
            FENCE_EXECUTING_INSNS (f) = executing_insns;
            free (FENCE_READY_TICKS (f));
            FENCE_READY_TICKS (f) = ready_ticks;
            FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
            FENCE_CYCLE (f) = cycle;
          }
        else
          {
            /* Leave DC and CYCLE untouched.  */
            delete_deps_context (dc);
            VEC_free (rtx, gc, executing_insns);
            free (ready_ticks);
          }
    }

  /* Fill the remaining invariant fields.  */
  if (after_stall_p)
    FENCE_AFTER_STALL_P (f) = 1;

  FENCE_ISSUED_INSNS (f) = 0;
  FENCE_STARTS_CYCLE_P (f) = 1;
  FENCE_SCHED_NEXT (f) = NULL;
}

/* Add a new fence to the NEW_FENCES list, initializing it from all
   other parameters.  */
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
               state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
               VEC(rtx, gc) *executing_insns, int *ready_ticks,
               int ready_ticks_size, rtx sched_next, int cycle,
               int cycle_issued_insns, int issue_rate,
               bool starts_cycle_p, bool after_stall_p)
{
  fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);

  if (! f)
    {
      flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
                 last_scheduled_insn, executing_insns, ready_ticks,
                 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
                 issue_rate, starts_cycle_p, after_stall_p);

      FLIST_TAIL_TAILP (new_fences)
        = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
    }
  else
    {
      merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
                    executing_insns, ready_ticks, ready_ticks_size,
                    sched_next, cycle, issue_rate, after_stall_p);
    }
}

/* Move the first fence in the OLD_FENCES list to NEW_FENCES.  */
void
move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
{
  fence_t f, old;
  flist_t *tailp = FLIST_TAIL_TAILP (new_fences);

  old = FLIST_FENCE (old_fences);
  f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
                    FENCE_INSN (FLIST_FENCE (old_fences)));
  if (f)
    {
      merge_fences (f, old->insn, old->state, old->dc, old->tc,
                    old->last_scheduled_insn, old->executing_insns,
                    old->ready_ticks, old->ready_ticks_size,
                    old->sched_next, old->cycle, old->issue_more,
                    old->after_stall_p);
    }
  else
    {
      _list_add (tailp);
      FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
      *FLIST_FENCE (*tailp) = *old;
      init_fence_for_scheduling (FLIST_FENCE (*tailp));
    }
  FENCE_INSN (old) = NULL;
}
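
/* A sketch of the invariant maintained by add_to_fences (illustrative
   only): at most one fence per insn lives in a fence list.  When two
   scheduling paths reach the same insn, the second arrival is folded
   into the existing fence via merge_fences instead of being appended:

     add_to_fences (tail, insn, ...);   -- creates the fence for INSN
     add_to_fences (tail, insn, ...);   -- same INSN: merges, no new node

   This is why move_fence_to_fences first looks the insn up with
   flist_lookup before deciding whether to merge or to link the old node
   into the new list.  */
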
/* Add a new fence to the NEW_FENCES list and initialize most of its data
   as a clean one.  */
void
add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int ready_ticks_size = get_max_uid () + 1;

  add_to_fences (new_fences,
                 succ, state_create (), create_deps_context (),
                 create_target_context (true),
                 NULL_RTX, NULL,
                 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
                 NULL_RTX, FENCE_CYCLE (fence) + 1,
                 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
}

/* Add a new fence to the NEW_FENCES list and initialize all of its data
   from FENCE and SUCC.  */
void
add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int * new_ready_ticks
    = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));

  memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
          FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
  add_to_fences (new_fences,
                 succ, state_create_copy (FENCE_STATE (fence)),
                 create_copy_of_deps_context (FENCE_DC (fence)),
                 create_copy_of_target_context (FENCE_TC (fence)),
                 FENCE_LAST_SCHEDULED_INSN (fence),
                 VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
                 new_ready_ticks,
                 FENCE_READY_TICKS_SIZE (fence),
                 FENCE_SCHED_NEXT (fence),
                 FENCE_CYCLE (fence),
                 FENCE_ISSUED_INSNS (fence),
                 FENCE_ISSUE_MORE (fence),
                 FENCE_STARTS_CYCLE_P (fence),
                 FENCE_AFTER_STALL_P (fence));
}


/* Functions to work with regset and nop pools.  */

/* Return a new regset from the pool.  It might have some bits set from
   previous usage.  */
regset
get_regset_from_pool (void)
{
  regset rs;

  if (regset_pool.n != 0)
    rs = regset_pool.v[--regset_pool.n];
  else
    /* We need to create the regset.  */
    {
      rs = ALLOC_REG_SET (&reg_obstack);

      if (regset_pool.nn == regset_pool.ss)
        regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
                                     (regset_pool.ss = 2 * regset_pool.ss + 1));
      regset_pool.vv[regset_pool.nn++] = rs;
    }

  regset_pool.diff++;

  return rs;
}

/* Same as above, but returns the empty regset.  */
regset
get_clear_regset_from_pool (void)
{
  regset rs = get_regset_from_pool ();

  CLEAR_REG_SET (rs);
  return rs;
}

/* Return regset RS to the pool for future use.  */
void
return_regset_to_pool (regset rs)
{
  gcc_assert (rs);
  regset_pool.diff--;

  if (regset_pool.n == regset_pool.s)
    regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
                                (regset_pool.s = 2 * regset_pool.s + 1));
  regset_pool.v[regset_pool.n++] = rs;
}

#ifdef ENABLE_CHECKING
/* This is used as a qsort callback for sorting regset pool stacks.
   X and XX are addresses of two regsets.  They are never equal.  */
static int
cmp_v_in_regset_pool (const void *x, const void *xx)
{
  return *((const regset *) x) - *((const regset *) xx);
}
#endif

/* Free the regset pool, possibly checking for memory leaks.  */
void
free_regset_pool (void)
{
#ifdef ENABLE_CHECKING
  {
    regset *v = regset_pool.v;
    int i = 0;
    int n = regset_pool.n;

    regset *vv = regset_pool.vv;
    int ii = 0;
    int nn = regset_pool.nn;

    int diff = 0;

    gcc_assert (n <= nn);

    /* Sort both vectors so it will be possible to compare them.  */
    qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
    qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);

    while (ii < nn)
      {
        if (v[i] == vv[ii])
          i++;
        else
          /* VV[II] was lost.  */
          diff++;

        ii++;
      }

    gcc_assert (diff == regset_pool.diff);
  }
#endif

  /* If this is not true, we have a memory leak.  */
  gcc_assert (regset_pool.diff == 0);

  while (regset_pool.n)
    {
      --regset_pool.n;
      FREE_REG_SET (regset_pool.v[regset_pool.n]);
    }

  free (regset_pool.v);
  regset_pool.v = NULL;
  regset_pool.s = 0;

  free (regset_pool.vv);
  regset_pool.vv = NULL;
  regset_pool.nn = 0;
  regset_pool.ss = 0;

  regset_pool.diff = 0;
}


/* Functions to work with nop pools.  NOP insns are used as temporary
   placeholders of the insns being scheduled to allow correct update of
   the data sets.  When the update is finished, NOPs are deleted.  */

/* A vinsn that is used to represent a nop.  This vinsn is shared among all
   nops sel-sched generates.  */
static vinsn_t nop_vinsn = NULL;

/* Emit a nop before INSN, taking it from the pool.  */
insn_t
get_nop_from_pool (insn_t insn)
{
  insn_t nop;
  bool old_p = nop_pool.n != 0;
  int flags;

  if (old_p)
    nop = nop_pool.v[--nop_pool.n];
  else
    nop = nop_pattern;

  nop = emit_insn_before (nop, insn);

  if (old_p)
    flags = INSN_INIT_TODO_SSID;
  else
    flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;

  set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
  sel_init_new_insn (nop, flags);

  return nop;
}

/* Remove NOP from the instruction stream and return it to the pool.  */
void
return_nop_to_pool (insn_t nop, bool full_tidying)
{
  gcc_assert (INSN_IN_STREAM_P (nop));
  sel_remove_insn (nop, false, full_tidying);

  if (nop_pool.n == nop_pool.s)
    nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
                             (nop_pool.s = 2 * nop_pool.s + 1));
  nop_pool.v[nop_pool.n++] = nop;
}

/* Free the nop pool.  */
void
free_nop_pool (void)
{
  nop_pool.n = 0;
  nop_pool.s = 0;
  free (nop_pool.v);
  nop_pool.v = NULL;
}


/* Skip unspec to support ia64 speculation.  Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY in case something needs to be skipped.  */
static int
skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx *ny)
{
  const_rtx x = *xx;
  const_rtx y = *yy;

  if (GET_CODE (x) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (x)))
    {
      *nx = XVECEXP (x, 0, 0);
      *ny = CONST_CAST_RTX (y);
      return 1;
    }

  if (GET_CODE (y) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (y)))
    {
      *nx = CONST_CAST_RTX (x);
      *ny = XVECEXP (y, 0, 0);
      return 1;
    }

  return 0;
}
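
/* An illustrative note on the callback above and the one below (a sketch
   of how they are wired, not new behavior): both are passed to the
   generic rtx walkers so that a speculative (unspec ...) wrapper compares
   and hashes like its first operand.  vinsn_equal_p does

     rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), skip_unspecs_callback);

   and vinsn_init hashes with hash_rtx_cb and hash_with_unspec_callback,
   so an insn and its ia64-style speculative form land in the same
   equivalence class.  */
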
/* Callback, called from hash_rtx_cb.  Helps to hash an UNSPEC rtx X in a
   correct way to support ia64 speculation.  When changes are needed, a new
   rtx X and a new mode NMODE are written, and the callback returns true.  */
static int
hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
                           rtx *nx, enum machine_mode *nmode)
{
  if (GET_CODE (x) == UNSPEC
      && targetm.sched.skip_rtx_p
      && targetm.sched.skip_rtx_p (x))
    {
      *nx = XVECEXP (x, 0, 0);
      *nmode = VOIDmode;
      return 1;
    }

  return 0;
}

/* Return true if LHS and RHS are ok to be scheduled separately.  */
static bool
lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
{
  if (lhs == NULL || rhs == NULL)
    return false;

  /* Do not schedule CONST, CONST_INT, CONST_DOUBLE etc as rhs: there is
     no point in using a register if a constant can be used.  Moreover,
     scheduling a constant as rhs may lead to a mode mismatch, because
     constants don't have modes but could be merged from branches where
     the same constant is used in different modes.  */
  if (CONSTANT_P (rhs))
    return false;

  /* ??? Do not rename predicate registers to avoid ICEs in bundling.  */
  if (COMPARISON_P (rhs))
    return false;

  /* Do not allow single REG to be an rhs.  */
  if (REG_P (rhs))
    return false;

  /* See comment at find_used_regs_1 (*1) for explanation of this
     restriction.  */
  /* FIXME: remove this later.  */
  if (MEM_P (lhs))
    return false;

  /* This will filter all tricky things like ZERO_EXTRACT etc.
     For now we don't handle it.  */
  if (!REG_P (lhs) && !MEM_P (lhs))
    return false;

  return true;
}

/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When
   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This
   is used e.g. for insns from recovery blocks.  */
static void
vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
{
  hash_rtx_callback_function hrcf;
  int insn_class;

  VINSN_INSN_RTX (vi) = insn;
  VINSN_COUNT (vi) = 0;
  vi->cost = -1;

  if (INSN_NOP_P (insn))
    return;

  if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
    init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
  else
    deps_init_id (VINSN_ID (vi), insn, force_unique_p);

  /* Hash vinsn depending on whether it is separable or not.  */
  hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
  if (VINSN_SEPARABLE_P (vi))
    {
      rtx rhs = VINSN_RHS (vi);

      VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
                                         VOIDmode, NULL, NULL,
                                         false, hrcf);
    }
  else
    {
      VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
    }

  insn_class = haifa_classify_insn (insn);
  if (insn_class >= 2
      && (!targetm.sched.get_insn_spec_ds
          || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
              == 0)))
    VINSN_MAY_TRAP_P (vi) = true;
  else
    VINSN_MAY_TRAP_P (vi) = false;
}

/* Indicate that VI has become part of an rtx object.  */
void
vinsn_attach (vinsn_t vi)
{
  /* Assert that VI is not pending for deletion.  */
  gcc_assert (VINSN_INSN_RTX (vi));

  VINSN_COUNT (vi)++;
}
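
/* An illustrative sketch of the vinsn reference-counting discipline (not
   part of the pass itself): every holder of a vinsn attaches it, and the
   last detach deletes it.

     vinsn_t vi = vinsn_create (insn, false);   -- count is 0
     vinsn_attach (vi);                         -- count 0 -> 1
     ...
     vinsn_detach (vi);                         -- count 1 -> 0: deleted

   Expressions attach their vinsn in init_expr and detach it in
   clear_expr, so copies of an expr share one vinsn.  */
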
/* Create and init VI from INSN.  Use FORCE_UNIQUE_P for determining the
   correct VINSN_TYPE (VI).  */
static vinsn_t
vinsn_create (insn_t insn, bool force_unique_p)
{
  vinsn_t vi = XCNEW (struct vinsn_def);

  vinsn_init (vi, insn, force_unique_p);
  return vi;
}

/* Return a copy of VI.  When REATTACH_P is true, detach VI and attach
   the copy.  */
vinsn_t
vinsn_copy (vinsn_t vi, bool reattach_p)
{
  rtx copy;
  bool unique = VINSN_UNIQUE_P (vi);
  vinsn_t new_vi;

  copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
  new_vi = create_vinsn_from_insn_rtx (copy, unique);
  if (reattach_p)
    {
      vinsn_detach (vi);
      vinsn_attach (new_vi);
    }

  return new_vi;
}

/* Delete the VI vinsn and free its data.  */
static void
vinsn_delete (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) == 0);

  if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
    {
      return_regset_to_pool (VINSN_REG_SETS (vi));
      return_regset_to_pool (VINSN_REG_USES (vi));
      return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
    }

  free (vi);
}

/* Indicate that VI is no longer a part of some rtx object.
   Remove VI if it is no longer needed.  */
void
vinsn_detach (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) > 0);

  if (--VINSN_COUNT (vi) == 0)
    vinsn_delete (vi);
}

/* Return TRUE if VI is a conditional branch.  */
bool
vinsn_cond_branch_p (vinsn_t vi)
{
  insn_t insn;

  if (!VINSN_UNIQUE_P (vi))
    return false;

  insn = VINSN_INSN_RTX (vi);
  if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
    return false;

  return control_flow_insn_p (insn);
}

/* Return the latency of INSN.  */
static int
sel_insn_rtx_cost (rtx insn)
{
  int cost;

  /* A USE insn, or something else we don't need to
     understand.  We can't pass these directly to
     result_ready_cost or insn_default_latency because it will
     trigger a fatal error for unrecognizable insns.  */
  if (recog_memoized (insn) < 0)
    cost = 0;
  else
    {
      cost = insn_default_latency (insn);

      if (cost < 0)
        cost = 0;
    }

  return cost;
}

/* Return the cost of the VI.
   !!! FIXME: Unify with haifa-sched.c: insn_cost ().  */
int
sel_vinsn_cost (vinsn_t vi)
{
  int cost = vi->cost;

  if (cost < 0)
    {
      cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
      vi->cost = cost;
    }

  return cost;
}


/* Functions for insn emitting.  */

/* Emit a new insn after AFTER based on PATTERN and initialize its data
   from EXPR and SEQNO.  */
insn_t
sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
{
  insn_t new_insn;

  gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);

  new_insn = emit_insn_after (pattern, after);
  set_insn_init (expr, NULL, seqno);
  sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);

  return new_insn;
}

/* Force newly generated vinsns to be unique.  */
static bool init_insn_force_unique_p = false;
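
/* An illustrative note on the emitters in this section (a sketch, not a
   definitive recipe): each one pairs an rtl-level emit with sel-sched
   initialization, so a new insn always enters the stream with its luid,
   seqno and expr data set up.  For example,

     insn_t i = sel_gen_insn_from_rtx_after (pat, expr, seqno, after);

   is roughly emit_insn_after (pat, after) followed by set_insn_init and
   sel_init_new_insn with the LUID and SSID todo flags.  */
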
/* Emit new speculation recovery insn after AFTER based on PATTERN and
   initialize its data from EXPR and SEQNO.  */
insn_t
sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
                                      insn_t after)
{
  insn_t insn;

  gcc_assert (!init_insn_force_unique_p);

  init_insn_force_unique_p = true;
  insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
  CANT_MOVE (insn) = 1;
  init_insn_force_unique_p = false;

  return insn;
}

/* Emit new insn after AFTER based on EXPR and SEQNO.  If VINSN is not NULL,
   take it as a new vinsn instead of EXPR's vinsn.
   We simplify insns later, after scheduling region in
   simplify_changed_insns.  */
insn_t
sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
                              insn_t after)
{
  expr_t emit_expr;
  insn_t insn;
  int flags;

  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
                             seqno);
  insn = EXPR_INSN_RTX (emit_expr);
  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));

  flags = INSN_INIT_TODO_SSID;
  if (INSN_LUID (insn) == 0)
    flags |= INSN_INIT_TODO_LUID;
  sel_init_new_insn (insn, flags);

  return insn;
}

/* Move insn from EXPR after AFTER.  */
insn_t
sel_move_insn (expr_t expr, int seqno, insn_t after)
{
  insn_t insn = EXPR_INSN_RTX (expr);
  basic_block bb = BLOCK_FOR_INSN (after);
  insn_t next = NEXT_INSN (after);

  /* Assert that in move_op we disconnected this insn properly.  */
  gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
  PREV_INSN (insn) = after;
  NEXT_INSN (insn) = next;

  NEXT_INSN (after) = insn;
  PREV_INSN (next) = insn;

  /* Update links from insn to bb and vice versa.  */
  df_insn_change_bb (insn, bb);
  if (BB_END (bb) == after)
    BB_END (bb) = insn;

  prepare_insn_expr (insn, seqno);
  return insn;
}


/* Functions to work with right-hand sides.  */

/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
   VECT and return true when found.  Use NEW_VINSN for comparison only when
   COMPARE_VINSNS is true.  Write to INDP the index on which
   the search has stopped, such that inserting the new element at INDP will
   retain VECT's sort order.  */
static bool
find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
                        unsigned uid, vinsn_t new_vinsn,
                        bool compare_vinsns, int *indp)
{
  expr_history_def *arr;
  int i, j, len = VEC_length (expr_history_def, vect);

  if (len == 0)
    {
      *indp = 0;
      return false;
    }

  arr = VEC_address (expr_history_def, vect);
  i = 0, j = len - 1;

  while (i <= j)
    {
      unsigned auid = arr[i].uid;
      vinsn_t avinsn = arr[i].new_expr_vinsn;

      if (auid == uid
          /* When undoing transformation on a bookkeeping copy, the new vinsn
             may not be exactly equal to the one that is saved in the vector.
             This is because the insn whose copy we're checking was possibly
             substituted itself.  */
          && (! compare_vinsns
              || vinsn_equal_p (avinsn, new_vinsn)))
        {
          *indp = i;
          return true;
        }
      else if (auid > uid)
        break;
      i++;
    }

  *indp = i;
  return false;
}

/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT.  Return
   the position found or -1, if no such value is in the vector.
   Search also for UIDs of insn's originators, if ORIGINATORS_P is true.  */
int
find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
                      vinsn_t new_vinsn, bool originators_p)
{
  int ind;

  if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
                              false, &ind))
    return ind;

  if (INSN_ORIGINATORS (insn) && originators_p)
    {
      unsigned uid;
      bitmap_iterator bi;

      EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
        if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
          return ind;
    }

  return -1;
}

/* Insert a new element in a sorted history vector pointed to by PVECT,
   if it is not there already.  The element is searched using the
   UID/NEW_EXPR_VINSN pair.  TYPE, OLD_EXPR_VINSN and SPEC_DS save
   the history of a transformation.  */
void
insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
                        unsigned uid, enum local_trans_type type,
                        vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
                        ds_t spec_ds)
{
  VEC(expr_history_def, heap) *vect = *pvect;
  expr_history_def temp;
  bool res;
  int ind;

  res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);

  if (res)
    {
      expr_history_def *phist = VEC_index (expr_history_def, vect, ind);

      /* It is possible that speculation types of expressions that were
         propagated through different paths will be different here.  In this
         case, merge the status to get the correct check later.  */
      if (phist->spec_ds != spec_ds)
        phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
      return;
    }

  temp.uid = uid;
  temp.old_expr_vinsn = old_expr_vinsn;
  temp.new_expr_vinsn = new_expr_vinsn;
  temp.spec_ds = spec_ds;
  temp.type = type;

  vinsn_attach (old_expr_vinsn);
  vinsn_attach (new_expr_vinsn);
  VEC_safe_insert (expr_history_def, heap, vect, ind, &temp);
  *pvect = vect;
}

/* Free the history vector pointed to by PVECT.  */
static void
free_history_vect (VEC (expr_history_def, heap) **pvect)
{
  unsigned i;
  expr_history_def *phist;

  if (! *pvect)
    return;

  for (i = 0;
       VEC_iterate (expr_history_def, *pvect, i, phist);
       i++)
    {
      vinsn_detach (phist->old_expr_vinsn);
      vinsn_detach (phist->new_expr_vinsn);
    }

  VEC_free (expr_history_def, heap, *pvect);
  *pvect = NULL;
}

/* Merge vector FROM into PVECT.  */
static void
merge_history_vect (VEC (expr_history_def, heap) **pvect,
                    VEC (expr_history_def, heap) *from)
{
  expr_history_def *phist;
  int i;

  /* We keep this vector sorted.  */
  for (i = 0; VEC_iterate (expr_history_def, from, i, phist); i++)
    insert_in_history_vect (pvect, phist->uid, phist->type,
                            phist->old_expr_vinsn, phist->new_expr_vinsn,
                            phist->spec_ds);
}

/* Compare two vinsns as rhses if possible and as vinsns otherwise.  */
bool
vinsn_equal_p (vinsn_t x, vinsn_t y)
{
  rtx_equal_p_callback_function repcf;

  if (x == y)
    return true;

  if (VINSN_TYPE (x) != VINSN_TYPE (y))
    return false;

  if (VINSN_HASH (x) != VINSN_HASH (y))
    return false;

  repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
  if (VINSN_SEPARABLE_P (x))
    {
      /* Compare RHSes of VINSNs.  */
      gcc_assert (VINSN_RHS (x));
      gcc_assert (VINSN_RHS (y));

      return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
    }

  return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
}


/* Functions for working with expressions.  */

/* Initialize EXPR.  */
static void
init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
           int sched_times, int orig_bb_index, ds_t spec_done_ds,
           ds_t spec_to_check_ds, int orig_sched_cycle,
           VEC(expr_history_def, heap) *history, signed char target_available,
           bool was_substituted, bool was_renamed, bool needs_spec_check_p,
           bool cant_move)
{
  vinsn_attach (vi);

  EXPR_VINSN (expr) = vi;
  EXPR_SPEC (expr) = spec;
  EXPR_USEFULNESS (expr) = use;
  EXPR_PRIORITY (expr) = priority;
  EXPR_PRIORITY_ADJ (expr) = 0;
  EXPR_SCHED_TIMES (expr) = sched_times;
  EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
  EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
  EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
  EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;

  if (history)
    EXPR_HISTORY_OF_CHANGES (expr) = history;
  else
    EXPR_HISTORY_OF_CHANGES (expr) = NULL;

  EXPR_TARGET_AVAILABLE (expr) = target_available;
  EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
  EXPR_WAS_RENAMED (expr) = was_renamed;
  EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
  EXPR_CANT_MOVE (expr) = cant_move;
}

/* Make a copy of the expr FROM into the expr TO.  */
void
copy_expr (expr_t to, expr_t from)
{
  VEC(expr_history_def, heap) *temp = NULL;

  if (EXPR_HISTORY_OF_CHANGES (from))
    {
      unsigned i;
      expr_history_def *phist;

      temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
      for (i = 0;
           VEC_iterate (expr_history_def, temp, i, phist);
           i++)
        {
          vinsn_attach (phist->old_expr_vinsn);
          vinsn_attach (phist->new_expr_vinsn);
        }
    }

  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
             EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
             EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
             EXPR_ORIG_SCHED_CYCLE (from), temp,
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}

/* Same, but the final expr will not ever be in av sets, so don't copy
   "uninteresting" data such as bitmap cache.  */
void
copy_expr_onside (expr_t to, expr_t from)
{
  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
             EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, NULL,
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}
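
/* An illustrative note on the two copy routines above (a sketch, not a
   definitive contract): copy_expr duplicates the transformation history,
   attaching every vinsn recorded in it, while copy_expr_onside drops the
   history and the bb/cycle bookkeeping, e.g.:

     expr_def tmp;
     copy_expr_onside (&tmp, expr);   -- cheap copy for local analysis
     ...
     clear_expr (&tmp);

   Both variants attach EXPR_VINSN (FROM) via init_expr, so clear_expr
   must be called on the copy to balance the reference count.  */
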
/* Prepare the expr of INSN for scheduling.  Used when moving insn and when
   initializing new insns.  */
static void
prepare_insn_expr (insn_t insn, int seqno)
{
  expr_t expr = INSN_EXPR (insn);
  ds_t ds;

  INSN_SEQNO (insn) = seqno;
  EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
  EXPR_SPEC (expr) = 0;
  EXPR_ORIG_SCHED_CYCLE (expr) = 0;
  EXPR_WAS_SUBSTITUTED (expr) = 0;
  EXPR_WAS_RENAMED (expr) = 0;
  EXPR_TARGET_AVAILABLE (expr) = 1;
  INSN_LIVE_VALID_P (insn) = false;

  /* ??? If this expression is speculative, make its dependence
     as weak as possible.  We can filter this expression later
     in process_spec_exprs, because we do not distinguish
     between the status we got during compute_av_set and the
     existing status.  To be fixed.  */
  ds = EXPR_SPEC_DONE_DS (expr);
  if (ds)
    EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);

  free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
}

/* Update target_available bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_target_availability (expr_t to, expr_t from, insn_t split_point)
{
  if (EXPR_TARGET_AVAILABLE (to) < 0
      || EXPR_TARGET_AVAILABLE (from) < 0)
    EXPR_TARGET_AVAILABLE (to) = -1;
  else
    {
      /* We try to detect the case when one of the expressions
         can only be reached through another one.  In this case,
         we can do better.  */
      if (split_point == NULL)
        {
          int toind, fromind;

          toind = EXPR_ORIG_BB_INDEX (to);
          fromind = EXPR_ORIG_BB_INDEX (from);

          if (toind && toind == fromind)
            /* Do nothing -- everything is done in
               merge_with_other_exprs.  */
            ;
          else
            EXPR_TARGET_AVAILABLE (to) = -1;
        }
      else if (EXPR_TARGET_AVAILABLE (from) == 0
               && EXPR_LHS (from)
               && REG_P (EXPR_LHS (from))
               && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from)))
        EXPR_TARGET_AVAILABLE (to) = -1;
      else
        EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
    }
}

/* Update speculation bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
{
  ds_t old_to_ds, old_from_ds;

  old_to_ds = EXPR_SPEC_DONE_DS (to);
  old_from_ds = EXPR_SPEC_DONE_DS (from);

  EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
  EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
  EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);

  /* When merging e.g. control & data speculative exprs, or a control
     speculative with a control&data speculative one, we really have
     to change vinsn too.  Also, when speculative status is changed,
     we also need to record this as a transformation in expr's history.  */
  if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
    {
      old_to_ds = ds_get_speculation_types (old_to_ds);
      old_from_ds = ds_get_speculation_types (old_from_ds);

      if (old_to_ds != old_from_ds)
        {
          ds_t record_ds;

          /* When both expressions are speculative, we need to change
             the vinsn first.  */
          if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
            {
              int res;

              res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
              gcc_assert (res >= 0);
            }

          if (split_point != NULL)
            {
              /* Record the change with the proper status.  */
              record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
              record_ds &= ~(old_to_ds & SPECULATIVE);
              record_ds &= ~(old_from_ds & SPECULATIVE);

              insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
                                      INSN_UID (split_point), TRANS_SPECULATION,
                                      EXPR_VINSN (from), EXPR_VINSN (to),
                                      record_ds);
            }
        }
    }
}


/* Merge bits of FROM expr to TO expr.  When SPLIT_POINT is not NULL,
   this is done along different paths.  */
void
merge_expr_data (expr_t to, expr_t from, insn_t split_point)
{
  /* Choose the maximum of the specs of merged exprs.  This is required
     for correctness of bookkeeping.  */
  if (EXPR_SPEC (to) < EXPR_SPEC (from))
    EXPR_SPEC (to) = EXPR_SPEC (from);

  if (split_point)
    EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
  else
    EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
                                EXPR_USEFULNESS (from));

  if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
    EXPR_PRIORITY (to) = EXPR_PRIORITY (from);

  if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
    EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);

  if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
    EXPR_ORIG_BB_INDEX (to) = 0;

  EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
                                    EXPR_ORIG_SCHED_CYCLE (from));

  EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
  EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
  EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);

  merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
                      EXPR_HISTORY_OF_CHANGES (from));
  update_target_availability (to, from, split_point);
  update_speculative_bits (to, from, split_point);
}

/* Merge bits of FROM expr to TO expr.  Vinsns in the exprs should be equal
   in terms of vinsn_equal_p.  SPLIT_POINT is non-null when expressions
   are merged from different successors at a split point.  */
void
merge_expr (expr_t to, expr_t from, insn_t split_point)
{
  vinsn_t to_vi = EXPR_VINSN (to);
  vinsn_t from_vi = EXPR_VINSN (from);

  gcc_assert (vinsn_equal_p (to_vi, from_vi));

  /* Make sure that the speculative pattern is propagated into exprs that
     have a non-speculative one.  This will provide us with consistent
     speculative bits and speculative patterns inside expr.  */
  if ((EXPR_SPEC_DONE_DS (from) != 0
       && EXPR_SPEC_DONE_DS (to) == 0)
      /* Do likewise for volatile insns, so that we always retain
         the may_trap_p bit on the resulting expression.  */
      || (VINSN_MAY_TRAP_P (EXPR_VINSN (from))
          && !VINSN_MAY_TRAP_P (EXPR_VINSN (to))))
    change_vinsn_in_expr (to, EXPR_VINSN (from));

  merge_expr_data (to, from, split_point);
  gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
}

/* Clear the information of this EXPR.  */
void
clear_expr (expr_t expr)
{
  vinsn_detach (EXPR_VINSN (expr));
  EXPR_VINSN (expr) = NULL;

  free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
}
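
/* An illustrative summary of the merge rules above (a sketch of intent,
   not new behavior): when two equal exprs meet at a split point,

     merge_expr (to, from, split_point);

   keeps the worst-case spec and the best priority, sums the usefulness
   (the probabilities of the two paths), merges the histories, and then
   reconciles target availability and speculation bits.  With a NULL
   SPLIT_POINT the usefulness is taken as a maximum instead, since the
   exprs come from the same path.  */
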
/* For a given LV_SET, mark EXPR as having an unavailable target register.  */
static void
set_unavailable_target_for_expr (expr_t expr, regset lv_set)
{
  if (EXPR_SEPARABLE_P (expr))
    {
      if (REG_P (EXPR_LHS (expr))
          && register_unavailable_p (lv_set, EXPR_LHS (expr)))
        {
          /* If it's an insn like r1 = use (r1, ...), and it exists in
             different forms in each of the av_sets being merged, we can't
             say whether the original destination register is available or
             not.  However, this still works if the destination register is
             not used in the original expression: if the branch whose LV_SET
             we're looking at here is not actually the 'other branch' in the
             sense that the same expression is available through it (but
             this can't be determined at the computation stage because of
             transformations on one of the branches), it still won't affect
             the availability.
             Liveness of a register somewhere on a code motion path means
             it's either read somewhere on the code motion path, live on
             the 'other' branch, live at the point immediately following
             the original operation, or is read by the original operation.
             The latter case is filtered out in the condition below.
             It still doesn't cover the case when the register is defined
             and used somewhere within the code motion path, and in this
             case we could miss a unifying code motion along both branches
             using a renamed register, but it won't affect code correctness,
             since upon an actual code motion bookkeeping code would be
             generated.  */
          if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
                                      EXPR_LHS (expr)))
            EXPR_TARGET_AVAILABLE (expr) = -1;
          else
            EXPR_TARGET_AVAILABLE (expr) = false;
        }
    }
  else
    {
      unsigned regno;
      reg_set_iterator rsi;

      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
                                 0, regno, rsi)
        if (bitmap_bit_p (lv_set, regno))
          {
            EXPR_TARGET_AVAILABLE (expr) = false;
            break;
          }

      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
                                 0, regno, rsi)
        if (bitmap_bit_p (lv_set, regno))
          {
            EXPR_TARGET_AVAILABLE (expr) = false;
            break;
          }
    }
}

/* Try to make EXPR speculative.  Return 1 when EXPR's pattern
   or dependence status have changed, 2 when also the target register
   became unavailable, 0 if nothing had to be changed, and -1 when the
   insn cannot be speculated.  */
int
speculate_expr (expr_t expr, ds_t ds)
{
  int res;
  rtx orig_insn_rtx;
  rtx spec_pat;
  ds_t target_ds, current_ds;

  /* Obtain the status we need to put on EXPR.  */
  target_ds = (ds & SPECULATIVE);
  current_ds = EXPR_SPEC_DONE_DS (expr);
  ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);

  orig_insn_rtx = EXPR_INSN_RTX (expr);

  res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);

  switch (res)
    {
    case 0:
      EXPR_SPEC_DONE_DS (expr) = ds;
      return current_ds != ds ? 1 : 0;

    case 1:
      {
        rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
        vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);

        change_vinsn_in_expr (expr, spec_vinsn);
        EXPR_SPEC_DONE_DS (expr) = ds;
        EXPR_NEEDS_SPEC_CHECK_P (expr) = true;

        /* Do not allow clobbering the address register of speculative
           insns.  */
*/ 1983 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), 1984 expr_dest_reg (expr))) 1985 { 1986 EXPR_TARGET_AVAILABLE (expr) = false; 1987 return 2; 1988 } 1989 1990 return 1; 1991 } 1992 1993 case -1: 1994 return -1; 1995 1996 default: 1997 gcc_unreachable (); 1998 return -1; 1999 } 2000 } 2001 2002 /* Return a destination register, if any, of EXPR. */ 2003 rtx 2004 expr_dest_reg (expr_t expr) 2005 { 2006 rtx dest = VINSN_LHS (EXPR_VINSN (expr)); 2007 2008 if (dest != NULL_RTX && REG_P (dest)) 2009 return dest; 2010 2011 return NULL_RTX; 2012 } 2013 2014 /* Returns the REGNO of the R's destination. */ 2015 unsigned 2016 expr_dest_regno (expr_t expr) 2017 { 2018 rtx dest = expr_dest_reg (expr); 2019 2020 gcc_assert (dest != NULL_RTX); 2021 return REGNO (dest); 2022 } 2023 2024 /* For a given LV_SET, mark all expressions in JOIN_SET, but not present in 2025 AV_SET having unavailable target register. */ 2026 void 2027 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set) 2028 { 2029 expr_t expr; 2030 av_set_iterator avi; 2031 2032 FOR_EACH_EXPR (expr, avi, join_set) 2033 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL) 2034 set_unavailable_target_for_expr (expr, lv_set); 2035 } 2036 2037 2038 /* Returns true if REG (at least partially) is present in REGS. */ 2039 bool 2040 register_unavailable_p (regset regs, rtx reg) 2041 { 2042 unsigned regno, end_regno; 2043 2044 regno = REGNO (reg); 2045 if (bitmap_bit_p (regs, regno)) 2046 return true; 2047 2048 end_regno = END_REGNO (reg); 2049 2050 while (++regno < end_regno) 2051 if (bitmap_bit_p (regs, regno)) 2052 return true; 2053 2054 return false; 2055 } 2056 2057 /* Av set functions. */ 2058 2059 /* Add a new element to av set SETP. 2060 Return the element added. */ 2061 static av_set_t 2062 av_set_add_element (av_set_t *setp) 2063 { 2064 /* Insert at the beginning of the list. */ 2065 _list_add (setp); 2066 return *setp; 2067 } 2068 2069 /* Add EXPR to SETP. */ 2070 void 2071 av_set_add (av_set_t *setp, expr_t expr) 2072 { 2073 av_set_t elem; 2074 2075 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr))); 2076 elem = av_set_add_element (setp); 2077 copy_expr (_AV_SET_EXPR (elem), expr); 2078 } 2079 2080 /* Same, but do not copy EXPR. */ 2081 static void 2082 av_set_add_nocopy (av_set_t *setp, expr_t expr) 2083 { 2084 av_set_t elem; 2085 2086 elem = av_set_add_element (setp); 2087 *_AV_SET_EXPR (elem) = *expr; 2088 } 2089 2090 /* Remove expr pointed to by IP from the av_set. */ 2091 void 2092 av_set_iter_remove (av_set_iterator *ip) 2093 { 2094 clear_expr (_AV_SET_EXPR (*ip->lp)); 2095 _list_iter_remove (ip); 2096 } 2097 2098 /* Search for an expr in SET, such that it's equivalent to SOUGHT_VINSN in the 2099 sense of vinsn_equal_p function. Return NULL if no such expr is 2100 in SET was found. */ 2101 expr_t 2102 av_set_lookup (av_set_t set, vinsn_t sought_vinsn) 2103 { 2104 expr_t expr; 2105 av_set_iterator i; 2106 2107 FOR_EACH_EXPR (expr, i, set) 2108 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) 2109 return expr; 2110 return NULL; 2111 } 2112 2113 /* Same, but also remove the EXPR found. 
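
   A minimal usage sketch, mirroring what av_set_union_and_live below
   does with the result (EXPR1 and INSN are the caller's values):

     expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
     if (expr2)
       merge_expr (expr2, expr1, insn);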
*/ 2114 static expr_t 2115 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn) 2116 { 2117 expr_t expr; 2118 av_set_iterator i; 2119 2120 FOR_EACH_EXPR_1 (expr, i, setp) 2121 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) 2122 { 2123 _list_iter_remove_nofree (&i); 2124 return expr; 2125 } 2126 return NULL; 2127 } 2128 2129 /* Search for an expr in SET, such that it's equivalent to EXPR in the 2130 sense of vinsn_equal_p function of their vinsns, but not EXPR itself. 2131 Returns NULL if no such expr is in SET was found. */ 2132 static expr_t 2133 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr) 2134 { 2135 expr_t cur_expr; 2136 av_set_iterator i; 2137 2138 FOR_EACH_EXPR (cur_expr, i, set) 2139 { 2140 if (cur_expr == expr) 2141 continue; 2142 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr))) 2143 return cur_expr; 2144 } 2145 2146 return NULL; 2147 } 2148 2149 /* If other expression is already in AVP, remove one of them. */ 2150 expr_t 2151 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr) 2152 { 2153 expr_t expr2; 2154 2155 expr2 = av_set_lookup_other_equiv_expr (*avp, expr); 2156 if (expr2 != NULL) 2157 { 2158 /* Reset target availability on merge, since taking it only from one 2159 of the exprs would be controversial for different code. */ 2160 EXPR_TARGET_AVAILABLE (expr2) = -1; 2161 EXPR_USEFULNESS (expr2) = 0; 2162 2163 merge_expr (expr2, expr, NULL); 2164 2165 /* Fix usefulness as it should be now REG_BR_PROB_BASE. */ 2166 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE; 2167 2168 av_set_iter_remove (ip); 2169 return expr2; 2170 } 2171 2172 return expr; 2173 } 2174 2175 /* Return true if there is an expr that correlates to VI in SET. */ 2176 bool 2177 av_set_is_in_p (av_set_t set, vinsn_t vi) 2178 { 2179 return av_set_lookup (set, vi) != NULL; 2180 } 2181 2182 /* Return a copy of SET. */ 2183 av_set_t 2184 av_set_copy (av_set_t set) 2185 { 2186 expr_t expr; 2187 av_set_iterator i; 2188 av_set_t res = NULL; 2189 2190 FOR_EACH_EXPR (expr, i, set) 2191 av_set_add (&res, expr); 2192 2193 return res; 2194 } 2195 2196 /* Join two av sets that do not have common elements by attaching second set 2197 (pointed to by FROMP) to the end of first set (TO_TAILP must point to 2198 _AV_SET_NEXT of first set's last element). */ 2199 static void 2200 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp) 2201 { 2202 gcc_assert (*to_tailp == NULL); 2203 *to_tailp = *fromp; 2204 *fromp = NULL; 2205 } 2206 2207 /* Makes set pointed to by TO to be the union of TO and FROM. Clear av_set 2208 pointed to by FROMP afterwards. */ 2209 void 2210 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn) 2211 { 2212 expr_t expr1; 2213 av_set_iterator i; 2214 2215 /* Delete from TOP all exprs, that present in FROMP. */ 2216 FOR_EACH_EXPR_1 (expr1, i, top) 2217 { 2218 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1)); 2219 2220 if (expr2) 2221 { 2222 merge_expr (expr2, expr1, insn); 2223 av_set_iter_remove (&i); 2224 } 2225 } 2226 2227 join_distinct_sets (i.lp, fromp); 2228 } 2229 2230 /* Same as above, but also update availability of target register in 2231 TOP judging by TO_LV_SET and FROM_LV_SET. */ 2232 void 2233 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set, 2234 regset from_lv_set, insn_t insn) 2235 { 2236 expr_t expr1; 2237 av_set_iterator i; 2238 av_set_t *to_tailp, in_both_set = NULL; 2239 2240 /* Delete from TOP all expres, that present in FROMP. 
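
   Schematically, if e2 and e2' have equal vinsns (the primes are just
   labels for this illustration):

     *TOP = { e1, e2 }, *FROMP = { e2', e3 }
       ==>  *TOP = { e1, merge (e2', e2), e3 }, *FROMP = NULL

   Exprs that were present on only one of the paths additionally get
   their target availability re-checked against the LV set of the
   other path.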
*/ 2241 FOR_EACH_EXPR_1 (expr1, i, top) 2242 { 2243 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1)); 2244 2245 if (expr2) 2246 { 2247 /* It may be that the expressions have different destination 2248 registers, in which case we need to check liveness here. */ 2249 if (EXPR_SEPARABLE_P (expr1)) 2250 { 2251 int regno1 = (REG_P (EXPR_LHS (expr1)) 2252 ? (int) expr_dest_regno (expr1) : -1); 2253 int regno2 = (REG_P (EXPR_LHS (expr2)) 2254 ? (int) expr_dest_regno (expr2) : -1); 2255 2256 /* ??? We don't have a way to check restrictions for 2257 *other* register on the current path, we did it only 2258 for the current target register. Give up. */ 2259 if (regno1 != regno2) 2260 EXPR_TARGET_AVAILABLE (expr2) = -1; 2261 } 2262 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2)) 2263 EXPR_TARGET_AVAILABLE (expr2) = -1; 2264 2265 merge_expr (expr2, expr1, insn); 2266 av_set_add_nocopy (&in_both_set, expr2); 2267 av_set_iter_remove (&i); 2268 } 2269 else 2270 /* EXPR1 is present in TOP, but not in FROMP. Check it on 2271 FROM_LV_SET. */ 2272 set_unavailable_target_for_expr (expr1, from_lv_set); 2273 } 2274 to_tailp = i.lp; 2275 2276 /* These expressions are not present in TOP. Check liveness 2277 restrictions on TO_LV_SET. */ 2278 FOR_EACH_EXPR (expr1, i, *fromp) 2279 set_unavailable_target_for_expr (expr1, to_lv_set); 2280 2281 join_distinct_sets (i.lp, &in_both_set); 2282 join_distinct_sets (to_tailp, fromp); 2283 } 2284 2285 /* Clear av_set pointed to by SETP. */ 2286 void 2287 av_set_clear (av_set_t *setp) 2288 { 2289 expr_t expr; 2290 av_set_iterator i; 2291 2292 FOR_EACH_EXPR_1 (expr, i, setp) 2293 av_set_iter_remove (&i); 2294 2295 gcc_assert (*setp == NULL); 2296 } 2297 2298 /* Leave only one non-speculative element in the SETP. */ 2299 void 2300 av_set_leave_one_nonspec (av_set_t *setp) 2301 { 2302 expr_t expr; 2303 av_set_iterator i; 2304 bool has_one_nonspec = false; 2305 2306 /* Keep all speculative exprs, and leave one non-speculative 2307 (the first one). */ 2308 FOR_EACH_EXPR_1 (expr, i, setp) 2309 { 2310 if (!EXPR_SPEC_DONE_DS (expr)) 2311 { 2312 if (has_one_nonspec) 2313 av_set_iter_remove (&i); 2314 else 2315 has_one_nonspec = true; 2316 } 2317 } 2318 } 2319 2320 /* Return the N'th element of the SET. */ 2321 expr_t 2322 av_set_element (av_set_t set, int n) 2323 { 2324 expr_t expr; 2325 av_set_iterator i; 2326 2327 FOR_EACH_EXPR (expr, i, set) 2328 if (n-- == 0) 2329 return expr; 2330 2331 gcc_unreachable (); 2332 return NULL; 2333 } 2334 2335 /* Deletes all expressions from AVP that are conditional branches (IFs). */ 2336 void 2337 av_set_substract_cond_branches (av_set_t *avp) 2338 { 2339 av_set_iterator i; 2340 expr_t expr; 2341 2342 FOR_EACH_EXPR_1 (expr, i, avp) 2343 if (vinsn_cond_branch_p (EXPR_VINSN (expr))) 2344 av_set_iter_remove (&i); 2345 } 2346 2347 /* Multiplies usefulness attribute of each member of av-set *AVP by 2348 value PROB / ALL_PROB. */ 2349 void 2350 av_set_split_usefulness (av_set_t av, int prob, int all_prob) 2351 { 2352 av_set_iterator i; 2353 expr_t expr; 2354 2355 FOR_EACH_EXPR (expr, i, av) 2356 EXPR_USEFULNESS (expr) = (all_prob 2357 ? (EXPR_USEFULNESS (expr) * prob) / all_prob 2358 : 0); 2359 } 2360 2361 /* Leave in AVP only those expressions, which are present in AV, 2362 and return it, merging history expressions. 
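
   E.g. if *AVP = { a, b, c } and AV contains exprs equivalent (in the
   vinsn_equal_p sense) only to b and c, then a is removed and the
   transformation histories recorded in AV's variants of b and c are
   folded into the surviving exprs.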
*/ 2363 void 2364 av_set_code_motion_filter (av_set_t *avp, av_set_t av) 2365 { 2366 av_set_iterator i; 2367 expr_t expr, expr2; 2368 2369 FOR_EACH_EXPR_1 (expr, i, avp) 2370 if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL) 2371 av_set_iter_remove (&i); 2372 else 2373 /* When updating av sets in bookkeeping blocks, we can add more insns 2374 there which will be transformed but the upper av sets will not 2375 reflect those transformations. We then fail to undo those 2376 when searching for such insns. So merge the history saved 2377 in the av set of the block we are processing. */ 2378 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), 2379 EXPR_HISTORY_OF_CHANGES (expr2)); 2380 } 2381 2382 2383 2384 /* Dependence hooks to initialize insn data. */ 2385 2386 /* This is used in hooks callable from dependence analysis when initializing 2387 instruction's data. */ 2388 static struct 2389 { 2390 /* Where the dependence was found (lhs/rhs). */ 2391 deps_where_t where; 2392 2393 /* The actual data object to initialize. */ 2394 idata_t id; 2395 2396 /* True when the insn should not be made clonable. */ 2397 bool force_unique_p; 2398 2399 /* True when insn should be treated as of type USE, i.e. never renamed. */ 2400 bool force_use_p; 2401 } deps_init_id_data; 2402 2403 2404 /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be 2405 clonable. */ 2406 static void 2407 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p) 2408 { 2409 int type; 2410 2411 /* Determine whether INSN could be cloned and return appropriate vinsn type. 2412 That clonable insns which can be separated into lhs and rhs have type SET. 2413 Other clonable insns have type USE. */ 2414 type = GET_CODE (insn); 2415 2416 /* Only regular insns could be cloned. */ 2417 if (type == INSN && !force_unique_p) 2418 type = SET; 2419 else if (type == JUMP_INSN && simplejump_p (insn)) 2420 type = PC; 2421 else if (type == DEBUG_INSN) 2422 type = !force_unique_p ? USE : INSN; 2423 2424 IDATA_TYPE (id) = type; 2425 IDATA_REG_SETS (id) = get_clear_regset_from_pool (); 2426 IDATA_REG_USES (id) = get_clear_regset_from_pool (); 2427 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool (); 2428 } 2429 2430 /* Start initializing insn data. */ 2431 static void 2432 deps_init_id_start_insn (insn_t insn) 2433 { 2434 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE); 2435 2436 setup_id_for_insn (deps_init_id_data.id, insn, 2437 deps_init_id_data.force_unique_p); 2438 deps_init_id_data.where = DEPS_IN_INSN; 2439 } 2440 2441 /* Start initializing lhs data. */ 2442 static void 2443 deps_init_id_start_lhs (rtx lhs) 2444 { 2445 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); 2446 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL); 2447 2448 if (IDATA_TYPE (deps_init_id_data.id) == SET) 2449 { 2450 IDATA_LHS (deps_init_id_data.id) = lhs; 2451 deps_init_id_data.where = DEPS_IN_LHS; 2452 } 2453 } 2454 2455 /* Finish initializing lhs data. */ 2456 static void 2457 deps_init_id_finish_lhs (void) 2458 { 2459 deps_init_id_data.where = DEPS_IN_INSN; 2460 } 2461 2462 /* Note a set of REGNO. 
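
   E.g. while initializing the id for "r1 = r2 + r3" (register numbers
   invented for illustration), this hook fires for r1 and adds it to
   IDATA_REG_SETS; had the set been noted while scanning the rhs, the
   insn would additionally be forced to type USE when the id is
   finished.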
*/ 2463 static void 2464 deps_init_id_note_reg_set (int regno) 2465 { 2466 haifa_note_reg_set (regno); 2467 2468 if (deps_init_id_data.where == DEPS_IN_RHS) 2469 deps_init_id_data.force_use_p = true; 2470 2471 if (IDATA_TYPE (deps_init_id_data.id) != PC) 2472 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno); 2473 2474 #ifdef STACK_REGS 2475 /* Make instructions that set stack registers to be ineligible for 2476 renaming to avoid issues with find_used_regs. */ 2477 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) 2478 deps_init_id_data.force_use_p = true; 2479 #endif 2480 } 2481 2482 /* Note a clobber of REGNO. */ 2483 static void 2484 deps_init_id_note_reg_clobber (int regno) 2485 { 2486 haifa_note_reg_clobber (regno); 2487 2488 if (deps_init_id_data.where == DEPS_IN_RHS) 2489 deps_init_id_data.force_use_p = true; 2490 2491 if (IDATA_TYPE (deps_init_id_data.id) != PC) 2492 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno); 2493 } 2494 2495 /* Note a use of REGNO. */ 2496 static void 2497 deps_init_id_note_reg_use (int regno) 2498 { 2499 haifa_note_reg_use (regno); 2500 2501 if (IDATA_TYPE (deps_init_id_data.id) != PC) 2502 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno); 2503 } 2504 2505 /* Start initializing rhs data. */ 2506 static void 2507 deps_init_id_start_rhs (rtx rhs) 2508 { 2509 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); 2510 2511 /* And there was no sel_deps_reset_to_insn (). */ 2512 if (IDATA_LHS (deps_init_id_data.id) != NULL) 2513 { 2514 IDATA_RHS (deps_init_id_data.id) = rhs; 2515 deps_init_id_data.where = DEPS_IN_RHS; 2516 } 2517 } 2518 2519 /* Finish initializing rhs data. */ 2520 static void 2521 deps_init_id_finish_rhs (void) 2522 { 2523 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS 2524 || deps_init_id_data.where == DEPS_IN_INSN); 2525 deps_init_id_data.where = DEPS_IN_INSN; 2526 } 2527 2528 /* Finish initializing insn data. */ 2529 static void 2530 deps_init_id_finish_insn (void) 2531 { 2532 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); 2533 2534 if (IDATA_TYPE (deps_init_id_data.id) == SET) 2535 { 2536 rtx lhs = IDATA_LHS (deps_init_id_data.id); 2537 rtx rhs = IDATA_RHS (deps_init_id_data.id); 2538 2539 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs) 2540 || deps_init_id_data.force_use_p) 2541 { 2542 /* This should be a USE, as we don't want to schedule its RHS 2543 separately. However, we still want to have them recorded 2544 for the purposes of substitution. That's why we don't 2545 simply call downgrade_to_use () here. */ 2546 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET); 2547 gcc_assert (!lhs == !rhs); 2548 2549 IDATA_TYPE (deps_init_id_data.id) = USE; 2550 } 2551 } 2552 2553 deps_init_id_data.where = DEPS_IN_NOWHERE; 2554 } 2555 2556 /* This is dependence info used for initializing insn's data. */ 2557 static struct sched_deps_info_def deps_init_id_sched_deps_info; 2558 2559 /* This initializes most of the static part of the above structure. 
*/ 2560 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info = 2561 { 2562 NULL, 2563 2564 deps_init_id_start_insn, 2565 deps_init_id_finish_insn, 2566 deps_init_id_start_lhs, 2567 deps_init_id_finish_lhs, 2568 deps_init_id_start_rhs, 2569 deps_init_id_finish_rhs, 2570 deps_init_id_note_reg_set, 2571 deps_init_id_note_reg_clobber, 2572 deps_init_id_note_reg_use, 2573 NULL, /* note_mem_dep */ 2574 NULL, /* note_dep */ 2575 2576 0, /* use_cselib */ 2577 0, /* use_deps_list */ 2578 0 /* generate_spec_deps */ 2579 }; 2580 2581 /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true, 2582 we don't actually need information about lhs and rhs. */ 2583 static void 2584 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p) 2585 { 2586 rtx pat = PATTERN (insn); 2587 2588 if (NONJUMP_INSN_P (insn) 2589 && GET_CODE (pat) == SET 2590 && !force_unique_p) 2591 { 2592 IDATA_RHS (id) = SET_SRC (pat); 2593 IDATA_LHS (id) = SET_DEST (pat); 2594 } 2595 else 2596 IDATA_LHS (id) = IDATA_RHS (id) = NULL; 2597 } 2598 2599 /* Possibly downgrade INSN to USE. */ 2600 static void 2601 maybe_downgrade_id_to_use (idata_t id, insn_t insn) 2602 { 2603 bool must_be_use = false; 2604 unsigned uid = INSN_UID (insn); 2605 df_ref *rec; 2606 rtx lhs = IDATA_LHS (id); 2607 rtx rhs = IDATA_RHS (id); 2608 2609 /* We downgrade only SETs. */ 2610 if (IDATA_TYPE (id) != SET) 2611 return; 2612 2613 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs)) 2614 { 2615 IDATA_TYPE (id) = USE; 2616 return; 2617 } 2618 2619 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++) 2620 { 2621 df_ref def = *rec; 2622 2623 if (DF_REF_INSN (def) 2624 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY) 2625 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id))) 2626 { 2627 must_be_use = true; 2628 break; 2629 } 2630 2631 #ifdef STACK_REGS 2632 /* Make instructions that set stack registers to be ineligible for 2633 renaming to avoid issues with find_used_regs. */ 2634 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG)) 2635 { 2636 must_be_use = true; 2637 break; 2638 } 2639 #endif 2640 } 2641 2642 if (must_be_use) 2643 IDATA_TYPE (id) = USE; 2644 } 2645 2646 /* Setup register sets describing INSN in ID. */ 2647 static void 2648 setup_id_reg_sets (idata_t id, insn_t insn) 2649 { 2650 unsigned uid = INSN_UID (insn); 2651 df_ref *rec; 2652 regset tmp = get_clear_regset_from_pool (); 2653 2654 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++) 2655 { 2656 df_ref def = *rec; 2657 unsigned int regno = DF_REF_REGNO (def); 2658 2659 /* Post modifies are treated like clobbers by sched-deps.c. */ 2660 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER 2661 | DF_REF_PRE_POST_MODIFY))) 2662 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno); 2663 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER)) 2664 { 2665 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno); 2666 2667 #ifdef STACK_REGS 2668 /* For stack registers, treat writes to them as writes 2669 to the first one to be consistent with sched-deps.c. */ 2670 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) 2671 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG); 2672 #endif 2673 } 2674 /* Mark special refs that generate read/write def pair. 
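
   E.g. a DF_REF_CONDITIONAL def (the register keeps its old value when
   the condition is false, so it is implicitly read) or a stack pointer
   adjustment: REGNO is remembered in TMP so that the first matching
   use scanned below is treated as the def's counterpart and is not
   recorded in IDATA_REG_USES.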
*/ 2675 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL) 2676 || regno == STACK_POINTER_REGNUM) 2677 bitmap_set_bit (tmp, regno); 2678 } 2679 2680 for (rec = DF_INSN_UID_USES (uid); *rec; rec++) 2681 { 2682 df_ref use = *rec; 2683 unsigned int regno = DF_REF_REGNO (use); 2684 2685 /* When these refs are met for the first time, skip them, as 2686 these uses are just counterparts of some defs. */ 2687 if (bitmap_bit_p (tmp, regno)) 2688 bitmap_clear_bit (tmp, regno); 2689 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE)) 2690 { 2691 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno); 2692 2693 #ifdef STACK_REGS 2694 /* For stack registers, treat reads from them as reads from 2695 the first one to be consistent with sched-deps.c. */ 2696 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) 2697 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG); 2698 #endif 2699 } 2700 } 2701 2702 return_regset_to_pool (tmp); 2703 } 2704 2705 /* Initialize instruction data for INSN in ID using DF's data. */ 2706 static void 2707 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p) 2708 { 2709 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL); 2710 2711 setup_id_for_insn (id, insn, force_unique_p); 2712 setup_id_lhs_rhs (id, insn, force_unique_p); 2713 2714 if (INSN_NOP_P (insn)) 2715 return; 2716 2717 maybe_downgrade_id_to_use (id, insn); 2718 setup_id_reg_sets (id, insn); 2719 } 2720 2721 /* Initialize instruction data for INSN in ID. */ 2722 static void 2723 deps_init_id (idata_t id, insn_t insn, bool force_unique_p) 2724 { 2725 struct deps_desc _dc, *dc = &_dc; 2726 2727 deps_init_id_data.where = DEPS_IN_NOWHERE; 2728 deps_init_id_data.id = id; 2729 deps_init_id_data.force_unique_p = force_unique_p; 2730 deps_init_id_data.force_use_p = false; 2731 2732 init_deps (dc, false); 2733 2734 memcpy (&deps_init_id_sched_deps_info, 2735 &const_deps_init_id_sched_deps_info, 2736 sizeof (deps_init_id_sched_deps_info)); 2737 2738 if (spec_info != NULL) 2739 deps_init_id_sched_deps_info.generate_spec_deps = 1; 2740 2741 sched_deps_info = &deps_init_id_sched_deps_info; 2742 2743 deps_analyze_insn (dc, insn); 2744 2745 free_deps (dc); 2746 2747 deps_init_id_data.id = NULL; 2748 } 2749 2750 2751 struct sched_scan_info_def 2752 { 2753 /* This hook notifies scheduler frontend to extend its internal per basic 2754 block data structures. This hook should be called once before a series of 2755 calls to bb_init (). */ 2756 void (*extend_bb) (void); 2757 2758 /* This hook makes scheduler frontend to initialize its internal data 2759 structures for the passed basic block. */ 2760 void (*init_bb) (basic_block); 2761 2762 /* This hook notifies scheduler frontend to extend its internal per insn data 2763 structures. This hook should be called once before a series of calls to 2764 insn_init (). */ 2765 void (*extend_insn) (void); 2766 2767 /* This hook makes scheduler frontend to initialize its internal data 2768 structures for the passed insn. */ 2769 void (*init_insn) (rtx); 2770 }; 2771 2772 /* A driver function to add a set of basic blocks (BBS) to the 2773 scheduling region. 
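
   A typical instantiation, the way sel_init_global_and_expr below
   uses it (the hook names here are placeholders for whatever
   callbacks a client provides; any of them, like extend_bb in this
   sketch, may be NULL):

     const struct sched_scan_info_def ssi
       = { NULL, init_bb_hook, extend_insn_hook, init_insn_hook };

     sched_scan (&ssi, bbs);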
*/ 2774 static void 2775 sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs) 2776 { 2777 unsigned i; 2778 basic_block bb; 2779 2780 if (ssi->extend_bb) 2781 ssi->extend_bb (); 2782 2783 if (ssi->init_bb) 2784 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb) 2785 ssi->init_bb (bb); 2786 2787 if (ssi->extend_insn) 2788 ssi->extend_insn (); 2789 2790 if (ssi->init_insn) 2791 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb) 2792 { 2793 rtx insn; 2794 2795 FOR_BB_INSNS (bb, insn) 2796 ssi->init_insn (insn); 2797 } 2798 } 2799 2800 /* Implement hooks for collecting fundamental insn properties like if insn is 2801 an ASM or is within a SCHED_GROUP. */ 2802 2803 /* True when a "one-time init" data for INSN was already inited. */ 2804 static bool 2805 first_time_insn_init (insn_t insn) 2806 { 2807 return INSN_LIVE (insn) == NULL; 2808 } 2809 2810 /* Hash an entry in a transformed_insns hashtable. */ 2811 static hashval_t 2812 hash_transformed_insns (const void *p) 2813 { 2814 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old); 2815 } 2816 2817 /* Compare the entries in a transformed_insns hashtable. */ 2818 static int 2819 eq_transformed_insns (const void *p, const void *q) 2820 { 2821 rtx i1 = VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old); 2822 rtx i2 = VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old); 2823 2824 if (INSN_UID (i1) == INSN_UID (i2)) 2825 return 1; 2826 return rtx_equal_p (PATTERN (i1), PATTERN (i2)); 2827 } 2828 2829 /* Free an entry in a transformed_insns hashtable. */ 2830 static void 2831 free_transformed_insns (void *p) 2832 { 2833 struct transformed_insns *pti = (struct transformed_insns *) p; 2834 2835 vinsn_detach (pti->vinsn_old); 2836 vinsn_detach (pti->vinsn_new); 2837 free (pti); 2838 } 2839 2840 /* Init the s_i_d data for INSN which should be inited just once, when 2841 we first see the insn. */ 2842 static void 2843 init_first_time_insn_data (insn_t insn) 2844 { 2845 /* This should not be set if this is the first time we init data for 2846 insn. */ 2847 gcc_assert (first_time_insn_init (insn)); 2848 2849 /* These are needed for nops too. */ 2850 INSN_LIVE (insn) = get_regset_from_pool (); 2851 INSN_LIVE_VALID_P (insn) = false; 2852 2853 if (!INSN_NOP_P (insn)) 2854 { 2855 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL); 2856 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL); 2857 INSN_TRANSFORMED_INSNS (insn) 2858 = htab_create (16, hash_transformed_insns, 2859 eq_transformed_insns, free_transformed_insns); 2860 init_deps (&INSN_DEPS_CONTEXT (insn), true); 2861 } 2862 } 2863 2864 /* Free almost all above data for INSN that is scheduled already. 2865 Used for extra-large basic blocks. */ 2866 void 2867 free_data_for_scheduled_insn (insn_t insn) 2868 { 2869 gcc_assert (! first_time_insn_init (insn)); 2870 2871 if (! INSN_ANALYZED_DEPS (insn)) 2872 return; 2873 2874 BITMAP_FREE (INSN_ANALYZED_DEPS (insn)); 2875 BITMAP_FREE (INSN_FOUND_DEPS (insn)); 2876 htab_delete (INSN_TRANSFORMED_INSNS (insn)); 2877 2878 /* This is allocated only for bookkeeping insns. */ 2879 if (INSN_ORIGINATORS (insn)) 2880 BITMAP_FREE (INSN_ORIGINATORS (insn)); 2881 free_deps (&INSN_DEPS_CONTEXT (insn)); 2882 2883 INSN_ANALYZED_DEPS (insn) = NULL; 2884 2885 /* Clear the readonly flag so we would ICE when trying to recalculate 2886 the deps context (as we believe that it should not happen). */ 2887 (&INSN_DEPS_CONTEXT (insn))->readonly = 0; 2888 } 2889 2890 /* Free the same data as above for INSN. 
*/ 2891 static void 2892 free_first_time_insn_data (insn_t insn) 2893 { 2894 gcc_assert (! first_time_insn_init (insn)); 2895 2896 free_data_for_scheduled_insn (insn); 2897 return_regset_to_pool (INSN_LIVE (insn)); 2898 INSN_LIVE (insn) = NULL; 2899 INSN_LIVE_VALID_P (insn) = false; 2900 } 2901 2902 /* Initialize region-scope data structures for basic blocks. */ 2903 static void 2904 init_global_and_expr_for_bb (basic_block bb) 2905 { 2906 if (sel_bb_empty_p (bb)) 2907 return; 2908 2909 invalidate_av_set (bb); 2910 } 2911 2912 /* Data for global dependency analysis (to initialize CANT_MOVE and 2913 SCHED_GROUP_P). */ 2914 static struct 2915 { 2916 /* Previous insn. */ 2917 insn_t prev_insn; 2918 } init_global_data; 2919 2920 /* Determine if INSN is in the sched_group, is an asm or should not be 2921 cloned. After that initialize its expr. */ 2922 static void 2923 init_global_and_expr_for_insn (insn_t insn) 2924 { 2925 if (LABEL_P (insn)) 2926 return; 2927 2928 if (NOTE_INSN_BASIC_BLOCK_P (insn)) 2929 { 2930 init_global_data.prev_insn = NULL_RTX; 2931 return; 2932 } 2933 2934 gcc_assert (INSN_P (insn)); 2935 2936 if (SCHED_GROUP_P (insn)) 2937 /* Setup a sched_group. */ 2938 { 2939 insn_t prev_insn = init_global_data.prev_insn; 2940 2941 if (prev_insn) 2942 INSN_SCHED_NEXT (prev_insn) = insn; 2943 2944 init_global_data.prev_insn = insn; 2945 } 2946 else 2947 init_global_data.prev_insn = NULL_RTX; 2948 2949 if (GET_CODE (PATTERN (insn)) == ASM_INPUT 2950 || asm_noperands (PATTERN (insn)) >= 0) 2951 /* Mark INSN as an asm. */ 2952 INSN_ASM_P (insn) = true; 2953 2954 { 2955 bool force_unique_p; 2956 ds_t spec_done_ds; 2957 2958 /* Certain instructions cannot be cloned, and frame related insns and 2959 the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of 2960 their block. */ 2961 if (prologue_epilogue_contains (insn)) 2962 { 2963 if (RTX_FRAME_RELATED_P (insn)) 2964 CANT_MOVE (insn) = 1; 2965 else 2966 { 2967 rtx note; 2968 for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) 2969 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE 2970 && ((enum insn_note) INTVAL (XEXP (note, 0)) 2971 == NOTE_INSN_EPILOGUE_BEG)) 2972 { 2973 CANT_MOVE (insn) = 1; 2974 break; 2975 } 2976 } 2977 force_unique_p = true; 2978 } 2979 else 2980 if (CANT_MOVE (insn) 2981 || INSN_ASM_P (insn) 2982 || SCHED_GROUP_P (insn) 2983 || CALL_P (insn) 2984 /* Exception handling insns are always unique. */ 2985 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn)) 2986 /* TRAP_IF though have an INSN code is control_flow_insn_p (). */ 2987 || control_flow_insn_p (insn) 2988 || volatile_insn_p (PATTERN (insn)) 2989 || (targetm.cannot_copy_insn_p 2990 && targetm.cannot_copy_insn_p (insn))) 2991 force_unique_p = true; 2992 else 2993 force_unique_p = false; 2994 2995 if (targetm.sched.get_insn_spec_ds) 2996 { 2997 spec_done_ds = targetm.sched.get_insn_spec_ds (insn); 2998 spec_done_ds = ds_get_max_dep_weak (spec_done_ds); 2999 } 3000 else 3001 spec_done_ds = 0; 3002 3003 /* Initialize INSN's expr. */ 3004 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0, 3005 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn), 3006 spec_done_ds, 0, 0, NULL, true, false, false, false, 3007 CANT_MOVE (insn)); 3008 } 3009 3010 init_first_time_insn_data (insn); 3011 } 3012 3013 /* Scan the region and initialize instruction data for basic blocks BBS. */ 3014 void 3015 sel_init_global_and_expr (bb_vec_t bbs) 3016 { 3017 /* ??? It would be nice to implement push / pop scheme for sched_infos. 
*/ 3018 const struct sched_scan_info_def ssi = 3019 { 3020 NULL, /* extend_bb */ 3021 init_global_and_expr_for_bb, /* init_bb */ 3022 extend_insn_data, /* extend_insn */ 3023 init_global_and_expr_for_insn /* init_insn */ 3024 }; 3025 3026 sched_scan (&ssi, bbs); 3027 } 3028 3029 /* Finalize region-scope data structures for basic blocks. */ 3030 static void 3031 finish_global_and_expr_for_bb (basic_block bb) 3032 { 3033 av_set_clear (&BB_AV_SET (bb)); 3034 BB_AV_LEVEL (bb) = 0; 3035 } 3036 3037 /* Finalize INSN's data. */ 3038 static void 3039 finish_global_and_expr_insn (insn_t insn) 3040 { 3041 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn)) 3042 return; 3043 3044 gcc_assert (INSN_P (insn)); 3045 3046 if (INSN_LUID (insn) > 0) 3047 { 3048 free_first_time_insn_data (insn); 3049 INSN_WS_LEVEL (insn) = 0; 3050 CANT_MOVE (insn) = 0; 3051 3052 /* We can no longer assert this, as vinsns of this insn could be 3053 easily live in other insn's caches. This should be changed to 3054 a counter-like approach among all vinsns. */ 3055 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1); 3056 clear_expr (INSN_EXPR (insn)); 3057 } 3058 } 3059 3060 /* Finalize per instruction data for the whole region. */ 3061 void 3062 sel_finish_global_and_expr (void) 3063 { 3064 { 3065 bb_vec_t bbs; 3066 int i; 3067 3068 bbs = VEC_alloc (basic_block, heap, current_nr_blocks); 3069 3070 for (i = 0; i < current_nr_blocks; i++) 3071 VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i))); 3072 3073 /* Clear AV_SETs and INSN_EXPRs. */ 3074 { 3075 const struct sched_scan_info_def ssi = 3076 { 3077 NULL, /* extend_bb */ 3078 finish_global_and_expr_for_bb, /* init_bb */ 3079 NULL, /* extend_insn */ 3080 finish_global_and_expr_insn /* init_insn */ 3081 }; 3082 3083 sched_scan (&ssi, bbs); 3084 } 3085 3086 VEC_free (basic_block, heap, bbs); 3087 } 3088 3089 finish_insns (); 3090 } 3091 3092 3093 /* In the below hooks, we merely calculate whether or not a dependence 3094 exists, and in what part of insn. However, we will need more data 3095 when we'll start caching dependence requests. */ 3096 3097 /* Container to hold information for dependency analysis. */ 3098 static struct 3099 { 3100 deps_t dc; 3101 3102 /* A variable to track which part of rtx we are scanning in 3103 sched-deps.c: sched_analyze_insn (). */ 3104 deps_where_t where; 3105 3106 /* Current producer. */ 3107 insn_t pro; 3108 3109 /* Current consumer. */ 3110 vinsn_t con; 3111 3112 /* Is SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence. 3113 X is from { INSN, LHS, RHS }. */ 3114 ds_t has_dep_p[DEPS_IN_NOWHERE]; 3115 } has_dependence_data; 3116 3117 /* Start analyzing dependencies of INSN. */ 3118 static void 3119 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED) 3120 { 3121 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE); 3122 3123 has_dependence_data.where = DEPS_IN_INSN; 3124 } 3125 3126 /* Finish analyzing dependencies of an insn. */ 3127 static void 3128 has_dependence_finish_insn (void) 3129 { 3130 gcc_assert (has_dependence_data.where == DEPS_IN_INSN); 3131 3132 has_dependence_data.where = DEPS_IN_NOWHERE; 3133 } 3134 3135 /* Start analyzing dependencies of LHS. */ 3136 static void 3137 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED) 3138 { 3139 gcc_assert (has_dependence_data.where == DEPS_IN_INSN); 3140 3141 if (VINSN_LHS (has_dependence_data.con) != NULL) 3142 has_dependence_data.where = DEPS_IN_LHS; 3143 } 3144 3145 /* Finish analyzing dependencies of an lhs. 
*/
static void
has_dependence_finish_lhs (void)
{
  has_dependence_data.where = DEPS_IN_INSN;
}

/* Start analyzing dependencies of RHS.  */
static void
has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
{
  gcc_assert (has_dependence_data.where == DEPS_IN_INSN);

  if (VINSN_RHS (has_dependence_data.con) != NULL)
    has_dependence_data.where = DEPS_IN_RHS;
}

/* Finish analyzing dependencies of an rhs.  */
static void
has_dependence_finish_rhs (void)
{
  gcc_assert (has_dependence_data.where == DEPS_IN_RHS
              || has_dependence_data.where == DEPS_IN_INSN);

  has_dependence_data.where = DEPS_IN_INSN;
}

/* Note a set of REGNO.  */
static void
has_dependence_note_reg_set (int regno)
{
  struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];

  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
                                       VINSN_INSN_RTX
                                       (has_dependence_data.con)))
    {
      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];

      if (reg_last->sets != NULL
          || reg_last->clobbers != NULL)
        *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;

      if (reg_last->uses)
        *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
    }
}

/* Note a clobber of REGNO.  */
static void
has_dependence_note_reg_clobber (int regno)
{
  struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];

  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
                                       VINSN_INSN_RTX
                                       (has_dependence_data.con)))
    {
      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];

      if (reg_last->sets)
        *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;

      if (reg_last->uses)
        *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
    }
}

/* Note a use of REGNO.  */
static void
has_dependence_note_reg_use (int regno)
{
  struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];

  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
                                       VINSN_INSN_RTX
                                       (has_dependence_data.con)))
    {
      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];

      if (reg_last->sets)
        *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;

      if (reg_last->clobbers)
        *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;

      /* Merge BE_IN_SPEC bits into *DSP when the dependency producer
         is actually a check insn.  We need to do this for any register
         read-read dependency with the check unless we track properly
         all registers written by BE_IN_SPEC-speculated insns, as
         we don't have explicit dependence lists.  See PR 53975.  */
      if (reg_last->uses)
        {
          ds_t pro_spec_checked_ds;

          pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
          pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);

          if (pro_spec_checked_ds != 0)
            *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
                                  NULL_RTX, NULL_RTX);
        }
    }
}

/* Note a memory dependence.
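
   E.g. if the producer insn has a pending store to a location that
   the consumer may read (the classic store-then-load pair), sched-deps
   invokes this hook with DS describing that dependence, and we fold
   it into the status bits collected for the current part of the insn.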
*/
static void
has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
                             rtx pending_mem ATTRIBUTE_UNUSED,
                             insn_t pending_insn ATTRIBUTE_UNUSED,
                             ds_t ds ATTRIBUTE_UNUSED)
{
  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
                                       VINSN_INSN_RTX (has_dependence_data.con)))
    {
      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];

      *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
    }
}

/* Note a dependence.  */
static void
has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
                         ds_t ds ATTRIBUTE_UNUSED)
{
  if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
                                       VINSN_INSN_RTX (has_dependence_data.con)))
    {
      ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];

      *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
    }
}

/* Mark the insn as having a hard dependence that prevents speculation.  */
void
sel_mark_hard_insn (rtx insn)
{
  int i;

  /* Only work when we're in has_dependence_p mode.
     ??? This is a hack, this should actually be a hook.  */
  if (!has_dependence_data.dc || !has_dependence_data.pro)
    return;

  gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
  gcc_assert (has_dependence_data.where == DEPS_IN_INSN);

  for (i = 0; i < DEPS_IN_NOWHERE; i++)
    has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
}

/* This structure holds the hooks for the dependency analysis used when
   actually processing dependencies in the scheduler.  */
static struct sched_deps_info_def has_dependence_sched_deps_info;

/* This initializes most of the fields of the above structure.  */
static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
  {
    NULL,

    has_dependence_start_insn,
    has_dependence_finish_insn,
    has_dependence_start_lhs,
    has_dependence_finish_lhs,
    has_dependence_start_rhs,
    has_dependence_finish_rhs,
    has_dependence_note_reg_set,
    has_dependence_note_reg_clobber,
    has_dependence_note_reg_use,
    has_dependence_note_mem_dep,
    has_dependence_note_dep,

    0, /* use_cselib */
    0, /* use_deps_list */
    0 /* generate_spec_deps */
  };

/* Initialize has_dependence_sched_deps_info with extra spec field.  */
static void
setup_has_dependence_sched_deps_info (void)
{
  memcpy (&has_dependence_sched_deps_info,
          &const_has_dependence_sched_deps_info,
          sizeof (has_dependence_sched_deps_info));

  if (spec_info != NULL)
    has_dependence_sched_deps_info.generate_spec_deps = 1;

  sched_deps_info = &has_dependence_sched_deps_info;
}

/* Remove all dependences found and recorded in the has_dependence_data
   array.  */
void
sel_clear_has_dependence (void)
{
  int i;

  for (i = 0; i < DEPS_IN_NOWHERE; i++)
    has_dependence_data.has_dep_p[i] = 0;
}

/* Return nonzero if EXPR is dependent upon PRED.  Return the pointer
   to the dependence information array in HAS_DEP_PP.  */
ds_t
has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
{
  int i;
  ds_t ds;
  struct deps_desc *dc;

  if (INSN_SIMPLEJUMP_P (pred))
    /* Unconditional jump is just a transfer of control flow.
       Ignore it.  */
    return false;

  dc = &INSN_DEPS_CONTEXT (pred);

  /* We init this field lazily.
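
   Allocating REG_LAST eagerly for every deps context would be wasted
   effort, since most insns are never queried as producers; it is set
   up here on the first query against PRED instead.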
*/
  if (dc->reg_last == NULL)
    init_deps_reg_last (dc);

  if (!dc->readonly)
    {
      has_dependence_data.pro = NULL;
      /* Initialize empty dep context with information about PRED.  */
      advance_deps_context (dc, pred);
      dc->readonly = 1;
    }

  has_dependence_data.where = DEPS_IN_NOWHERE;
  has_dependence_data.pro = pred;
  has_dependence_data.con = EXPR_VINSN (expr);
  has_dependence_data.dc = dc;

  sel_clear_has_dependence ();

  /* Now catch all dependencies that would be generated between PRED and
     INSN.  */
  setup_has_dependence_sched_deps_info ();
  deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
  has_dependence_data.dc = NULL;

  /* When a barrier was found, set DEPS_IN_INSN bits.  */
  if (dc->last_reg_pending_barrier == TRUE_BARRIER)
    has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
  else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
    has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;

  /* Do not allow stores to memory to move through checks.  Currently
     we don't move this to sched-deps.c as the check doesn't have
     obvious places to which this dependence can be attached.
     FIXME: this should go to a hook.  */
  if (EXPR_LHS (expr)
      && MEM_P (EXPR_LHS (expr))
      && sel_insn_is_speculation_check (pred))
    has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;

  *has_dep_pp = has_dependence_data.has_dep_p;
  ds = 0;
  for (i = 0; i < DEPS_IN_NOWHERE; i++)
    ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
                        NULL_RTX, NULL_RTX);

  return ds;
}


/* Dependence hooks implementation that checks dependence latency constraints
   on the insns being scheduled.  The entry point for these routines is
   the tick_check_p predicate.  */

static struct
{
  /* An expr we are currently checking.  */
  expr_t expr;

  /* A minimal cycle for its scheduling.  */
  int cycle;

  /* Whether we have seen a true dependence while checking.  */
  bool seen_true_dep_p;
} tick_check_data;

/* Update minimal scheduling cycle for tick_check_insn given that it depends
   on PRO with status DS and weight DW.  */
static void
tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
{
  expr_t con_expr = tick_check_data.expr;
  insn_t con_insn = EXPR_INSN_RTX (con_expr);

  if (con_insn != pro_insn)
    {
      enum reg_note dt;
      int tick;

      if (/* PROducer was removed from above due to pipelining.  */
          !INSN_IN_STREAM_P (pro_insn)
          /* Or PROducer was originally on the next iteration regarding the
             CONsumer.  */
          || (INSN_SCHED_TIMES (pro_insn)
              - EXPR_SCHED_TIMES (con_expr)) > 1)
        /* Don't count this dependence.  */
        return;

      dt = ds_to_dt (ds);
      if (dt == REG_DEP_TRUE)
        tick_check_data.seen_true_dep_p = true;

      gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);

      {
        dep_def _dep, *dep = &_dep;

        init_dep (dep, pro_insn, con_insn, dt);

        tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
      }

      /* When there are several kinds of dependencies between pro and con,
         only REG_DEP_TRUE should be taken into account.  */
      if (tick > tick_check_data.cycle
          && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
        tick_check_data.cycle = tick;
    }
}

/* An implementation of note_dep hook.
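
   A worked example of the tick computation these hooks feed (cycle
   numbers invented for illustration): if the producer was scheduled
   on cycle 3 and the dependence latency is 2, the consumer cannot
   issue before cycle 5; with the fence currently at cycle 4,
   tick_check_p below reports 1 cycle left to wait.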
*/ 3475 static void 3476 tick_check_note_dep (insn_t pro, ds_t ds) 3477 { 3478 tick_check_dep_with_dw (pro, ds, 0); 3479 } 3480 3481 /* An implementation of note_mem_dep hook. */ 3482 static void 3483 tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds) 3484 { 3485 dw_t dw; 3486 3487 dw = (ds_to_dt (ds) == REG_DEP_TRUE 3488 ? estimate_dep_weak (mem1, mem2) 3489 : 0); 3490 3491 tick_check_dep_with_dw (pro, ds, dw); 3492 } 3493 3494 /* This structure contains hooks for dependence analysis used when determining 3495 whether an insn is ready for scheduling. */ 3496 static struct sched_deps_info_def tick_check_sched_deps_info = 3497 { 3498 NULL, 3499 3500 NULL, 3501 NULL, 3502 NULL, 3503 NULL, 3504 NULL, 3505 NULL, 3506 haifa_note_reg_set, 3507 haifa_note_reg_clobber, 3508 haifa_note_reg_use, 3509 tick_check_note_mem_dep, 3510 tick_check_note_dep, 3511 3512 0, 0, 0 3513 }; 3514 3515 /* Estimate number of cycles from the current cycle of FENCE until EXPR can be 3516 scheduled. Return 0 if all data from producers in DC is ready. */ 3517 int 3518 tick_check_p (expr_t expr, deps_t dc, fence_t fence) 3519 { 3520 int cycles_left; 3521 /* Initialize variables. */ 3522 tick_check_data.expr = expr; 3523 tick_check_data.cycle = 0; 3524 tick_check_data.seen_true_dep_p = false; 3525 sched_deps_info = &tick_check_sched_deps_info; 3526 3527 gcc_assert (!dc->readonly); 3528 dc->readonly = 1; 3529 deps_analyze_insn (dc, EXPR_INSN_RTX (expr)); 3530 dc->readonly = 0; 3531 3532 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence); 3533 3534 return cycles_left >= 0 ? cycles_left : 0; 3535 } 3536 3537 3538 /* Functions to work with insns. */ 3539 3540 /* Returns true if LHS of INSN is the same as DEST of an insn 3541 being moved. */ 3542 bool 3543 lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest) 3544 { 3545 rtx lhs = INSN_LHS (insn); 3546 3547 if (lhs == NULL || dest == NULL) 3548 return false; 3549 3550 return rtx_equal_p (lhs, dest); 3551 } 3552 3553 /* Return s_i_d entry of INSN. Callable from debugger. */ 3554 sel_insn_data_def 3555 insn_sid (insn_t insn) 3556 { 3557 return *SID (insn); 3558 } 3559 3560 /* True when INSN is a speculative check. We can tell this by looking 3561 at the data structures of the selective scheduler, not by examining 3562 the pattern. */ 3563 bool 3564 sel_insn_is_speculation_check (rtx insn) 3565 { 3566 return s_i_d && !! INSN_SPEC_CHECKED_DS (insn); 3567 } 3568 3569 /* Extracts machine mode MODE and destination location DST_LOC 3570 for given INSN. */ 3571 void 3572 get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode) 3573 { 3574 rtx pat = PATTERN (insn); 3575 3576 gcc_assert (dst_loc); 3577 gcc_assert (GET_CODE (pat) == SET); 3578 3579 *dst_loc = SET_DEST (pat); 3580 3581 gcc_assert (*dst_loc); 3582 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc)); 3583 3584 if (mode) 3585 *mode = GET_MODE (*dst_loc); 3586 } 3587 3588 /* Returns true when moving through JUMP will result in bookkeeping 3589 creation. */ 3590 bool 3591 bookkeeping_can_be_created_if_moved_through_p (insn_t jump) 3592 { 3593 insn_t succ; 3594 succ_iterator si; 3595 3596 FOR_EACH_SUCC (succ, si, jump) 3597 if (sel_num_cfg_preds_gt_1 (succ)) 3598 return true; 3599 3600 return false; 3601 } 3602 3603 /* Return 'true' if INSN is the only one in its basic block. 
*/
static bool
insn_is_the_only_one_in_bb_p (insn_t insn)
{
  return sel_bb_head_p (insn) && sel_bb_end_p (insn);
}

#ifdef ENABLE_CHECKING
/* Check that the region we're scheduling still has at most one
   backedge.  */
static void
verify_backedges (void)
{
  if (pipelining_p)
    {
      int i, n = 0;
      edge e;
      edge_iterator ei;

      for (i = 0; i < current_nr_blocks; i++)
        FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
          if (in_current_region_p (e->dest)
              && BLOCK_TO_BB (e->dest->index) < i)
            n++;

      gcc_assert (n <= 1);
    }
}
#endif


/* Functions to work with control flow.  */

/* Recompute BLOCK_TO_BB and BB_FOR_BLOCK for current region so that blocks
   are sorted in topological order (it might have been invalidated by
   redirecting an edge).  */
static void
sel_recompute_toporder (void)
{
  int i, n, rgn;
  int *postorder, n_blocks;

  postorder = XALLOCAVEC (int, n_basic_blocks);
  n_blocks = post_order_compute (postorder, false, false);

  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  for (n = 0, i = n_blocks - 1; i >= 0; i--)
    if (CONTAINING_RGN (postorder[i]) == rgn)
      {
        BLOCK_TO_BB (postorder[i]) = n;
        BB_TO_BLOCK (n) = postorder[i];
        n++;
      }

  /* Assert that we updated info for all blocks.  We may miss some blocks if
     this function is called when redirecting an edge made a block
     unreachable, but that block is not deleted yet.  */
  gcc_assert (n == RGN_NR_BLOCKS (rgn));
}

/* Tidy the possibly empty block BB.  */
static bool
maybe_tidy_empty_bb (basic_block bb)
{
  basic_block succ_bb, pred_bb, note_bb;
  VEC (basic_block, heap) *dom_bbs;
  edge e;
  edge_iterator ei;
  bool rescan_p;

  /* Keep empty bb only if this block immediately precedes EXIT and
     has an incoming non-fallthrough edge, or it has no predecessors or
     successors.  Otherwise remove it.  */
  if (!sel_bb_empty_p (bb)
      || (single_succ_p (bb)
          && single_succ (bb) == EXIT_BLOCK_PTR
          && (!single_pred_p (bb)
              || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
      || EDGE_COUNT (bb->preds) == 0
      || EDGE_COUNT (bb->succs) == 0)
    return false;

  /* Do not attempt to redirect complex edges.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_COMPLEX)
      return false;
    else if (e->flags & EDGE_FALLTHRU)
      {
        rtx note;
        /* If prev bb ends with asm goto, see if any of the
           ASM_OPERANDS_LABELs don't point to the fallthru
           label.  Do not attempt to redirect it in that case.  */
        if (JUMP_P (BB_END (e->src))
            && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
          {
            int i, n = ASM_OPERANDS_LABEL_LENGTH (note);

            for (i = 0; i < n; ++i)
              if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb))
                return false;
          }
      }

  free_data_sets (bb);

  /* Do not delete BB if it has more than one successor.
     That can occur when we are moving a jump.  */
  if (!single_succ_p (bb))
    {
      gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
      sel_merge_blocks (bb->prev_bb, bb);
      return true;
    }

  succ_bb = single_succ (bb);
  rescan_p = true;
  pred_bb = NULL;
  dom_bbs = NULL;

  /* Save a pred/succ from the current region to attach the notes to.
*/ 3723 note_bb = NULL; 3724 FOR_EACH_EDGE (e, ei, bb->preds) 3725 if (in_current_region_p (e->src)) 3726 { 3727 note_bb = e->src; 3728 break; 3729 } 3730 if (note_bb == NULL) 3731 note_bb = succ_bb; 3732 3733 /* Redirect all non-fallthru edges to the next bb. */ 3734 while (rescan_p) 3735 { 3736 rescan_p = false; 3737 3738 FOR_EACH_EDGE (e, ei, bb->preds) 3739 { 3740 pred_bb = e->src; 3741 3742 if (!(e->flags & EDGE_FALLTHRU)) 3743 { 3744 /* We can not invalidate computed topological order by moving 3745 the edge destination block (E->SUCC) along a fallthru edge. 3746 3747 We will update dominators here only when we'll get 3748 an unreachable block when redirecting, otherwise 3749 sel_redirect_edge_and_branch will take care of it. */ 3750 if (e->dest != bb 3751 && single_pred_p (e->dest)) 3752 VEC_safe_push (basic_block, heap, dom_bbs, e->dest); 3753 sel_redirect_edge_and_branch (e, succ_bb); 3754 rescan_p = true; 3755 break; 3756 } 3757 /* If the edge is fallthru, but PRED_BB ends in a conditional jump 3758 to BB (so there is no non-fallthru edge from PRED_BB to BB), we 3759 still have to adjust it. */ 3760 else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb))) 3761 { 3762 /* If possible, try to remove the unneeded conditional jump. */ 3763 if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0 3764 && !IN_CURRENT_FENCE_P (BB_END (pred_bb))) 3765 { 3766 if (!sel_remove_insn (BB_END (pred_bb), false, false)) 3767 tidy_fallthru_edge (e); 3768 } 3769 else 3770 sel_redirect_edge_and_branch (e, succ_bb); 3771 rescan_p = true; 3772 break; 3773 } 3774 } 3775 } 3776 3777 if (can_merge_blocks_p (bb->prev_bb, bb)) 3778 sel_merge_blocks (bb->prev_bb, bb); 3779 else 3780 { 3781 /* This is a block without fallthru predecessor. Just delete it. */ 3782 gcc_assert (note_bb); 3783 move_bb_info (note_bb, bb); 3784 remove_empty_bb (bb, true); 3785 } 3786 3787 if (!VEC_empty (basic_block, dom_bbs)) 3788 { 3789 VEC_safe_push (basic_block, heap, dom_bbs, succ_bb); 3790 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false); 3791 VEC_free (basic_block, heap, dom_bbs); 3792 } 3793 3794 return true; 3795 } 3796 3797 /* Tidy the control flow after we have removed original insn from 3798 XBB. Return true if we have removed some blocks. When FULL_TIDYING 3799 is true, also try to optimize control flow on non-empty blocks. */ 3800 bool 3801 tidy_control_flow (basic_block xbb, bool full_tidying) 3802 { 3803 bool changed = true; 3804 insn_t first, last; 3805 3806 /* First check whether XBB is empty. */ 3807 changed = maybe_tidy_empty_bb (xbb); 3808 if (changed || !full_tidying) 3809 return changed; 3810 3811 /* Check if there is a unnecessary jump after insn left. */ 3812 if (bb_has_removable_jump_to_p (xbb, xbb->next_bb) 3813 && INSN_SCHED_TIMES (BB_END (xbb)) == 0 3814 && !IN_CURRENT_FENCE_P (BB_END (xbb))) 3815 { 3816 if (sel_remove_insn (BB_END (xbb), false, false)) 3817 return true; 3818 tidy_fallthru_edge (EDGE_SUCC (xbb, 0)); 3819 } 3820 3821 first = sel_bb_head (xbb); 3822 last = sel_bb_end (xbb); 3823 if (MAY_HAVE_DEBUG_INSNS) 3824 { 3825 if (first != last && DEBUG_INSN_P (first)) 3826 do 3827 first = NEXT_INSN (first); 3828 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first))); 3829 3830 if (first != last && DEBUG_INSN_P (last)) 3831 do 3832 last = PREV_INSN (last); 3833 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last))); 3834 } 3835 /* Check if there is an unnecessary jump in previous basic block leading 3836 to next basic block left after removing INSN from stream. 
3837 If it is so, remove that jump and redirect edge to current 3838 basic block (where there was INSN before deletion). This way 3839 when NOP will be deleted several instructions later with its 3840 basic block we will not get a jump to next instruction, which 3841 can be harmful. */ 3842 if (first == last 3843 && !sel_bb_empty_p (xbb) 3844 && INSN_NOP_P (last) 3845 /* Flow goes fallthru from current block to the next. */ 3846 && EDGE_COUNT (xbb->succs) == 1 3847 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU) 3848 /* When successor is an EXIT block, it may not be the next block. */ 3849 && single_succ (xbb) != EXIT_BLOCK_PTR 3850 /* And unconditional jump in previous basic block leads to 3851 next basic block of XBB and this jump can be safely removed. */ 3852 && in_current_region_p (xbb->prev_bb) 3853 && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb) 3854 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0 3855 /* Also this jump is not at the scheduling boundary. */ 3856 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb))) 3857 { 3858 bool recompute_toporder_p; 3859 /* Clear data structures of jump - jump itself will be removed 3860 by sel_redirect_edge_and_branch. */ 3861 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb))); 3862 recompute_toporder_p 3863 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb); 3864 3865 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU); 3866 3867 /* It can turn out that after removing unused jump, basic block 3868 that contained that jump, becomes empty too. In such case 3869 remove it too. */ 3870 if (sel_bb_empty_p (xbb->prev_bb)) 3871 changed = maybe_tidy_empty_bb (xbb->prev_bb); 3872 if (recompute_toporder_p) 3873 sel_recompute_toporder (); 3874 } 3875 3876 #ifdef ENABLE_CHECKING 3877 verify_backedges (); 3878 verify_dominators (CDI_DOMINATORS); 3879 #endif 3880 3881 return changed; 3882 } 3883 3884 /* Purge meaningless empty blocks in the middle of a region. */ 3885 void 3886 purge_empty_blocks (void) 3887 { 3888 int i; 3889 3890 /* Do not attempt to delete the first basic block in the region. */ 3891 for (i = 1; i < current_nr_blocks; ) 3892 { 3893 basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i)); 3894 3895 if (maybe_tidy_empty_bb (b)) 3896 continue; 3897 3898 i++; 3899 } 3900 } 3901 3902 /* Rip-off INSN from the insn stream. When ONLY_DISCONNECT is true, 3903 do not delete insn's data, because it will be later re-emitted. 3904 Return true if we have removed some blocks afterwards. */ 3905 bool 3906 sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying) 3907 { 3908 basic_block bb = BLOCK_FOR_INSN (insn); 3909 3910 gcc_assert (INSN_IN_STREAM_P (insn)); 3911 3912 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb)) 3913 { 3914 expr_t expr; 3915 av_set_iterator i; 3916 3917 /* When we remove a debug insn that is head of a BB, it remains 3918 in the AV_SET of the block, but it shouldn't. 
*/ 3919 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb)) 3920 if (EXPR_INSN_RTX (expr) == insn) 3921 { 3922 av_set_iter_remove (&i); 3923 break; 3924 } 3925 } 3926 3927 if (only_disconnect) 3928 { 3929 insn_t prev = PREV_INSN (insn); 3930 insn_t next = NEXT_INSN (insn); 3931 basic_block bb = BLOCK_FOR_INSN (insn); 3932 3933 NEXT_INSN (prev) = next; 3934 PREV_INSN (next) = prev; 3935 3936 if (BB_HEAD (bb) == insn) 3937 { 3938 gcc_assert (BLOCK_FOR_INSN (prev) == bb); 3939 BB_HEAD (bb) = prev; 3940 } 3941 if (BB_END (bb) == insn) 3942 BB_END (bb) = prev; 3943 } 3944 else 3945 { 3946 remove_insn (insn); 3947 clear_expr (INSN_EXPR (insn)); 3948 } 3949 3950 /* It is necessary to null this fields before calling add_insn (). */ 3951 PREV_INSN (insn) = NULL_RTX; 3952 NEXT_INSN (insn) = NULL_RTX; 3953 3954 return tidy_control_flow (bb, full_tidying); 3955 } 3956 3957 /* Estimate number of the insns in BB. */ 3958 static int 3959 sel_estimate_number_of_insns (basic_block bb) 3960 { 3961 int res = 0; 3962 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb)); 3963 3964 for (; insn != next_tail; insn = NEXT_INSN (insn)) 3965 if (NONDEBUG_INSN_P (insn)) 3966 res++; 3967 3968 return res; 3969 } 3970 3971 /* We don't need separate luids for notes or labels. */ 3972 static int 3973 sel_luid_for_non_insn (rtx x) 3974 { 3975 gcc_assert (NOTE_P (x) || LABEL_P (x)); 3976 3977 return -1; 3978 } 3979 3980 /* Find the proper seqno for inserting at INSN by successors. 3981 Return -1 if no successors with positive seqno exist. */ 3982 static int 3983 get_seqno_by_succs (rtx insn) 3984 { 3985 basic_block bb = BLOCK_FOR_INSN (insn); 3986 rtx tmp = insn, end = BB_END (bb); 3987 int seqno; 3988 insn_t succ = NULL; 3989 succ_iterator si; 3990 3991 while (tmp != end) 3992 { 3993 tmp = NEXT_INSN (tmp); 3994 if (INSN_P (tmp)) 3995 return INSN_SEQNO (tmp); 3996 } 3997 3998 seqno = INT_MAX; 3999 4000 FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL) 4001 if (INSN_SEQNO (succ) > 0) 4002 seqno = MIN (seqno, INSN_SEQNO (succ)); 4003 4004 if (seqno == INT_MAX) 4005 return -1; 4006 4007 return seqno; 4008 } 4009 4010 /* Compute seqno for INSN by its preds or succs. */ 4011 static int 4012 get_seqno_for_a_jump (insn_t insn) 4013 { 4014 int seqno; 4015 4016 gcc_assert (INSN_SIMPLEJUMP_P (insn)); 4017 4018 if (!sel_bb_head_p (insn)) 4019 seqno = INSN_SEQNO (PREV_INSN (insn)); 4020 else 4021 { 4022 basic_block bb = BLOCK_FOR_INSN (insn); 4023 4024 if (single_pred_p (bb) 4025 && !in_current_region_p (single_pred (bb))) 4026 { 4027 /* We can have preds outside a region when splitting edges 4028 for pipelining of an outer loop. Use succ instead. 4029 There should be only one of them. */ 4030 insn_t succ = NULL; 4031 succ_iterator si; 4032 bool first = true; 4033 4034 gcc_assert (flag_sel_sched_pipelining_outer_loops 4035 && current_loop_nest); 4036 FOR_EACH_SUCC_1 (succ, si, insn, 4037 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) 4038 { 4039 gcc_assert (first); 4040 first = false; 4041 } 4042 4043 gcc_assert (succ != NULL); 4044 seqno = INSN_SEQNO (succ); 4045 } 4046 else 4047 { 4048 insn_t *preds; 4049 int n; 4050 4051 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n); 4052 4053 gcc_assert (n > 0); 4054 /* For one predecessor, use simple method. */ 4055 if (n == 1) 4056 seqno = INSN_SEQNO (preds[0]); 4057 else 4058 seqno = get_seqno_by_preds (insn); 4059 4060 free (preds); 4061 } 4062 } 4063 4064 /* We were unable to find a good seqno among preds. 
*/ 4065 if (seqno < 0) 4066 seqno = get_seqno_by_succs (insn); 4067 4068 gcc_assert (seqno >= 0); 4069 4070 return seqno; 4071 } 4072 4073 /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors 4074 with positive seqno exist. */ 4075 int 4076 get_seqno_by_preds (rtx insn) 4077 { 4078 basic_block bb = BLOCK_FOR_INSN (insn); 4079 rtx tmp = insn, head = BB_HEAD (bb); 4080 insn_t *preds; 4081 int n, i, seqno; 4082 4083 while (tmp != head) 4084 { 4085 tmp = PREV_INSN (tmp); 4086 if (INSN_P (tmp)) 4087 return INSN_SEQNO (tmp); 4088 } 4089 4090 cfg_preds (bb, &preds, &n); 4091 for (i = 0, seqno = -1; i < n; i++) 4092 seqno = MAX (seqno, INSN_SEQNO (preds[i])); 4093 4094 return seqno; 4095 } 4096 4097 4098 4099 /* Extend pass-scope data structures for basic blocks. */ 4100 void 4101 sel_extend_global_bb_info (void) 4102 { 4103 VEC_safe_grow_cleared (sel_global_bb_info_def, heap, sel_global_bb_info, 4104 last_basic_block); 4105 } 4106 4107 /* Extend region-scope data structures for basic blocks. */ 4108 static void 4109 extend_region_bb_info (void) 4110 { 4111 VEC_safe_grow_cleared (sel_region_bb_info_def, heap, sel_region_bb_info, 4112 last_basic_block); 4113 } 4114 4115 /* Extend all data structures to fit for all basic blocks. */ 4116 static void 4117 extend_bb_info (void) 4118 { 4119 sel_extend_global_bb_info (); 4120 extend_region_bb_info (); 4121 } 4122 4123 /* Finalize pass-scope data structures for basic blocks. */ 4124 void 4125 sel_finish_global_bb_info (void) 4126 { 4127 VEC_free (sel_global_bb_info_def, heap, sel_global_bb_info); 4128 } 4129 4130 /* Finalize region-scope data structures for basic blocks. */ 4131 static void 4132 finish_region_bb_info (void) 4133 { 4134 VEC_free (sel_region_bb_info_def, heap, sel_region_bb_info); 4135 } 4136 4137 4138 /* Data for each insn in current region. */ 4139 VEC (sel_insn_data_def, heap) *s_i_d = NULL; 4140 4141 /* Extend data structures for insns from current region. */ 4142 static void 4143 extend_insn_data (void) 4144 { 4145 int reserve; 4146 4147 sched_extend_target (); 4148 sched_deps_init (false); 4149 4150 /* Extend data structures for insns from current region. */ 4151 reserve = (sched_max_luid + 1 4152 - VEC_length (sel_insn_data_def, s_i_d)); 4153 if (reserve > 0 4154 && ! VEC_space (sel_insn_data_def, s_i_d, reserve)) 4155 { 4156 int size; 4157 4158 if (sched_max_luid / 2 > 1024) 4159 size = sched_max_luid + 1024; 4160 else 4161 size = 3 * sched_max_luid / 2; 4162 4163 4164 VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size); 4165 } 4166 } 4167 4168 /* Finalize data structures for insns from current region. */ 4169 static void 4170 finish_insns (void) 4171 { 4172 unsigned i; 4173 4174 /* Clear here all dependence contexts that may have left from insns that were 4175 removed during the scheduling. */ 4176 for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++) 4177 { 4178 sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i); 4179 4180 if (sid_entry->live) 4181 return_regset_to_pool (sid_entry->live); 4182 if (sid_entry->analyzed_deps) 4183 { 4184 BITMAP_FREE (sid_entry->analyzed_deps); 4185 BITMAP_FREE (sid_entry->found_deps); 4186 htab_delete (sid_entry->transformed_insns); 4187 free_deps (&sid_entry->deps_context); 4188 } 4189 if (EXPR_VINSN (&sid_entry->expr)) 4190 { 4191 clear_expr (&sid_entry->expr); 4192 4193 /* Also, clear CANT_MOVE bit here, because we really don't want it 4194 to be passed to the next region. 
*/ 4195 CANT_MOVE_BY_LUID (i) = 0; 4196 } 4197 } 4198 4199 VEC_free (sel_insn_data_def, heap, s_i_d); 4200 } 4201 4202 /* A proxy to pass initialization data to init_insn (). */ 4203 static sel_insn_data_def _insn_init_ssid; 4204 static sel_insn_data_t insn_init_ssid = &_insn_init_ssid; 4205 4206 /* If true create a new vinsn. Otherwise use the one from EXPR. */ 4207 static bool insn_init_create_new_vinsn_p; 4208 4209 /* Set all necessary data for initialization of the new insn[s]. */ 4210 static expr_t 4211 set_insn_init (expr_t expr, vinsn_t vi, int seqno) 4212 { 4213 expr_t x = &insn_init_ssid->expr; 4214 4215 copy_expr_onside (x, expr); 4216 if (vi != NULL) 4217 { 4218 insn_init_create_new_vinsn_p = false; 4219 change_vinsn_in_expr (x, vi); 4220 } 4221 else 4222 insn_init_create_new_vinsn_p = true; 4223 4224 insn_init_ssid->seqno = seqno; 4225 return x; 4226 } 4227 4228 /* Init data for INSN. */ 4229 static void 4230 init_insn_data (insn_t insn) 4231 { 4232 expr_t expr; 4233 sel_insn_data_t ssid = insn_init_ssid; 4234 4235 /* The fields mentioned below are special and hence are not being 4236 propagated to the new insns. */ 4237 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL 4238 && !ssid->after_stall_p && ssid->sched_cycle == 0); 4239 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0); 4240 4241 expr = INSN_EXPR (insn); 4242 copy_expr (expr, &ssid->expr); 4243 prepare_insn_expr (insn, ssid->seqno); 4244 4245 if (insn_init_create_new_vinsn_p) 4246 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p)); 4247 4248 if (first_time_insn_init (insn)) 4249 init_first_time_insn_data (insn); 4250 } 4251 4252 /* This is used to initialize spurious jumps generated by 4253 sel_redirect_edge (). */ 4254 static void 4255 init_simplejump_data (insn_t insn) 4256 { 4257 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0, 4258 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false, 4259 false, true); 4260 INSN_SEQNO (insn) = get_seqno_for_a_jump (insn); 4261 init_first_time_insn_data (insn); 4262 } 4263 4264 /* Perform deferred initialization of insns. This is used to process 4265 a new jump that may be created by redirect_edge. */ 4266 void 4267 sel_init_new_insn (insn_t insn, int flags) 4268 { 4269 /* We create data structures for bb when the first insn is emitted in it. */ 4270 if (INSN_P (insn) 4271 && INSN_IN_STREAM_P (insn) 4272 && insn_is_the_only_one_in_bb_p (insn)) 4273 { 4274 extend_bb_info (); 4275 create_initial_data_sets (BLOCK_FOR_INSN (insn)); 4276 } 4277 4278 if (flags & INSN_INIT_TODO_LUID) 4279 { 4280 sched_extend_luids (); 4281 sched_init_insn_luid (insn); 4282 } 4283 4284 if (flags & INSN_INIT_TODO_SSID) 4285 { 4286 extend_insn_data (); 4287 init_insn_data (insn); 4288 clear_expr (&insn_init_ssid->expr); 4289 } 4290 4291 if (flags & INSN_INIT_TODO_SIMPLEJUMP) 4292 { 4293 extend_insn_data (); 4294 init_simplejump_data (insn); 4295 } 4296 4297 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn)) 4298 == CONTAINING_RGN (BB_TO_BLOCK (0))); 4299 } 4300 4301 4302 /* Functions to init/finish work with lv sets. */ 4303 4304 /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */ 4305 static void 4306 init_lv_set (basic_block bb) 4307 { 4308 gcc_assert (!BB_LV_SET_VALID_P (bb)); 4309 4310 BB_LV_SET (bb) = get_regset_from_pool (); 4311 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb)); 4312 BB_LV_SET_VALID_P (bb) = true; 4313 } 4314 4315 /* Copy liveness information to BB from FROM_BB. 
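
   For example, sel_create_recovery_block below seeds the lv set of the
   newly created BEFORE_RECOVERY block from the exit block this way:

     if (before_recovery)
       copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);

   BB's lv set must not be valid yet, as the assert below checks; the
   set becomes valid once copied.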
*/ 4316 static void 4317 copy_lv_set_from (basic_block bb, basic_block from_bb) 4318 { 4319 gcc_assert (!BB_LV_SET_VALID_P (bb)); 4320 4321 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb)); 4322 BB_LV_SET_VALID_P (bb) = true; 4323 } 4324 4325 /* Initialize lv set of all bb headers. */ 4326 void 4327 init_lv_sets (void) 4328 { 4329 basic_block bb; 4330 4331 /* Initialize of LV sets. */ 4332 FOR_EACH_BB (bb) 4333 init_lv_set (bb); 4334 4335 /* Don't forget EXIT_BLOCK. */ 4336 init_lv_set (EXIT_BLOCK_PTR); 4337 } 4338 4339 /* Release lv set of HEAD. */ 4340 static void 4341 free_lv_set (basic_block bb) 4342 { 4343 gcc_assert (BB_LV_SET (bb) != NULL); 4344 4345 return_regset_to_pool (BB_LV_SET (bb)); 4346 BB_LV_SET (bb) = NULL; 4347 BB_LV_SET_VALID_P (bb) = false; 4348 } 4349 4350 /* Finalize lv sets of all bb headers. */ 4351 void 4352 free_lv_sets (void) 4353 { 4354 basic_block bb; 4355 4356 /* Don't forget EXIT_BLOCK. */ 4357 free_lv_set (EXIT_BLOCK_PTR); 4358 4359 /* Free LV sets. */ 4360 FOR_EACH_BB (bb) 4361 if (BB_LV_SET (bb)) 4362 free_lv_set (bb); 4363 } 4364 4365 /* Mark AV_SET for BB as invalid, so this set will be updated the next time 4366 compute_av() processes BB. This function is called when creating new basic 4367 blocks, as well as for blocks (either new or existing) where new jumps are 4368 created when the control flow is being updated. */ 4369 static void 4370 invalidate_av_set (basic_block bb) 4371 { 4372 BB_AV_LEVEL (bb) = -1; 4373 } 4374 4375 /* Create initial data sets for BB (they will be invalid). */ 4376 static void 4377 create_initial_data_sets (basic_block bb) 4378 { 4379 if (BB_LV_SET (bb)) 4380 BB_LV_SET_VALID_P (bb) = false; 4381 else 4382 BB_LV_SET (bb) = get_regset_from_pool (); 4383 invalidate_av_set (bb); 4384 } 4385 4386 /* Free av set of BB. */ 4387 static void 4388 free_av_set (basic_block bb) 4389 { 4390 av_set_clear (&BB_AV_SET (bb)); 4391 BB_AV_LEVEL (bb) = 0; 4392 } 4393 4394 /* Free data sets of BB. */ 4395 void 4396 free_data_sets (basic_block bb) 4397 { 4398 free_lv_set (bb); 4399 free_av_set (bb); 4400 } 4401 4402 /* Exchange lv sets of TO and FROM. */ 4403 static void 4404 exchange_lv_sets (basic_block to, basic_block from) 4405 { 4406 { 4407 regset to_lv_set = BB_LV_SET (to); 4408 4409 BB_LV_SET (to) = BB_LV_SET (from); 4410 BB_LV_SET (from) = to_lv_set; 4411 } 4412 4413 { 4414 bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to); 4415 4416 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from); 4417 BB_LV_SET_VALID_P (from) = to_lv_set_valid_p; 4418 } 4419 } 4420 4421 4422 /* Exchange av sets of TO and FROM. */ 4423 static void 4424 exchange_av_sets (basic_block to, basic_block from) 4425 { 4426 { 4427 av_set_t to_av_set = BB_AV_SET (to); 4428 4429 BB_AV_SET (to) = BB_AV_SET (from); 4430 BB_AV_SET (from) = to_av_set; 4431 } 4432 4433 { 4434 int to_av_level = BB_AV_LEVEL (to); 4435 4436 BB_AV_LEVEL (to) = BB_AV_LEVEL (from); 4437 BB_AV_LEVEL (from) = to_av_level; 4438 } 4439 } 4440 4441 /* Exchange data sets of TO and FROM. */ 4442 void 4443 exchange_data_sets (basic_block to, basic_block from) 4444 { 4445 exchange_lv_sets (to, from); 4446 exchange_av_sets (to, from); 4447 } 4448 4449 /* Copy data sets of FROM to TO. 
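
   TO must have invalid data sets and no allocated av set, as the asserts
   below check; each of FROM's sets is copied only if it is valid. A
   hedged usage sketch for a hypothetical freshly created block NEW_BB,
   whose sets are created invalid by create_initial_data_sets above:

     create_initial_data_sets (new_bb);
     copy_data_sets (new_bb, bb);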
*/ 4450 void 4451 copy_data_sets (basic_block to, basic_block from) 4452 { 4453 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to)); 4454 gcc_assert (BB_AV_SET (to) == NULL); 4455 4456 BB_AV_LEVEL (to) = BB_AV_LEVEL (from); 4457 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from); 4458 4459 if (BB_AV_SET_VALID_P (from)) 4460 { 4461 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from)); 4462 } 4463 if (BB_LV_SET_VALID_P (from)) 4464 { 4465 gcc_assert (BB_LV_SET (to) != NULL); 4466 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from)); 4467 } 4468 } 4469 4470 /* Return an av set for INSN, if any. */ 4471 av_set_t 4472 get_av_set (insn_t insn) 4473 { 4474 av_set_t av_set; 4475 4476 gcc_assert (AV_SET_VALID_P (insn)); 4477 4478 if (sel_bb_head_p (insn)) 4479 av_set = BB_AV_SET (BLOCK_FOR_INSN (insn)); 4480 else 4481 av_set = NULL; 4482 4483 return av_set; 4484 } 4485 4486 /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */ 4487 int 4488 get_av_level (insn_t insn) 4489 { 4490 int av_level; 4491 4492 gcc_assert (INSN_P (insn)); 4493 4494 if (sel_bb_head_p (insn)) 4495 av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn)); 4496 else 4497 av_level = INSN_WS_LEVEL (insn); 4498 4499 return av_level; 4500 } 4501 4502 4503 4504 /* Variables to work with control-flow graph. */ 4505 4506 /* The basic block that already has been processed by the sched_data_update (), 4507 but hasn't been in sel_add_bb () yet. */ 4508 static VEC (basic_block, heap) *last_added_blocks = NULL; 4509 4510 /* A pool for allocating successor infos. */ 4511 static struct 4512 { 4513 /* A stack for saving succs_info structures. */ 4514 struct succs_info *stack; 4515 4516 /* Its size. */ 4517 int size; 4518 4519 /* Top of the stack. */ 4520 int top; 4521 4522 /* Maximal value of the top. */ 4523 int max_top; 4524 } succs_info_pool; 4525 4526 /* Functions to work with control-flow graph. */ 4527 4528 /* Return basic block note of BB. */ 4529 insn_t 4530 sel_bb_head (basic_block bb) 4531 { 4532 insn_t head; 4533 4534 if (bb == EXIT_BLOCK_PTR) 4535 { 4536 gcc_assert (exit_insn != NULL_RTX); 4537 head = exit_insn; 4538 } 4539 else 4540 { 4541 insn_t note; 4542 4543 note = bb_note (bb); 4544 head = next_nonnote_insn (note); 4545 4546 if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb)) 4547 head = NULL_RTX; 4548 } 4549 4550 return head; 4551 } 4552 4553 /* Return true if INSN is a basic block header. */ 4554 bool 4555 sel_bb_head_p (insn_t insn) 4556 { 4557 return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn; 4558 } 4559 4560 /* Return last insn of BB. */ 4561 insn_t 4562 sel_bb_end (basic_block bb) 4563 { 4564 if (sel_bb_empty_p (bb)) 4565 return NULL_RTX; 4566 4567 gcc_assert (bb != EXIT_BLOCK_PTR); 4568 4569 return BB_END (bb); 4570 } 4571 4572 /* Return true if INSN is the last insn in its basic block. */ 4573 bool 4574 sel_bb_end_p (insn_t insn) 4575 { 4576 return insn == sel_bb_end (BLOCK_FOR_INSN (insn)); 4577 } 4578 4579 /* Return true if BB consist of single NOTE_INSN_BASIC_BLOCK. */ 4580 bool 4581 sel_bb_empty_p (basic_block bb) 4582 { 4583 return sel_bb_head (bb) == NULL; 4584 } 4585 4586 /* True when BB belongs to the current scheduling region. */ 4587 bool 4588 in_current_region_p (basic_block bb) 4589 { 4590 if (bb->index < NUM_FIXED_BLOCKS) 4591 return false; 4592 4593 return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0)); 4594 } 4595 4596 /* Return the block which is a fallthru bb of a conditional jump JUMP. 
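
   For instance, if a hypothetical block B1 ends in a conditional jump
   with a branch edge to B3 and a fallthru edge to B2, then

     fallthru_bb_of_jump (BB_END (b1))

   returns B2; for a non-jump insn, an unconditional jump, or a block
   with a single successor it returns NULL, as the checks below spell out.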
*/ 4597 basic_block 4598 fallthru_bb_of_jump (rtx jump) 4599 { 4600 if (!JUMP_P (jump)) 4601 return NULL; 4602 4603 if (!any_condjump_p (jump)) 4604 return NULL; 4605 4606 /* A basic block that ends with a conditional jump may still have one successor 4607 (and be followed by a barrier), we are not interested. */ 4608 if (single_succ_p (BLOCK_FOR_INSN (jump))) 4609 return NULL; 4610 4611 return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest; 4612 } 4613 4614 /* Remove all notes from BB. */ 4615 static void 4616 init_bb (basic_block bb) 4617 { 4618 remove_notes (bb_note (bb), BB_END (bb)); 4619 BB_NOTE_LIST (bb) = note_list; 4620 } 4621 4622 void 4623 sel_init_bbs (bb_vec_t bbs) 4624 { 4625 const struct sched_scan_info_def ssi = 4626 { 4627 extend_bb_info, /* extend_bb */ 4628 init_bb, /* init_bb */ 4629 NULL, /* extend_insn */ 4630 NULL /* init_insn */ 4631 }; 4632 4633 sched_scan (&ssi, bbs); 4634 } 4635 4636 /* Restore notes for the whole region. */ 4637 static void 4638 sel_restore_notes (void) 4639 { 4640 int bb; 4641 insn_t insn; 4642 4643 for (bb = 0; bb < current_nr_blocks; bb++) 4644 { 4645 basic_block first, last; 4646 4647 first = EBB_FIRST_BB (bb); 4648 last = EBB_LAST_BB (bb)->next_bb; 4649 4650 do 4651 { 4652 note_list = BB_NOTE_LIST (first); 4653 restore_other_notes (NULL, first); 4654 BB_NOTE_LIST (first) = NULL_RTX; 4655 4656 FOR_BB_INSNS (first, insn) 4657 if (NONDEBUG_INSN_P (insn)) 4658 reemit_notes (insn); 4659 4660 first = first->next_bb; 4661 } 4662 while (first != last); 4663 } 4664 } 4665 4666 /* Free per-bb data structures. */ 4667 void 4668 sel_finish_bbs (void) 4669 { 4670 sel_restore_notes (); 4671 4672 /* Remove current loop preheader from this loop. */ 4673 if (current_loop_nest) 4674 sel_remove_loop_preheader (); 4675 4676 finish_region_bb_info (); 4677 } 4678 4679 /* Return true if INSN has a single successor of type FLAGS. */ 4680 bool 4681 sel_insn_has_single_succ_p (insn_t insn, int flags) 4682 { 4683 insn_t succ; 4684 succ_iterator si; 4685 bool first_p = true; 4686 4687 FOR_EACH_SUCC_1 (succ, si, insn, flags) 4688 { 4689 if (first_p) 4690 first_p = false; 4691 else 4692 return false; 4693 } 4694 4695 return true; 4696 } 4697 4698 /* Allocate successor's info. */ 4699 static struct succs_info * 4700 alloc_succs_info (void) 4701 { 4702 if (succs_info_pool.top == succs_info_pool.max_top) 4703 { 4704 int i; 4705 4706 if (++succs_info_pool.max_top >= succs_info_pool.size) 4707 gcc_unreachable (); 4708 4709 i = ++succs_info_pool.top; 4710 succs_info_pool.stack[i].succs_ok = VEC_alloc (rtx, heap, 10); 4711 succs_info_pool.stack[i].succs_other = VEC_alloc (rtx, heap, 10); 4712 succs_info_pool.stack[i].probs_ok = VEC_alloc (int, heap, 10); 4713 } 4714 else 4715 succs_info_pool.top++; 4716 4717 return &succs_info_pool.stack[succs_info_pool.top]; 4718 } 4719 4720 /* Free successor's info. */ 4721 void 4722 free_succs_info (struct succs_info * sinfo) 4723 { 4724 gcc_assert (succs_info_pool.top >= 0 4725 && &succs_info_pool.stack[succs_info_pool.top] == sinfo); 4726 succs_info_pool.top--; 4727 4728 /* Clear stale info. */ 4729 VEC_block_remove (rtx, sinfo->succs_ok, 4730 0, VEC_length (rtx, sinfo->succs_ok)); 4731 VEC_block_remove (rtx, sinfo->succs_other, 4732 0, VEC_length (rtx, sinfo->succs_other)); 4733 VEC_block_remove (int, sinfo->probs_ok, 4734 0, VEC_length (int, sinfo->probs_ok)); 4735 sinfo->all_prob = 0; 4736 sinfo->succs_ok_n = 0; 4737 sinfo->all_succs_n = 0; 4738 } 4739 4740 /* Compute successor info for INSN. 
FLAGS are the flags passed 4741 to the FOR_EACH_SUCC_1 iterator. */ 4742 struct succs_info * 4743 compute_succs_info (insn_t insn, short flags) 4744 { 4745 succ_iterator si; 4746 insn_t succ; 4747 struct succs_info *sinfo = alloc_succs_info (); 4748 4749 /* Traverse *all* successors and decide what to do with each. */ 4750 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL) 4751 { 4752 /* FIXME: this doesn't work for skipping to loop exits, as we don't 4753 perform code motion through inner loops. */ 4754 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS; 4755 4756 if (current_flags & flags) 4757 { 4758 VEC_safe_push (rtx, heap, sinfo->succs_ok, succ); 4759 VEC_safe_push (int, heap, sinfo->probs_ok, 4760 /* FIXME: Improve calculation when skipping 4761 inner loop to exits. */ 4762 (si.bb_end 4763 ? si.e1->probability 4764 : REG_BR_PROB_BASE)); 4765 sinfo->succs_ok_n++; 4766 } 4767 else 4768 VEC_safe_push (rtx, heap, sinfo->succs_other, succ); 4769 4770 /* Compute all_prob. */ 4771 if (!si.bb_end) 4772 sinfo->all_prob = REG_BR_PROB_BASE; 4773 else 4774 sinfo->all_prob += si.e1->probability; 4775 4776 sinfo->all_succs_n++; 4777 } 4778 4779 return sinfo; 4780 } 4781 4782 /* Return the predecessors of BB in PREDS and their number in N. 4783 Empty blocks are skipped. SIZE is used to allocate PREDS. */ 4784 static void 4785 cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size) 4786 { 4787 edge e; 4788 edge_iterator ei; 4789 4790 gcc_assert (BLOCK_TO_BB (bb->index) != 0); 4791 4792 FOR_EACH_EDGE (e, ei, bb->preds) 4793 { 4794 basic_block pred_bb = e->src; 4795 insn_t bb_end = BB_END (pred_bb); 4796 4797 if (!in_current_region_p (pred_bb)) 4798 { 4799 gcc_assert (flag_sel_sched_pipelining_outer_loops 4800 && current_loop_nest); 4801 continue; 4802 } 4803 4804 if (sel_bb_empty_p (pred_bb)) 4805 cfg_preds_1 (pred_bb, preds, n, size); 4806 else 4807 { 4808 if (*n == *size) 4809 *preds = XRESIZEVEC (insn_t, *preds, 4810 (*size = 2 * *size + 1)); 4811 (*preds)[(*n)++] = bb_end; 4812 } 4813 } 4814 4815 gcc_assert (*n != 0 4816 || (flag_sel_sched_pipelining_outer_loops 4817 && current_loop_nest)); 4818 } 4819 4820 /* Find all predecessors of BB and record them in PREDS and their number 4821 in N. Empty blocks are skipped, and only normal (forward in-region) 4822 edges are processed. */ 4823 static void 4824 cfg_preds (basic_block bb, insn_t **preds, int *n) 4825 { 4826 int size = 0; 4827 4828 *preds = NULL; 4829 *n = 0; 4830 cfg_preds_1 (bb, preds, n, &size); 4831 } 4832 4833 /* Returns true if we are moving INSN through join point. */ 4834 bool 4835 sel_num_cfg_preds_gt_1 (insn_t insn) 4836 { 4837 basic_block bb; 4838 4839 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0) 4840 return false; 4841 4842 bb = BLOCK_FOR_INSN (insn); 4843 4844 while (1) 4845 { 4846 if (EDGE_COUNT (bb->preds) > 1) 4847 return true; 4848 4849 gcc_assert (EDGE_PRED (bb, 0)->dest == bb); 4850 bb = EDGE_PRED (bb, 0)->src; 4851 4852 if (!sel_bb_empty_p (bb)) 4853 break; 4854 } 4855 4856 return false; 4857 } 4858 4859 /* Returns true when BB should be the end of an ebb. Adapted from the 4860 code in sched-ebb.c. */ 4861 bool 4862 bb_ends_ebb_p (basic_block bb) 4863 { 4864 basic_block next_bb = bb_next_bb (bb); 4865 edge e; 4866 4867 if (next_bb == EXIT_BLOCK_PTR 4868 || bitmap_bit_p (forced_ebb_heads, next_bb->index) 4869 || (LABEL_P (BB_HEAD (next_bb)) 4870 /* NB: LABEL_NUSES () is not maintained outside of jump.c. 4871 Work around that. 
*/ 4872 && !single_pred_p (next_bb))) 4873 return true; 4874 4875 if (!in_current_region_p (next_bb)) 4876 return true; 4877 4878 e = find_fallthru_edge (bb->succs); 4879 if (e) 4880 { 4881 gcc_assert (e->dest == next_bb); 4882 4883 return false; 4884 } 4885 4886 return true; 4887 } 4888 4889 /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a 4890 successor of INSN. */ 4891 bool 4892 in_same_ebb_p (insn_t insn, insn_t succ) 4893 { 4894 basic_block ptr = BLOCK_FOR_INSN (insn); 4895 4896 for(;;) 4897 { 4898 if (ptr == BLOCK_FOR_INSN (succ)) 4899 return true; 4900 4901 if (bb_ends_ebb_p (ptr)) 4902 return false; 4903 4904 ptr = bb_next_bb (ptr); 4905 } 4906 4907 gcc_unreachable (); 4908 return false; 4909 } 4910 4911 /* Recomputes the reverse topological order for the function and 4912 saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also 4913 modified appropriately. */ 4914 static void 4915 recompute_rev_top_order (void) 4916 { 4917 int *postorder; 4918 int n_blocks, i; 4919 4920 if (!rev_top_order_index || rev_top_order_index_len < last_basic_block) 4921 { 4922 rev_top_order_index_len = last_basic_block; 4923 rev_top_order_index = XRESIZEVEC (int, rev_top_order_index, 4924 rev_top_order_index_len); 4925 } 4926 4927 postorder = XNEWVEC (int, n_basic_blocks); 4928 4929 n_blocks = post_order_compute (postorder, true, false); 4930 gcc_assert (n_basic_blocks == n_blocks); 4931 4932 /* Build reverse function: for each basic block with BB->INDEX == K 4933 rev_top_order_index[K] is it's reverse topological sort number. */ 4934 for (i = 0; i < n_blocks; i++) 4935 { 4936 gcc_assert (postorder[i] < rev_top_order_index_len); 4937 rev_top_order_index[postorder[i]] = i; 4938 } 4939 4940 free (postorder); 4941 } 4942 4943 /* Clear all flags from insns in BB that could spoil its rescheduling. */ 4944 void 4945 clear_outdated_rtx_info (basic_block bb) 4946 { 4947 rtx insn; 4948 4949 FOR_BB_INSNS (bb, insn) 4950 if (INSN_P (insn)) 4951 { 4952 SCHED_GROUP_P (insn) = 0; 4953 INSN_AFTER_STALL_P (insn) = 0; 4954 INSN_SCHED_TIMES (insn) = 0; 4955 EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0; 4956 4957 /* We cannot use the changed caches, as previously we could ignore 4958 the LHS dependence due to enabled renaming and transform 4959 the expression, and currently we'll be unable to do this. */ 4960 htab_empty (INSN_TRANSFORMED_INSNS (insn)); 4961 } 4962 } 4963 4964 /* Add BB_NOTE to the pool of available basic block notes. */ 4965 static void 4966 return_bb_to_pool (basic_block bb) 4967 { 4968 rtx note = bb_note (bb); 4969 4970 gcc_assert (NOTE_BASIC_BLOCK (note) == bb 4971 && bb->aux == NULL); 4972 4973 /* It turns out that current cfg infrastructure does not support 4974 reuse of basic blocks. Don't bother for now. */ 4975 /*VEC_safe_push (rtx, heap, bb_note_pool, note);*/ 4976 } 4977 4978 /* Get a bb_note from pool or return NULL_RTX if pool is empty. */ 4979 static rtx 4980 get_bb_note_from_pool (void) 4981 { 4982 if (VEC_empty (rtx, bb_note_pool)) 4983 return NULL_RTX; 4984 else 4985 { 4986 rtx note = VEC_pop (rtx, bb_note_pool); 4987 4988 PREV_INSN (note) = NULL_RTX; 4989 NEXT_INSN (note) = NULL_RTX; 4990 4991 return note; 4992 } 4993 } 4994 4995 /* Free bb_note_pool. */ 4996 void 4997 free_bb_note_pool (void) 4998 { 4999 VEC_free (rtx, heap, bb_note_pool); 5000 } 5001 5002 /* Setup scheduler pool and successor structure. 
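
   The succs_info pool is sized to MAX_WS + 1 entries and used as a stack:
   alloc_succs_info and free_succs_info above must nest in LIFO order, and
   every succs_info must be returned before free_sched_pools is called.
   A sketch of one scheduler run:

     alloc_sched_pools ();
     ...
     sinfo = compute_succs_info (insn, SUCCS_NORMAL);
     ...
     free_succs_info (sinfo);
     ...
     free_sched_pools ();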
*/
5003 void
5004 alloc_sched_pools (void)
5005 {
5006   int succs_size;
5007
5008   succs_size = MAX_WS + 1;
5009   succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
5010   succs_info_pool.size = succs_size;
5011   succs_info_pool.top = -1;
5012   succs_info_pool.max_top = -1;
5013
5014   sched_lists_pool = create_alloc_pool ("sel-sched-lists",
5015                                         sizeof (struct _list_node), 500);
5016 }
5017
5018 /* Free the pools. */
5019 void
5020 free_sched_pools (void)
5021 {
5022   int i;
5023
5024   free_alloc_pool (sched_lists_pool);
5025   gcc_assert (succs_info_pool.top == -1);
5026   for (i = 0; i <= succs_info_pool.max_top; i++)
5027     {
5028       VEC_free (rtx, heap, succs_info_pool.stack[i].succs_ok);
5029       VEC_free (rtx, heap, succs_info_pool.stack[i].succs_other);
5030       VEC_free (int, heap, succs_info_pool.stack[i].probs_ok);
5031     }
5032   free (succs_info_pool.stack);
5033 }
5034
5035
5036 /* Returns a position in RGN where BB can be inserted retaining
5037    topological order. */
5038 static int
5039 find_place_to_insert_bb (basic_block bb, int rgn)
5040 {
5041   bool has_preds_outside_rgn = false;
5042   edge e;
5043   edge_iterator ei;
5044
5045   /* Find whether we have preds outside the region. */
5046   FOR_EACH_EDGE (e, ei, bb->preds)
5047     if (!in_current_region_p (e->src))
5048       {
5049         has_preds_outside_rgn = true;
5050         break;
5051       }
5052
5053   /* Recompute the top order -- needed when we have > 1 pred
5054      and in case we don't have preds outside. */
5055   if (flag_sel_sched_pipelining_outer_loops
5056       && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
5057     {
5058       int i, bbi = bb->index, cur_bbi;
5059
5060       recompute_rev_top_order ();
5061       for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
5062         {
5063           cur_bbi = BB_TO_BLOCK (i);
5064           if (rev_top_order_index[bbi]
5065               < rev_top_order_index[cur_bbi])
5066             break;
5067         }
5068
5069       /* We skipped the right block, so we increase i; the caller will
5070          increase it by one more step afterwards, so we decrease i to compensate. */
5071       return (i + 1) - 1;
5072     }
5073   else if (has_preds_outside_rgn)
5074     {
5075       /* This is the case when we generate an extra empty block
5076          to serve as region head during pipelining. */
5077       e = EDGE_SUCC (bb, 0);
5078       gcc_assert (EDGE_COUNT (bb->succs) == 1
5079                   && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
5080                   && (BLOCK_TO_BB (e->dest->index) == 0));
5081       return -1;
5082     }
5083
5084   /* We don't have preds outside the region. We should have a single pred,
5085      because the multiple preds case comes from the pipelining of outer
5086      loops, and that is handled above.
5087      Just take the bbi of this single pred. */
5088   if (EDGE_COUNT (bb->succs) > 0)
5089     {
5090       int pred_bbi;
5091
5092       gcc_assert (EDGE_COUNT (bb->preds) == 1);
5093
5094       pred_bbi = EDGE_PRED (bb, 0)->src->index;
5095       return BLOCK_TO_BB (pred_bbi);
5096     }
5097   else
5098     /* BB has no successors. It is safe to put it at the end. */
5099     return current_nr_blocks - 1;
5100 }
5101
5102 /* Deletes an empty basic block freeing its data. */
5103 static void
5104 delete_and_free_basic_block (basic_block bb)
5105 {
5106   gcc_assert (sel_bb_empty_p (bb));
5107
5108   if (BB_LV_SET (bb))
5109     free_lv_set (bb);
5110
5111   bitmap_clear_bit (blocks_to_reschedule, bb->index);
5112
5113   /* Can't assert av_set properties because we use sel_remove_bb ()
5114      when removing loop preheader from the region. At the point of
5115      removing the preheader we have already deallocated sel_region_bb_info.
*/ 5116 gcc_assert (BB_LV_SET (bb) == NULL 5117 && !BB_LV_SET_VALID_P (bb) 5118 && BB_AV_LEVEL (bb) == 0 5119 && BB_AV_SET (bb) == NULL); 5120 5121 delete_basic_block (bb); 5122 } 5123 5124 /* Add BB to the current region and update the region data. */ 5125 static void 5126 add_block_to_current_region (basic_block bb) 5127 { 5128 int i, pos, bbi = -2, rgn; 5129 5130 rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); 5131 bbi = find_place_to_insert_bb (bb, rgn); 5132 bbi += 1; 5133 pos = RGN_BLOCKS (rgn) + bbi; 5134 5135 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0 5136 && ebb_head[bbi] == pos); 5137 5138 /* Make a place for the new block. */ 5139 extend_regions (); 5140 5141 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--) 5142 BLOCK_TO_BB (rgn_bb_table[i])++; 5143 5144 memmove (rgn_bb_table + pos + 1, 5145 rgn_bb_table + pos, 5146 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table)); 5147 5148 /* Initialize data for BB. */ 5149 rgn_bb_table[pos] = bb->index; 5150 BLOCK_TO_BB (bb->index) = bbi; 5151 CONTAINING_RGN (bb->index) = rgn; 5152 5153 RGN_NR_BLOCKS (rgn)++; 5154 5155 for (i = rgn + 1; i <= nr_regions; i++) 5156 RGN_BLOCKS (i)++; 5157 } 5158 5159 /* Remove BB from the current region and update the region data. */ 5160 static void 5161 remove_bb_from_region (basic_block bb) 5162 { 5163 int i, pos, bbi = -2, rgn; 5164 5165 rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); 5166 bbi = BLOCK_TO_BB (bb->index); 5167 pos = RGN_BLOCKS (rgn) + bbi; 5168 5169 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0 5170 && ebb_head[bbi] == pos); 5171 5172 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--) 5173 BLOCK_TO_BB (rgn_bb_table[i])--; 5174 5175 memmove (rgn_bb_table + pos, 5176 rgn_bb_table + pos + 1, 5177 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table)); 5178 5179 RGN_NR_BLOCKS (rgn)--; 5180 for (i = rgn + 1; i <= nr_regions; i++) 5181 RGN_BLOCKS (i)--; 5182 } 5183 5184 /* Add BB to the current region and update all data. If BB is NULL, add all 5185 blocks from last_added_blocks vector. */ 5186 static void 5187 sel_add_bb (basic_block bb) 5188 { 5189 /* Extend luids so that new notes will receive zero luids. */ 5190 sched_extend_luids (); 5191 sched_init_bbs (); 5192 sel_init_bbs (last_added_blocks); 5193 5194 /* When bb is passed explicitly, the vector should contain 5195 the only element that equals to bb; otherwise, the vector 5196 should not be NULL. */ 5197 gcc_assert (last_added_blocks != NULL); 5198 5199 if (bb != NULL) 5200 { 5201 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1 5202 && VEC_index (basic_block, 5203 last_added_blocks, 0) == bb); 5204 add_block_to_current_region (bb); 5205 5206 /* We associate creating/deleting data sets with the first insn 5207 appearing / disappearing in the bb. */ 5208 if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL) 5209 create_initial_data_sets (bb); 5210 5211 VEC_free (basic_block, heap, last_added_blocks); 5212 } 5213 else 5214 /* BB is NULL - process LAST_ADDED_BLOCKS instead. */ 5215 { 5216 int i; 5217 basic_block temp_bb = NULL; 5218 5219 for (i = 0; 5220 VEC_iterate (basic_block, last_added_blocks, i, bb); i++) 5221 { 5222 add_block_to_current_region (bb); 5223 temp_bb = bb; 5224 } 5225 5226 /* We need to fetch at least one bb so we know the region 5227 to update. */ 5228 gcc_assert (temp_bb != NULL); 5229 bb = temp_bb; 5230 5231 VEC_free (basic_block, heap, last_added_blocks); 5232 } 5233 5234 rgn_setup_region (CONTAINING_RGN (bb->index)); 5235 } 5236 5237 /* Remove BB from the current region and update all data. 
5238    If REMOVE_FROM_CFG_P is true, also remove the block from the CFG. */
5239 static void
5240 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5241 {
5242   unsigned idx = bb->index;
5243
5244   gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5245
5246   remove_bb_from_region (bb);
5247   return_bb_to_pool (bb);
5248   bitmap_clear_bit (blocks_to_reschedule, idx);
5249
5250   if (remove_from_cfg_p)
5251     {
5252       basic_block succ = single_succ (bb);
5253       delete_and_free_basic_block (bb);
5254       set_immediate_dominator (CDI_DOMINATORS, succ,
5255                                recompute_dominator (CDI_DOMINATORS, succ));
5256     }
5257
5258   rgn_setup_region (CONTAINING_RGN (idx));
5259 }
5260
5261 /* Concatenate info of EMPTY_BB to info of MERGE_BB. */
5262 static void
5263 move_bb_info (basic_block merge_bb, basic_block empty_bb)
5264 {
5265   if (in_current_region_p (merge_bb))
5266     concat_note_lists (BB_NOTE_LIST (empty_bb),
5267                        &BB_NOTE_LIST (merge_bb));
5268   BB_NOTE_LIST (empty_bb) = NULL_RTX;
5269
5270 }
5271
5272 /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5273    the region, but keep it in the CFG. */
5274 static void
5275 remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5276 {
5277   /* The block should contain just a note or a label.
5278      We try to check whether it is unused below. */
5279   gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5280               || LABEL_P (BB_HEAD (empty_bb)));
5281
5282   /* If the basic block has predecessors or successors, redirect them. */
5283   if (remove_from_cfg_p
5284       && (EDGE_COUNT (empty_bb->preds) > 0
5285           || EDGE_COUNT (empty_bb->succs) > 0))
5286     {
5287       basic_block pred;
5288       basic_block succ;
5289
5290       /* We need to init PRED and SUCC before redirecting edges. */
5291       if (EDGE_COUNT (empty_bb->preds) > 0)
5292         {
5293           edge e;
5294
5295           gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5296
5297           e = EDGE_PRED (empty_bb, 0);
5298           gcc_assert (e->src == empty_bb->prev_bb
5299                       && (e->flags & EDGE_FALLTHRU));
5300
5301           pred = empty_bb->prev_bb;
5302         }
5303       else
5304         pred = NULL;
5305
5306       if (EDGE_COUNT (empty_bb->succs) > 0)
5307         {
5308           /* We do not check fallthruness here as above, because after
5309              removing a jump the edge may actually not be fallthru. */
5310           gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5311           succ = EDGE_SUCC (empty_bb, 0)->dest;
5312         }
5313       else
5314         succ = NULL;
5315
5316       if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5317         {
5318           edge e = EDGE_PRED (empty_bb, 0);
5319
5320           if (e->flags & EDGE_FALLTHRU)
5321             redirect_edge_succ_nodup (e, succ);
5322           else
5323             sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5324         }
5325
5326       if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5327         {
5328           edge e = EDGE_SUCC (empty_bb, 0);
5329
5330           if (find_edge (pred, e->dest) == NULL)
5331             redirect_edge_pred (e, pred);
5332         }
5333     }
5334
5335   /* Finish removing. */
5336   sel_remove_bb (empty_bb, remove_from_cfg_p);
5337 }
5338
5339 /* An implementation of the create_basic_block hook, which additionally
5340    updates per-bb data structures.
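
   This hook is not called directly; sel_register_cfg_hooks below installs
   it over the original create_basic_block hook, roughly:

     orig_cfg_hooks = get_cfg_hooks ();
     sel_cfg_hooks = orig_cfg_hooks;
     sel_cfg_hooks.create_basic_block = sel_create_basic_block;
     set_cfg_hooks (sel_cfg_hooks);

   so every block created while the hooks are registered lands in
   LAST_ADDED_BLOCKS and is later picked up by sel_add_bb.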
*/
5341 static basic_block
5342 sel_create_basic_block (void *headp, void *endp, basic_block after)
5343 {
5344   basic_block new_bb;
5345   insn_t new_bb_note;
5346
5347   gcc_assert (flag_sel_sched_pipelining_outer_loops
5348               || last_added_blocks == NULL);
5349
5350   new_bb_note = get_bb_note_from_pool ();
5351
5352   if (new_bb_note == NULL_RTX)
5353     new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5354   else
5355     {
5356       new_bb = create_basic_block_structure ((rtx) headp, (rtx) endp,
5357                                              new_bb_note, after);
5358       new_bb->aux = NULL;
5359     }
5360
5361   VEC_safe_push (basic_block, heap, last_added_blocks, new_bb);
5362
5363   return new_bb;
5364 }
5365
5366 /* Implement sched_init_only_bb (). */
5367 static void
5368 sel_init_only_bb (basic_block bb, basic_block after)
5369 {
5370   gcc_assert (after == NULL);
5371
5372   extend_regions ();
5373   rgn_make_new_region_out_of_new_block (bb);
5374 }
5375
5376 /* Update the latch when we've split or merged it from FROM block to TO.
5377    This should be checked for all outer loops, too. */
5378 static void
5379 change_loops_latches (basic_block from, basic_block to)
5380 {
5381   gcc_assert (from != to);
5382
5383   if (current_loop_nest)
5384     {
5385       struct loop *loop;
5386
5387       for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5388         if (considered_for_pipelining_p (loop) && loop->latch == from)
5389           {
5390             gcc_assert (loop == current_loop_nest);
5391             loop->latch = to;
5392             gcc_assert (loop_latch_edge (loop));
5393           }
5394     }
5395 }
5396
5397 /* Splits BB into two basic blocks, adding the new one to the region and
5398    extending per-bb data structures. Returns the newly created bb. */
5399 static basic_block
5400 sel_split_block (basic_block bb, rtx after)
5401 {
5402   basic_block new_bb;
5403   insn_t insn;
5404
5405   new_bb = sched_split_block_1 (bb, after);
5406   sel_add_bb (new_bb);
5407
5408   /* This should be called after sel_add_bb, because it uses CONTAINING_RGN
5409      for the new block, which is not initialized before that call.
5410      FIXME: this function may be a no-op now. */
5411   change_loops_latches (bb, new_bb);
5412
5413   /* Update ORIG_BB_INDEX for insns moved into the new block. */
5414   FOR_BB_INSNS (new_bb, insn)
5415     if (INSN_P (insn))
5416       EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5417
5418   if (sel_bb_empty_p (bb))
5419     {
5420       gcc_assert (!sel_bb_empty_p (new_bb));
5421
5422       /* NEW_BB has data sets that need to be updated and BB holds
5423          data sets that should be removed. Exchange these data sets
5424          so that we won't lose BB's valid data sets. */
5425       exchange_data_sets (new_bb, bb);
5426       free_data_sets (bb);
5427     }
5428
5429   if (!sel_bb_empty_p (new_bb)
5430       && bitmap_bit_p (blocks_to_reschedule, bb->index))
5431     bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5432
5433   return new_bb;
5434 }
5435
5436 /* If BB ends with a jump insn whose UID is at least PREV_MAX_UID, return it.
5437    Otherwise return NULL. */
5438 static rtx
5439 check_for_new_jump (basic_block bb, int prev_max_uid)
5440 {
5441   rtx end;
5442
5443   end = sel_bb_end (bb);
5444   if (end && INSN_UID (end) >= prev_max_uid)
5445     return end;
5446   return NULL;
5447 }
5448
5449 /* Look for a new jump either in the FROM_BB block or in the newly created
5450    JUMP_BB block. New means having a UID at least equal to PREV_MAX_UID. */
5451 static rtx
5452 find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5453 {
5454   rtx jump;
5455
5456   /* Return immediately if no new insns were emitted.
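
     (The watermark protocol used here: callers record
        prev_max_uid = get_max_uid ();
      just before changing the CFG, so any insn emitted by the change, in
      particular a new jump, gets a UID of at least PREV_MAX_UID; see e.g.
      sel_split_edge below.)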
*/
5457   if (get_max_uid () == prev_max_uid)
5458     return NULL;
5459
5460   /* Now check both blocks for new jumps. There will only ever be one. */
5461   if ((jump = check_for_new_jump (from, prev_max_uid)))
5462     return jump;
5463
5464   if (jump_bb != NULL
5465       && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5466     return jump;
5467   return NULL;
5468 }
5469
5470 /* Splits E and adds the newly created basic block to the current region.
5471    Returns this basic block. */
5472 basic_block
5473 sel_split_edge (edge e)
5474 {
5475   basic_block new_bb, src, other_bb = NULL;
5476   int prev_max_uid;
5477   rtx jump;
5478
5479   src = e->src;
5480   prev_max_uid = get_max_uid ();
5481   new_bb = split_edge (e);
5482
5483   if (flag_sel_sched_pipelining_outer_loops
5484       && current_loop_nest)
5485     {
5486       int i;
5487       basic_block bb;
5488
5489       /* Some of the basic blocks might not have been added to the loop.
5490          Add them here, until this is fixed in force_fallthru. */
5491       for (i = 0;
5492            VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5493         if (!bb->loop_father)
5494           {
5495             add_bb_to_loop (bb, e->dest->loop_father);
5496
5497             gcc_assert (!other_bb && (new_bb->index != bb->index));
5498             other_bb = bb;
5499           }
5500     }
5501
5502   /* Add all last_added_blocks to the region. */
5503   sel_add_bb (NULL);
5504
5505   jump = find_new_jump (src, new_bb, prev_max_uid);
5506   if (jump)
5507     sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5508
5509   /* Put the correct lv set on this block. */
5510   if (other_bb && !sel_bb_empty_p (other_bb))
5511     compute_live (sel_bb_head (other_bb));
5512
5513   return new_bb;
5514 }
5515
5516 /* Implement sched_create_empty_bb (). */
5517 static basic_block
5518 sel_create_empty_bb (basic_block after)
5519 {
5520   basic_block new_bb;
5521
5522   new_bb = sched_create_empty_bb_1 (after);
5523
5524   /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5525      later. */
5526   gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
5527               && VEC_index (basic_block, last_added_blocks, 0) == new_bb);
5528
5529   VEC_free (basic_block, heap, last_added_blocks);
5530   return new_bb;
5531 }
5532
5533 /* Implement sched_create_recovery_block. ORIG_INSN is where the block
5534    will be split to insert a check. */
5535 basic_block
5536 sel_create_recovery_block (insn_t orig_insn)
5537 {
5538   basic_block first_bb, second_bb, recovery_block;
5539   basic_block before_recovery = NULL;
5540   rtx jump;
5541
5542   first_bb = BLOCK_FOR_INSN (orig_insn);
5543   if (sel_bb_end_p (orig_insn))
5544     {
5545       /* Avoid introducing an empty block while splitting. */
5546       gcc_assert (single_succ_p (first_bb));
5547       second_bb = single_succ (first_bb);
5548     }
5549   else
5550     second_bb = sched_split_block (first_bb, orig_insn);
5551
5552   recovery_block = sched_create_recovery_block (&before_recovery);
5553   if (before_recovery)
5554     copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
5555
5556   gcc_assert (sel_bb_empty_p (recovery_block));
5557   sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5558   if (current_loops != NULL)
5559     add_bb_to_loop (recovery_block, first_bb->loop_father);
5560
5561   sel_add_bb (recovery_block);
5562
5563   jump = BB_END (recovery_block);
5564   gcc_assert (sel_bb_head (recovery_block) == jump);
5565   sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5566
5567   return recovery_block;
5568 }
5569
5570 /* Merge basic block B into basic block A.
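
   B must be empty, and A must be both its single predecessor and its
   previous block, as the assert below spells out. A sketched call site
   is tidying away a block that has just become empty:

     if (sel_bb_empty_p (b))
       sel_merge_blocks (b->prev_bb, b);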
*/
5571 static void
5572 sel_merge_blocks (basic_block a, basic_block b)
5573 {
5574   gcc_assert (sel_bb_empty_p (b)
5575               && EDGE_COUNT (b->preds) == 1
5576               && EDGE_PRED (b, 0)->src == b->prev_bb);
5577
5578   move_bb_info (b->prev_bb, b);
5579   remove_empty_bb (b, false);
5580   merge_blocks (a, b);
5581   change_loops_latches (b, a);
5582 }
5583
5584 /* A wrapper for redirect_edge_and_branch_force, which also initializes
5585    data structures for the bb and insns possibly created by the redirection;
5586    any newly created bb is added to the current region. */
5587 void
5588 sel_redirect_edge_and_branch_force (edge e, basic_block to)
5589 {
5590   basic_block jump_bb, src, orig_dest = e->dest;
5591   int prev_max_uid;
5592   rtx jump;
5593
5594   /* This function is now used only for bookkeeping code creation, where
5595      ORIG_DEST never has a single predecessor, and thus we will not hit
5596      unreachable blocks when updating dominator info. */
5597   gcc_assert (!sel_bb_empty_p (e->src)
5598               && !single_pred_p (orig_dest));
5599   src = e->src;
5600   prev_max_uid = get_max_uid ();
5601   jump_bb = redirect_edge_and_branch_force (e, to);
5602
5603   if (jump_bb != NULL)
5604     sel_add_bb (jump_bb);
5605
5606   /* So far this function cannot spoil the loop structure, thus there is
5607      nothing to update here. But assert this to be sure. */
5608   if (current_loop_nest
5609       && pipelining_p)
5610     gcc_assert (loop_latch_edge (current_loop_nest));
5611
5612   jump = find_new_jump (src, jump_bb, prev_max_uid);
5613   if (jump)
5614     sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5615   set_immediate_dominator (CDI_DOMINATORS, to,
5616                            recompute_dominator (CDI_DOMINATORS, to));
5617   set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5618                            recompute_dominator (CDI_DOMINATORS, orig_dest));
5619 }
5620
5621 /* A wrapper for redirect_edge_and_branch. Return TRUE if the blocks connected
5622    by the redirected edge ended up in reverse topological order (the caller must then recompute it). */
5623 bool
5624 sel_redirect_edge_and_branch (edge e, basic_block to)
5625 {
5626   bool latch_edge_p;
5627   basic_block src, orig_dest = e->dest;
5628   int prev_max_uid;
5629   rtx jump;
5630   edge redirected;
5631   bool recompute_toporder_p = false;
5632   bool maybe_unreachable = single_pred_p (orig_dest);
5633
5634   latch_edge_p = (pipelining_p
5635                   && current_loop_nest
5636                   && e == loop_latch_edge (current_loop_nest));
5637
5638   src = e->src;
5639   prev_max_uid = get_max_uid ();
5640
5641   redirected = redirect_edge_and_branch (e, to);
5642
5643   gcc_assert (redirected && last_added_blocks == NULL);
5644
5645   /* When we've redirected a latch edge, update the header. */
5646   if (latch_edge_p)
5647     {
5648       current_loop_nest->header = to;
5649       gcc_assert (loop_latch_edge (current_loop_nest));
5650     }
5651
5652   /* In rare situations, the topological relation between the blocks connected
5653      by the redirected edge can change (see PR42245 for an example). Update
5654      block_to_bb/bb_to_block. */
5655   if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5656       && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5657     recompute_toporder_p = true;
5658
5659   jump = find_new_jump (src, NULL, prev_max_uid);
5660   if (jump)
5661     sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5662
5663   /* Only update dominator info when we don't have unreachable blocks.
5664      Otherwise we'll update in maybe_tidy_empty_bb.
*/ 5665 if (!maybe_unreachable) 5666 { 5667 set_immediate_dominator (CDI_DOMINATORS, to, 5668 recompute_dominator (CDI_DOMINATORS, to)); 5669 set_immediate_dominator (CDI_DOMINATORS, orig_dest, 5670 recompute_dominator (CDI_DOMINATORS, orig_dest)); 5671 } 5672 return recompute_toporder_p; 5673 } 5674 5675 /* This variable holds the cfg hooks used by the selective scheduler. */ 5676 static struct cfg_hooks sel_cfg_hooks; 5677 5678 /* Register sel-sched cfg hooks. */ 5679 void 5680 sel_register_cfg_hooks (void) 5681 { 5682 sched_split_block = sel_split_block; 5683 5684 orig_cfg_hooks = get_cfg_hooks (); 5685 sel_cfg_hooks = orig_cfg_hooks; 5686 5687 sel_cfg_hooks.create_basic_block = sel_create_basic_block; 5688 5689 set_cfg_hooks (sel_cfg_hooks); 5690 5691 sched_init_only_bb = sel_init_only_bb; 5692 sched_split_block = sel_split_block; 5693 sched_create_empty_bb = sel_create_empty_bb; 5694 } 5695 5696 /* Unregister sel-sched cfg hooks. */ 5697 void 5698 sel_unregister_cfg_hooks (void) 5699 { 5700 sched_create_empty_bb = NULL; 5701 sched_split_block = NULL; 5702 sched_init_only_bb = NULL; 5703 5704 set_cfg_hooks (orig_cfg_hooks); 5705 } 5706 5707 5708 /* Emit an insn rtx based on PATTERN. If a jump insn is wanted, 5709 LABEL is where this jump should be directed. */ 5710 rtx 5711 create_insn_rtx_from_pattern (rtx pattern, rtx label) 5712 { 5713 rtx insn_rtx; 5714 5715 gcc_assert (!INSN_P (pattern)); 5716 5717 start_sequence (); 5718 5719 if (label == NULL_RTX) 5720 insn_rtx = emit_insn (pattern); 5721 else if (DEBUG_INSN_P (label)) 5722 insn_rtx = emit_debug_insn (pattern); 5723 else 5724 { 5725 insn_rtx = emit_jump_insn (pattern); 5726 JUMP_LABEL (insn_rtx) = label; 5727 ++LABEL_NUSES (label); 5728 } 5729 5730 end_sequence (); 5731 5732 sched_extend_luids (); 5733 sched_extend_target (); 5734 sched_deps_init (false); 5735 5736 /* Initialize INSN_CODE now. */ 5737 recog_memoized (insn_rtx); 5738 return insn_rtx; 5739 } 5740 5741 /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn 5742 must not be clonable. */ 5743 vinsn_t 5744 create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p) 5745 { 5746 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx)); 5747 5748 /* If VINSN_TYPE is not USE, retain its uniqueness. */ 5749 return vinsn_create (insn_rtx, force_unique_p); 5750 } 5751 5752 /* Create a copy of INSN_RTX. */ 5753 rtx 5754 create_copy_of_insn_rtx (rtx insn_rtx) 5755 { 5756 rtx res, link; 5757 5758 if (DEBUG_INSN_P (insn_rtx)) 5759 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), 5760 insn_rtx); 5761 5762 gcc_assert (NONJUMP_INSN_P (insn_rtx)); 5763 5764 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), 5765 NULL_RTX); 5766 5767 /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND 5768 since mark_jump_label will make them. REG_LABEL_TARGETs are created 5769 there too, but are supposed to be sticky, so we copy them. */ 5770 for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1)) 5771 if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND 5772 && REG_NOTE_KIND (link) != REG_EQUAL 5773 && REG_NOTE_KIND (link) != REG_EQUIV) 5774 { 5775 if (GET_CODE (link) == EXPR_LIST) 5776 add_reg_note (res, REG_NOTE_KIND (link), 5777 copy_insn_1 (XEXP (link, 0))); 5778 else 5779 add_reg_note (res, REG_NOTE_KIND (link), XEXP (link, 0)); 5780 } 5781 5782 return res; 5783 } 5784 5785 /* Change vinsn field of EXPR to hold NEW_VINSN. 
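
   VINSNs are reference counted, so the old vinsn is detached (and freed
   once its count drops to zero) while the new one is attached. E.g. a
   caller rebuilding an expression around a fresh insn rtx might do
   (a sketch using the helpers above):

     vinsn_t vi = create_vinsn_from_insn_rtx (insn_rtx, false);
     change_vinsn_in_expr (expr, vi);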
*/ 5786 void 5787 change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn) 5788 { 5789 vinsn_detach (EXPR_VINSN (expr)); 5790 5791 EXPR_VINSN (expr) = new_vinsn; 5792 vinsn_attach (new_vinsn); 5793 } 5794 5795 /* Helpers for global init. */ 5796 /* This structure is used to be able to call existing bundling mechanism 5797 and calculate insn priorities. */ 5798 static struct haifa_sched_info sched_sel_haifa_sched_info = 5799 { 5800 NULL, /* init_ready_list */ 5801 NULL, /* can_schedule_ready_p */ 5802 NULL, /* schedule_more_p */ 5803 NULL, /* new_ready */ 5804 NULL, /* rgn_rank */ 5805 sel_print_insn, /* rgn_print_insn */ 5806 contributes_to_priority, 5807 NULL, /* insn_finishes_block_p */ 5808 5809 NULL, NULL, 5810 NULL, NULL, 5811 0, 0, 5812 5813 NULL, /* add_remove_insn */ 5814 NULL, /* begin_schedule_ready */ 5815 NULL, /* begin_move_insn */ 5816 NULL, /* advance_target_bb */ 5817 5818 NULL, 5819 NULL, 5820 5821 SEL_SCHED | NEW_BBS 5822 }; 5823 5824 /* Setup special insns used in the scheduler. */ 5825 void 5826 setup_nop_and_exit_insns (void) 5827 { 5828 gcc_assert (nop_pattern == NULL_RTX 5829 && exit_insn == NULL_RTX); 5830 5831 nop_pattern = constm1_rtx; 5832 5833 start_sequence (); 5834 emit_insn (nop_pattern); 5835 exit_insn = get_insns (); 5836 end_sequence (); 5837 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR); 5838 } 5839 5840 /* Free special insns used in the scheduler. */ 5841 void 5842 free_nop_and_exit_insns (void) 5843 { 5844 exit_insn = NULL_RTX; 5845 nop_pattern = NULL_RTX; 5846 } 5847 5848 /* Setup a special vinsn used in new insns initialization. */ 5849 void 5850 setup_nop_vinsn (void) 5851 { 5852 nop_vinsn = vinsn_create (exit_insn, false); 5853 vinsn_attach (nop_vinsn); 5854 } 5855 5856 /* Free a special vinsn used in new insns initialization. */ 5857 void 5858 free_nop_vinsn (void) 5859 { 5860 gcc_assert (VINSN_COUNT (nop_vinsn) == 1); 5861 vinsn_detach (nop_vinsn); 5862 nop_vinsn = NULL; 5863 } 5864 5865 /* Call a set_sched_flags hook. */ 5866 void 5867 sel_set_sched_flags (void) 5868 { 5869 /* ??? This means that set_sched_flags were called, and we decided to 5870 support speculation. However, set_sched_flags also modifies flags 5871 on current_sched_info, doing this only at global init. And we 5872 sometimes change c_s_i later. So put the correct flags again. */ 5873 if (spec_info && targetm.sched.set_sched_flags) 5874 targetm.sched.set_sched_flags (spec_info); 5875 } 5876 5877 /* Setup pointers to global sched info structures. */ 5878 void 5879 sel_setup_sched_infos (void) 5880 { 5881 rgn_setup_common_sched_info (); 5882 5883 memcpy (&sel_common_sched_info, common_sched_info, 5884 sizeof (sel_common_sched_info)); 5885 5886 sel_common_sched_info.fix_recovery_cfg = NULL; 5887 sel_common_sched_info.add_block = NULL; 5888 sel_common_sched_info.estimate_number_of_insns 5889 = sel_estimate_number_of_insns; 5890 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn; 5891 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS; 5892 5893 common_sched_info = &sel_common_sched_info; 5894 5895 current_sched_info = &sched_sel_haifa_sched_info; 5896 current_sched_info->sched_max_insns_priority = 5897 get_rgn_sched_max_insns_priority (); 5898 5899 sel_set_sched_flags (); 5900 } 5901 5902 5903 /* Adds basic block BB to region RGN at the position *BB_ORD_INDEX, 5904 *BB_ORD_INDEX after that is increased. 
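
   That is, *BB_ORD_INDEX is post-incremented, so consecutive calls lay
   blocks out at consecutive ordinal positions; make_region_from_loop
   below uses it like:

     int bb_ord_index = 0;
     sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);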
*/
5905 static void
5906 sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5907 {
5908   RGN_NR_BLOCKS (rgn) += 1;
5909   RGN_DONT_CALC_DEPS (rgn) = 0;
5910   RGN_HAS_REAL_EBB (rgn) = 0;
5911   CONTAINING_RGN (bb->index) = rgn;
5912   BLOCK_TO_BB (bb->index) = *bb_ord_index;
5913   rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5914   (*bb_ord_index)++;
5915
5916   /* FIXME: it is true only when not scheduling ebbs. */
5917   RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5918 }
5919
5920 /* Functions to support pipelining of outer loops. */
5921
5922 /* Creates a new empty region and returns its number. */
5923 static int
5924 sel_create_new_region (void)
5925 {
5926   int new_rgn_number = nr_regions;
5927
5928   RGN_NR_BLOCKS (new_rgn_number) = 0;
5929
5930   /* FIXME: This will work only when EBBs are not created. */
5931   if (new_rgn_number != 0)
5932     RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5933                                   RGN_NR_BLOCKS (new_rgn_number - 1);
5934   else
5935     RGN_BLOCKS (new_rgn_number) = 0;
5936
5937   /* Set the blocks of the next region so the other functions may
5938      calculate the number of blocks in the region. */
5939   RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5940                                     RGN_NR_BLOCKS (new_rgn_number);
5941
5942   nr_regions++;
5943
5944   return new_rgn_number;
5945 }
5946
5947 /* If X has a smaller topological sort number than Y, returns -1;
5948    if greater, returns 1. */
5949 static int
5950 bb_top_order_comparator (const void *x, const void *y)
5951 {
5952   basic_block bb1 = *(const basic_block *) x;
5953   basic_block bb2 = *(const basic_block *) y;
5954
5955   gcc_assert (bb1 == bb2
5956               || rev_top_order_index[bb1->index]
5957                  != rev_top_order_index[bb2->index]);
5958
5959   /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5960      bbs with greater number should go earlier. */
5961   if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5962     return -1;
5963   else
5964     return 1;
5965 }
5966
5967 /* Create a region for LOOP and return its number. If we don't want
5968    to pipeline LOOP, return -1. */
5969 static int
5970 make_region_from_loop (struct loop *loop)
5971 {
5972   unsigned int i;
5973   int new_rgn_number = -1;
5974   struct loop *inner;
5975
5976   /* Basic block index, to be assigned to BLOCK_TO_BB. */
5977   int bb_ord_index = 0;
5978   basic_block *loop_blocks;
5979   basic_block preheader_block;
5980
5981   if (loop->num_nodes
5982       > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
5983     return -1;
5984
5985   /* Don't pipeline a loop whose latch belongs to one of its inner loops.
*/
5986   for (inner = loop->inner; inner; inner = inner->inner)
5987     if (flow_bb_inside_loop_p (inner, loop->latch))
5988       return -1;
5989
5990   loop->ninsns = num_loop_insns (loop);
5991   if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
5992     return -1;
5993
5994   loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
5995
5996   for (i = 0; i < loop->num_nodes; i++)
5997     if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
5998       {
5999         free (loop_blocks);
6000         return -1;
6001       }
6002
6003   preheader_block = loop_preheader_edge (loop)->src;
6004   gcc_assert (preheader_block);
6005   gcc_assert (loop_blocks[0] == loop->header);
6006
6007   new_rgn_number = sel_create_new_region ();
6008
6009   sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
6010   SET_BIT (bbs_in_loop_rgns, preheader_block->index);
6011
6012   for (i = 0; i < loop->num_nodes; i++)
6013     {
6014       /* Add only those blocks that haven't been scheduled in the inner loop.
6015          The exception is the basic blocks with bookkeeping code: they should
6016          be added to the region (and they actually don't belong to the loop
6017          body, but to the region containing that loop body). */
6018
6019       gcc_assert (new_rgn_number >= 0);
6020
6021       if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index))
6022         {
6023           sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
6024                                    new_rgn_number);
6025           SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index);
6026         }
6027     }
6028
6029   free (loop_blocks);
6030   MARK_LOOP_FOR_PIPELINING (loop);
6031
6032   return new_rgn_number;
6033 }
6034
6035 /* Create a new region from preheader blocks LOOP_BLOCKS. */
6036 void
6037 make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks)
6038 {
6039   unsigned int i;
6040   int new_rgn_number = -1;
6041   basic_block bb;
6042
6043   /* Basic block index, to be assigned to BLOCK_TO_BB. */
6044   int bb_ord_index = 0;
6045
6046   new_rgn_number = sel_create_new_region ();
6047
6048   FOR_EACH_VEC_ELT (basic_block, *loop_blocks, i, bb)
6049     {
6050       gcc_assert (new_rgn_number >= 0);
6051
6052       sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
6053     }
6054
6055   VEC_free (basic_block, heap, *loop_blocks);
6056   gcc_assert (*loop_blocks == NULL);
6057 }
6058
6059
6060 /* Create region(s) from loop nest LOOP, such that inner loops will be
6061    pipelined before outer loops. Returns true when a region for LOOP
6062    is created. */
6063 static bool
6064 make_regions_from_loop_nest (struct loop *loop)
6065 {
6066   struct loop *cur_loop;
6067   int rgn_number;
6068
6069   /* Traverse all inner nodes of the loop. */
6070   for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
6071     if (! TEST_BIT (bbs_in_loop_rgns, cur_loop->header->index))
6072       return false;
6073
6074   /* At this moment all regular inner loops should have been pipelined.
6075      Try to create a region from this loop. */
6076   rgn_number = make_region_from_loop (loop);
6077
6078   if (rgn_number < 0)
6079     return false;
6080
6081   VEC_safe_push (loop_p, heap, loop_nests, loop);
6082   return true;
6083 }
6084
6085 /* Initialize the data structures needed for pipelining. */
6086 void
6087 sel_init_pipelining (void)
6088 {
6089   /* Collect loop information to be used in outer loops pipelining.
/* Initialize the data structures needed for pipelining.  */
void
sel_init_pipelining (void)
{
  /* Collect loop information to be used in outer loops pipelining.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS
                       | LOOPS_HAVE_FALLTHRU_PREHEADERS
                       | LOOPS_HAVE_RECORDED_EXITS
                       | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
  current_loop_nest = NULL;

  bbs_in_loop_rgns = sbitmap_alloc (last_basic_block);
  sbitmap_zero (bbs_in_loop_rgns);

  recompute_rev_top_order ();
}

/* Return the struct loop for region RGN, or NULL when the region does
   not correspond to a loop.  */
loop_p
get_loop_nest_for_rgn (unsigned int rgn)
{
  /* Regions created with extend_rgns don't have corresponding loop nests,
     because they don't represent loops.  */
  if (rgn < VEC_length (loop_p, loop_nests))
    return VEC_index (loop_p, loop_nests, rgn);
  else
    return NULL;
}

/* True when LOOP was included into pipelining regions.  */
bool
considered_for_pipelining_p (struct loop *loop)
{
  if (loop_depth (loop) == 0)
    return false;

  /* Now, the loop could be too large or irreducible.  Check whether its
     region is in LOOP_NESTS.
     We determine the region number of LOOP as the region number of its
     latch.  We can't use the header here, because it could be a
     just-removed preheader, which would give us the wrong region number.
     The latch is safe: loops whose latch belongs to an inner loop are
     never pipelined (see make_region_from_loop).  */
  if (LOOP_MARKED_FOR_PIPELINING_P (loop))
    {
      int rgn = CONTAINING_RGN (loop->latch->index);

      gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
      return true;
    }

  return false;
}
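/* Illustrative sketch, not part of the pass: a C precedence note
   relevant to the loop-father test in make_regions_from_the_rest below.
   Because ! binds tighter than ==, an expression such as "!num == 0"
   parses as "(!num) == 0", which is equivalent to "num != 0", the
   comparison form used below.  A self-contained demonstration:  */
#if 0
#include <assert.h>

int
main (void)
{
  int num = 3;

  /* ! applies to NUM first; only then is the result compared to 0.  */
  assert ((!num == 0) == ((!num) == 0));
  assert ((!num == 0) == (num != 0));
  return 0;
}
#endif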
/* Make regions from the remaining blocks, after loops have been chosen
   for pipelining.  */
static void
make_regions_from_the_rest (void)
{
  int cur_rgn_blocks;
  int *loop_hdr;
  int i;

  basic_block bb;
  edge e;
  edge_iterator ei;
  int *degree;

  /* Index in rgn_bb_table where to start allocating new regions.  */
  cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;

  /* Make regions from all the remaining basic blocks - those that don't
     belong to any loop or belong to irreducible loops.  Prepare the data
     structures for extend_rgns.  */

  /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
     LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the
     same loop.  */
  loop_hdr = XNEWVEC (int, last_basic_block);
  degree = XCNEWVEC (int, last_basic_block);

  /* For each basic block that belongs to some loop assign the number
     of innermost loop it belongs to.  */
  for (i = 0; i < last_basic_block; i++)
    loop_hdr[i] = -1;

  FOR_EACH_BB (bb)
    {
      if (bb->loop_father && bb->loop_father->num != 0
          && !(bb->flags & BB_IRREDUCIBLE_LOOP))
        loop_hdr[bb->index] = bb->loop_father->num;
    }

  /* For each basic block, the degree is the number of incoming edges
     coming from basic blocks that are not yet placed in any region.
     Blocks already placed in loop regions are marked with degree -1,
     so extend_rgns will skip them.  */
  FOR_EACH_BB (bb)
    {
      degree[bb->index] = 0;

      if (!TEST_BIT (bbs_in_loop_rgns, bb->index))
        {
          FOR_EACH_EDGE (e, ei, bb->preds)
            if (!TEST_BIT (bbs_in_loop_rgns, e->src->index))
              degree[bb->index]++;
        }
      else
        degree[bb->index] = -1;
    }

  extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);

  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB (bb)
    if (degree[bb->index] >= 0)
      {
        rgn_bb_table[cur_rgn_blocks] = bb->index;
        RGN_NR_BLOCKS (nr_regions) = 1;
        RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;
        CONTAINING_RGN (bb->index) = nr_regions++;
        BLOCK_TO_BB (bb->index) = 0;
      }

  free (degree);
  free (loop_hdr);
}

/* Free the data structures used in pipelining of loops.  */
void
sel_finish_pipelining (void)
{
  loop_iterator li;
  struct loop *loop;

  /* Release aux fields so we don't free them later by mistake.  */
  FOR_EACH_LOOP (li, loop, 0)
    loop->aux = NULL;

  loop_optimizer_finalize ();

  VEC_free (loop_p, heap, loop_nests);

  free (rev_top_order_index);
  rev_top_order_index = NULL;
}

/* This function replaces find_rgns when
   FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set.  */
void
sel_find_rgns (void)
{
  sel_init_pipelining ();
  extend_regions ();

  if (current_loops)
    {
      loop_p loop;
      loop_iterator li;

      FOR_EACH_LOOP (li, loop, (flag_sel_sched_pipelining_outer_loops
                                ? LI_FROM_INNERMOST
                                : LI_ONLY_INNERMOST))
        make_regions_from_loop_nest (loop);
    }

  /* Make regions from all the remaining basic blocks and schedule them.
     These blocks include blocks that don't belong to any loop or belong
     to irreducible loops.  */
  make_regions_from_the_rest ();

  /* We don't need bbs_in_loop_rgns anymore.  */
  sbitmap_free (bbs_in_loop_rgns);
  bbs_in_loop_rgns = NULL;
}

/* Add the preheader blocks from the previous loop to the current region,
   taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record
   them in *BBS.  This function is only used with
   -fsel-sched-pipelining-outer-loops.  */
void
sel_add_loop_preheaders (bb_vec_t *bbs)
{
  int i;
  basic_block bb;
  VEC(basic_block, heap) *preheader_blocks
    = LOOP_PREHEADER_BLOCKS (current_loop_nest);

  for (i = 0;
       VEC_iterate (basic_block, preheader_blocks, i, bb);
       i++)
    {
      VEC_safe_push (basic_block, heap, *bbs, bb);
      VEC_safe_push (basic_block, heap, last_added_blocks, bb);
      sel_add_bb (bb);
    }

  VEC_free (basic_block, heap, preheader_blocks);
}
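/* Illustrative sketch, not part of the pass: the degree computation of
   make_regions_from_the_rest above, on a toy edge list.  DEGREE[B]
   counts the predecessors of B that are not already inside a loop
   region; blocks that are already placed get degree -1 so that
   extend_rgns skips them.  The arrays EDGES and PLACED are invented
   for this example.  */
#if 0
#include <stdio.h>

#define N_BLOCKS 4
#define N_EDGES 4

static int edges[N_EDGES][2] = { {0, 1}, {0, 2}, {1, 3}, {2, 3} };
static int placed[N_BLOCKS] = { 0, 1, 0, 0 };  /* Block 1 is placed.  */

int
main (void)
{
  int degree[N_BLOCKS];
  int i;

  for (i = 0; i < N_BLOCKS; i++)
    degree[i] = placed[i] ? -1 : 0;

  /* Count only edges whose source and destination are both unplaced.  */
  for (i = 0; i < N_EDGES; i++)
    if (!placed[edges[i][0]] && !placed[edges[i][1]])
      degree[edges[i][1]]++;

  for (i = 0; i < N_BLOCKS; i++)
    printf ("degree[%d] = %d\n", i, degree[i]);
  /* Prints 0, -1, 1, 1: block 3's edge from placed block 1 is not
     counted.  */
  return 0;
}
#endif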
/* Return TRUE if BB is a loop preheader, while pipelining outer loops.
   Note that this function must also work when pipelining_p is false,
   because it is used when deciding whether we should or should not
   reschedule pipelined code.  */
bool
sel_is_loop_preheader_p (basic_block bb)
{
  if (current_loop_nest)
    {
      struct loop *outer;

      if (preheader_removed)
        return false;

      /* Preheader is the first block in the region.  */
      if (BLOCK_TO_BB (bb->index) == 0)
        return true;

      /* We used to find a preheader with the topological information.
         Check that the above code is equivalent to what we did before.  */
      if (in_current_region_p (current_loop_nest->header))
        gcc_assert (!(BLOCK_TO_BB (bb->index)
                      < BLOCK_TO_BB (current_loop_nest->header->index)));

      /* Sanity check: the latch block of an outer pipelined loop should
         never be seen here.  */
      for (outer = loop_outer (current_loop_nest);
           outer;
           outer = loop_outer (outer))
        if (considered_for_pipelining_p (outer) && outer->latch == bb)
          gcc_unreachable ();
    }

  return false;
}

/* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
   can be removed, making the corresponding edge fallthrough (assuming that
   all basic blocks between JUMP_BB and DEST_BB are empty).  */
static bool
bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
{
  if (!onlyjump_p (BB_END (jump_bb))
      || tablejump_p (BB_END (jump_bb), NULL, NULL))
    return false;

  /* Bail out if there are several outgoing edges, an abnormal or
     crossing edge, or the destination of the jump is not DEST_BB.  */
  if (EDGE_COUNT (jump_bb->succs) != 1
      || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
      || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
    return false;

  /* Otherwise, the jump is removable.  */
  return true;
}
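/* Illustrative sketch, not part of the pass: the iteration pattern used
   by sel_remove_loop_preheader below.  There, redirecting an edge
   removes it from bb->preds, so the
   "for (ei = ei_start ...; (e = ei_safe_edge (ei)); )" loop never
   advances the iterator: every redirect refills the current slot.  The
   toy below (all names invented) shows the general form of deleting
   from an array while scanning it, advancing only past the entries
   that stay.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int preds[] = { 10, 11, 12, 13 };
  int n = 4, i = 0;

  while (i < n)
    {
      if (preds[i] % 2 == 0)
        /* "Redirect" even entries away: the slot is refilled with the
           last entry, so we must not advance I here.  */
        preds[i] = preds[--n];
      else
        i++;  /* Odd entries stay; advance past them.  */
    }

  for (i = 0; i < n; i++)
    printf ("%d ", preds[i]);  /* Prints: 13 11  */
  return 0;
}
#endif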
/* Remove the loop preheader blocks from the current region and save
   them in PREHEADER_BLOCKS of the parent loop, so that they will later
   be added to the region representing the outer loop.  */
static void
sel_remove_loop_preheader (void)
{
  int i, old_len;
  int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
  basic_block bb;
  bool all_empty_p = true;
  VEC(basic_block, heap) *preheader_blocks
    = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));

  gcc_assert (current_loop_nest);
  old_len = VEC_length (basic_block, preheader_blocks);

  /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS.  */
  for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
    {
      bb = BASIC_BLOCK (BB_TO_BLOCK (i));

      /* If the basic block belongs to the region but not to the
         corresponding loop, it must be a preheader.  */
      if (sel_is_loop_preheader_p (bb))
        {
          VEC_safe_push (basic_block, heap, preheader_blocks, bb);
          if (BB_END (bb) != bb_note (bb))
            all_empty_p = false;
        }
    }

  /* Remove these blocks only after iterating over the whole region.  */
  for (i = VEC_length (basic_block, preheader_blocks) - 1;
       i >= old_len;
       i--)
    {
      bb = VEC_index (basic_block, preheader_blocks, i);
      sel_remove_bb (bb, false);
    }

  if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
    {
      if (!all_empty_p)
        /* Immediately create a new region from the preheader.  */
        make_region_from_loop_preheader (&preheader_blocks);
      else
        {
          /* If all preheader blocks are empty, don't create a new empty
             region; instead, remove them completely.  */
          FOR_EACH_VEC_ELT (basic_block, preheader_blocks, i, bb)
            {
              edge e;
              edge_iterator ei;
              basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;

              /* Redirect all incoming edges to the next basic block.  */
              for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
                {
                  if (! (e->flags & EDGE_FALLTHRU))
                    redirect_edge_and_branch (e, bb->next_bb);
                  else
                    redirect_edge_succ (e, bb->next_bb);
                }
              gcc_assert (BB_NOTE_LIST (bb) == NULL);
              delete_and_free_basic_block (bb);

              /* Check whether, after deleting the preheader, PREV_BB ends
                 with a removable unconditional jump to NEXT_BB.  If so,
                 remove that jump, and free the data sets of PREV_BB if it
                 becomes empty.  */
              if (next_bb->prev_bb == prev_bb
                  && prev_bb != ENTRY_BLOCK_PTR
                  && bb_has_removable_jump_to_p (prev_bb, next_bb))
                {
                  redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
                  if (BB_END (prev_bb) == bb_note (prev_bb))
                    free_data_sets (prev_bb);
                }

              set_immediate_dominator (CDI_DOMINATORS, next_bb,
                                       recompute_dominator (CDI_DOMINATORS,
                                                            next_bb));
            }
        }
      VEC_free (basic_block, heap, preheader_blocks);
    }
  else
    /* Store the preheader blocks in the parent loop's structure.  */
    SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
                               preheader_blocks);
}
#endif