/* Passes for transactional memory support.
   Copyright (C) 2008-2018 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>
   and Aldy Hernandez <aldyh@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "tree-eh.h"
#include "calls.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-inline.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "params.h"
#include "langhooks.h"
#include "cfgloop.h"
#include "tree-ssa-address.h"
#include "stringpool.h"
#include "attribs.h"

#define A_RUNINSTRUMENTEDCODE	0x0001
#define A_RUNUNINSTRUMENTEDCODE	0x0002
#define A_SAVELIVEVARIABLES	0x0004
#define A_RESTORELIVEVARIABLES	0x0008
#define A_ABORTTRANSACTION	0x0010

#define AR_USERABORT		0x0001
#define AR_USERRETRY		0x0002
#define AR_TMCONFLICT		0x0004
#define AR_EXCEPTIONBLOCKABORT	0x0008
#define AR_OUTERABORT		0x0010

#define MODE_SERIALIRREVOCABLE	0x0000


/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction {
	  local++;
	  if (++global == 10)
	    __tm_abort;
	}

   During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
   trivially replaced with a GIMPLE_TRANSACTION node.

   During pass_lower_tm, we examine the body of transactions looking
   for aborts.  Transactions that do not contain an abort may be
   merged into an outer transaction.  We also add a TRY-FINALLY node
   to arrange for the transaction to be committed on any exit.

   [??? Think about how this arrangement affects throw-with-commit
   and throw-with-abort operations.  In this case we want the TRY to
   handle gotos, but not to catch any exceptions because the transaction
   will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    local = local + 1;
	    t0 = global;
	    t1 = t0 + 1;
	    global = t1;
	    if (t1 == 10)
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

   During pass_lower_eh, we create EH regions for the transactions,
   intermixed with the regular EH stuff.
   This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
   correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	local = local + 1;
	t0 = global;
	t1 = t0 + 1;
	global = t1;
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

   This is the end of all_lowering_passes, and so is what is present
   during the IPA passes, and through all of the optimization passes.

   During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
   functions and mark functions for cloning.

   At the end of gimple optimization, before exiting SSA form,
   pass_tm_edges replaces statements that perform transactional
   memory operations with the appropriate TM builtins, and swaps
   out function calls with their transactional clones.  At this
   point we introduce the abnormal transaction restart edges and
   complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	eh_label:
	if (x & abort_transaction)
	  goto over;
	local = local + 1;
	t0 = __builtin___tm_load (global);
	t1 = t0 + 1;
	__builtin___tm_store (&global, t1);
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:
*/

static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);


/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  if (x == NULL_TREE)
    return NULL_TREE;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));

    default:
      if (TYPE_P (x))
	return NULL_TREE;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}
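
/* For illustration (an editor's sketch, not code from this file): given

     typedef void tsafe_fn (void) __attribute__((transaction_safe));
     tsafe_fn f;
     tsafe_fn *fp;

   get_attrs_for finds the "transaction_safe" attribute both for the
   FUNCTION_DECL F (via its function type) and for FP, by peeking
   through the pointer type.  */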
/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}

/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}

/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple *call)
{
  if (gimple_call_internal_p (call))
    return (gimple_call_flags (call) & (ECF_CONST | ECF_TM_PURE)) != 0;

  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}

/* Return true for built in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a built in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_load (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}

/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_store (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}

/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}
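
/* For example (a sketch): for GNU C's

     __transaction_atomic { ... __transaction_cancel; ... }

   the front end uses this function to build the equivalent of

     __builtin___tm_abort (AR_USERABORT);

   with AR_OUTERABORT also set for "__transaction_cancel [[outer]]"
   (builtin spelled informally, as in the overview comment above).  */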
/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
{
  static inline hashval_t hash (tree_map *m) { return m->hash; }
  static inline bool
  equal (tree_map *a, tree_map *b)
  {
    return a->base.from == b->base.from;
  }

  static int
  keep_cache_entry (tree_map *&m)
  {
    return ggc_marked_p (m->base.from);
  }
};

static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;

void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);

  h = ggc_alloc<tree_map> ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
  *slot = h;
}
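
/* A user can request such a replacement explicitly; for instance
   (illustrative, TXN_XMALLOC being a hypothetical wrapper):

     void *txn_xmalloc (size_t) __attribute__((transaction_safe));
     void *xmalloc (size_t)
	  __attribute__((transaction_wrap (txn_xmalloc)));

   which the front end records here as
   record_tm_replacement (xmalloc, txn_xmalloc).  */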
/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = tm_wrap_map->find_with_hash (&in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we only have these defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	return NULL;
      }

  return NULL;
}

/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */
void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}

/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple *stmt;
};

/* Return true if T is a volatile lvalue of some kind.  */

static bool
volatile_lvalue_p (tree t)
{
  return ((SSA_VAR_P (t) || REFERENCE_CLASS_P (t))
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}
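
/* For instance (illustrative):

     volatile int v;
     ...
     __transaction_atomic { v = 1; }

   is rejected below with "invalid use of volatile lvalue inside
   transaction".  */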
/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = false;
  else if (volatile_lvalue_p (*tp)
	   && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (d->stmt),
		  "invalid use of volatile lvalue inside transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (d->stmt),
		  "invalid use of volatile lvalue inside %<transaction_safe%> "
		  "function");
    }

  return NULL_TREE;
}

static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}

static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fn = gimple_call_fn (stmt);

	if ((d->summary_flags & DIAG_TM_OUTER) == 0
	    && is_tm_may_cancel_outer (fn))
	  error_at (gimple_location (stmt),
		    "%<transaction_may_cancel_outer%> function call not within"
		    " outer transaction or %<transaction_may_cancel_outer%>");

	if (d->summary_flags & DIAG_TM_SAFE)
	  {
	    bool is_safe, direct_call_p;
	    tree replacement;

	    if (TREE_CODE (fn) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
	      {
		direct_call_p = true;
		replacement = TREE_OPERAND (fn, 0);
		replacement = find_tm_replacement_function (replacement);
		if (replacement)
		  fn = replacement;
	      }
	    else
	      {
		direct_call_p = false;
		replacement = NULL_TREE;
	      }

	    if (is_tm_safe_or_pure (fn))
	      is_safe = true;
	    else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
	      {
		/* A function explicitly marked transaction_callable as
		   opposed to transaction_safe is being defined to be
		   unsafe as part of its ABI, regardless of its contents.  */
		is_safe = false;
	      }
	    else if (direct_call_p)
	      {
		if (IS_TYPE_OR_DECL_P (fn)
		    && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
		  is_safe = true;
		else if (replacement)
		  {
		    /* ??? At present we've been considering replacements
		       merely transaction_callable, and therefore might
		       enter irrevocable.  The tm_wrap attribute has not
		       yet made it into the new language spec.  */
		    is_safe = false;
		  }
		else
		  {
		    /* ??? Diagnostics for unmarked direct calls moved into
		       the IPA pass.  Section 3.2 of the spec details how
		       functions not marked should be considered "implicitly
		       safe" based on having examined the function body.  */
		    is_safe = true;
		  }
	      }
	    else
	      {
		/* An unmarked indirect call.  Consider it unsafe even
		   though optimization may yet figure out how to inline.  */
		is_safe = false;
	      }

	    if (!is_safe)
	      {
		if (TREE_CODE (fn) == ADDR_EXPR)
		  fn = TREE_OPERAND (fn, 0);
		if (d->block_flags & DIAG_TM_SAFE)
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"atomic transaction", fn);
		    else
		      {
			if ((!DECL_P (fn) || DECL_NAME (fn))
			    && TREE_CODE (fn) != SSA_NAME)
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "atomic transaction", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "atomic transaction");
		      }
		  }
		else
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"%<transaction_safe%> function", fn);
		    else
		      {
			if ((!DECL_P (fn) || DECL_NAME (fn))
			    && TREE_CODE (fn) != SSA_NAME)
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "%<transaction_safe%> function", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "%<transaction_safe%> function");
		      }
		  }
	      }
	  }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
	 asm statements, and then add "transaction_safe" to it.
	 Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
	gtransaction *trans_stmt = as_a <gtransaction *> (stmt);
	unsigned char inner_flags = DIAG_TM_SAFE;

	if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED)
	  {
	    if (d->block_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in atomic transaction");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in %<transaction_safe%> function");
	    inner_flags = DIAG_TM_RELAXED;
	  }
	else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER)
	  {
	    if (d->block_flags)
	      error_at (gimple_location (stmt),
			"outer transaction in transaction");
	    else if (d->func_flags & DIAG_TM_OUTER)
	      error_at (gimple_location (stmt),
			"outer transaction in "
			"%<transaction_may_cancel_outer%> function");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"outer transaction in %<transaction_safe%> function");
	    inner_flags |= DIAG_TM_OUTER;
	  }

	*handled_ops_p = true;
	if (gimple_transaction_body (trans_stmt))
	  {
	    struct walk_stmt_info wi_inner;
	    struct diagnose_tm d_inner;

	    memset (&d_inner, 0, sizeof (d_inner));
	    d_inner.func_flags = d->func_flags;
	    d_inner.block_flags = d->block_flags | inner_flags;
	    d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

	    memset (&wi_inner, 0, sizeof (wi_inner));
	    wi_inner.info = &d_inner;

	    walk_gimple_seq (gimple_transaction_body (trans_stmt),
			     diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
	  }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}
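
/* For example (an illustrative sketch of the diagnostics above):

     void irr_fn (void) __attribute__((transaction_unsafe));

     void f (void) __attribute__((transaction_safe))
     {
       irr_fn ();		   // "unsafe function call 'irr_fn' within
				   //  'transaction_safe' function"
       __transaction_relaxed {}   // "relaxed transaction in
				   //  'transaction_safe' function"
     }
*/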
static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}

namespace {

const pass_data pass_data_diagnose_tm_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_tm_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_tm_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_tm_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }

}; // class pass_diagnose_tm_blocks

} // anon namespace

gimple_opt_pass *
make_pass_diagnose_tm_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_tm_blocks (ctxt);
}

/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction {
	 lala.x[i] = 123;
	 ...
       }

   We can either save/restore:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       if (trxn & a_saveLiveVariables)
	 tmp_lala1 = lala.x[i];
       else if (trxn & a_restoreLiveVariables)
	 lala.x[i] = tmp_lala1;

   or use the logging functions:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       _ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree as possible, to shadow all of the writes
   to a given location (thus reducing the total number of logging
   calls), but not so high as to be called on a path that does not
   perform a write.  */

/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  vec<gimple *> stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
};


/* Log entry hashtable helpers.  */
struct log_entry_hasher : pointer_hash <tm_log_entry>
{
  static inline hashval_t hash (const tm_log_entry *);
  static inline bool equal (const tm_log_entry *, const tm_log_entry *);
  static inline void remove (tm_log_entry *);
};

/* Htab support.  Return hash value for a `tm_log_entry'.  */
inline hashval_t
log_entry_hasher::hash (const tm_log_entry *log)
{
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
inline bool
log_entry_hasher::equal (const tm_log_entry *log1, const tm_log_entry *log2)
{
  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
inline void
log_entry_hasher::remove (tm_log_entry *lp)
{
  lp->stmts.release ();
  free (lp);
}


/* The actual log.  */
static hash_table<log_entry_hasher> *tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static vec<tree> tm_log_save_addresses;

enum thread_memory_type
  {
    mem_non_local = 0,
    mem_thread_local,
    mem_transaction_local,
    mem_max
  };

struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
};

/* Hashtable helpers.  */

struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map>
{
  static inline hashval_t hash (const tm_new_mem_map *);
  static inline bool equal (const tm_new_mem_map *, const tm_new_mem_map *);
};

inline hashval_t
tm_mem_map_hasher::hash (const tm_new_mem_map *v)
{
  return (intptr_t)v->val >> 4;
}

inline bool
tm_mem_map_hasher::equal (const tm_new_mem_map *v, const tm_new_mem_map *c)
{
  return v->val == c->val;
}

/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc).  */
static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;

/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log = new hash_table<log_entry_hasher> (10);
  tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
  tm_log_save_addresses.create (5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  delete tm_log;
  tm_log = NULL;
  delete tm_new_mem_hash;
  tm_new_mem_hash = NULL;
  tm_log_save_addresses.release ();
}
/* Return true if MEM is a transaction invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return (def_bb != region_entry_block
	      && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb));
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}
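
/* For instance (a sketch): in

     p = ...;
     __transaction_atomic { p->x = 1; }

   the address of p->x is computed from P, whose definition dominates
   the transaction entry, so the address is invariant for the region
   and a candidate for a save/restore sequence.  */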
/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry block for the transaction, or NULL if not
   known.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.  */
static void
tm_log_add (basic_block entry_block, tree addr, gimple *stmt)
{
  tm_log_entry **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = tm_log->find_slot (&l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
	  && transaction_invariant_address_p (lp->addr, entry_block)
	  && TYPE_SIZE_UNIT (type) != NULL
	  && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
	  && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
	  /* We must be able to copy this type normally.  I.e., no
	     special constructors and the like.  */
	  && !TREE_ADDRESSABLE (type))
	{
	  lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
	  lp->stmts.create (0);
	  lp->entry_block = entry_block;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
	  tm_log_save_addresses.safe_push (lp->addr);
	}
      else
	{
	  /* Use the logging functions.  */
	  lp->stmts.create (5);
	  lp->stmts.quick_push (stmt);
	  lp->save_var = NULL;
	}
    }
  else
    {
      size_t i;
      gimple *oldstmt;

      lp = *slot;

      /* If we're generating a save/restore sequence, we don't care
	 about statements.  */
      if (lp->save_var)
	return;

      for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
	{
	  if (stmt == oldstmt)
	    return;
	  /* We already have a store to the same address, higher up the
	     dominator tree.  Nothing to do.  */
	  if (dominated_by_p (CDI_DOMINATORS,
			      gimple_bb (stmt), gimple_bb (oldstmt)))
	    return;
	  /* We should be processing blocks in dominator tree order.  */
	  gcc_assert (!dominated_by_p (CDI_DOMINATORS,
				       gimple_bb (oldstmt), gimple_bb (stmt)));
	}
      /* Store is on a different code path.  */
      lp->stmts.safe_push (stmt);
    }
}

/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}

/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
static void
tm_log_emit_stmt (tree addr, gimple *stmt)
{
  tree type = TREE_TYPE (addr);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple *log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (TYPE_SIZE (type) != NULL
	   && tree_fits_uhwi_p (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));

      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  switch (type_size)
	    {
	    case 64:
	      code = BUILT_IN_TM_LOG_M64;
	      break;
	    case 128:
	      code = BUILT_IN_TM_LOG_M128;
	      break;
	    case 256:
	      code = BUILT_IN_TM_LOG_M256;
	      break;
	    default:
	      goto unhandled_vec;
	    }
	  if (!builtin_decl_explicit_p (code))
	    goto unhandled_vec;
	}
      else
	{
	unhandled_vec:
	  switch (type_size)
	    {
	    case 8:
	      code = BUILT_IN_TM_LOG_1;
	      break;
	    case 16:
	      code = BUILT_IN_TM_LOG_2;
	      break;
	    case 32:
	      code = BUILT_IN_TM_LOG_4;
	      break;
	    case 64:
	      code = BUILT_IN_TM_LOG_8;
	      break;
	    }
	}
    }

  if (code != BUILT_IN_TM_LOG && !builtin_decl_explicit_p (code))
    code = BUILT_IN_TM_LOG;
  tree decl = builtin_decl_explicit (code);

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (decl, 2, addr, TYPE_SIZE_UNIT (type));
  else
    log = gimple_build_call (decl, 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}

/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */
static void
tm_log_emit (void)
{
  hash_table<log_entry_hasher>::iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
    {
      size_t i;
      gimple *stmt;

      if (dump_file)
	{
	  fprintf (dump_file, "TM thread private mem logging: ");
	  print_generic_expr (dump_file, lp->addr);
	  fprintf (dump_file, "\n");
	}

      if (lp->save_var)
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING to variable\n");
	  continue;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING with logging functions\n");
	  for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
	    tm_log_emit_stmt (lp->addr, stmt);
	}
    }
}

/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple *stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < tm_log_save_addresses.length (); ++i)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
	 instance, aggregates aren't allowed, in which case the system
	 will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
	{
	  lp->save_var = make_ssa_name (lp->save_var, stmt);
	  gimple_assign_set_lhs (stmt, lp->save_var);
	}

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}

/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple *stmt;

  for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}


static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
			       struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
				  struct walk_stmt_info *);

/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple *stmt = NULL;
  enum tree_code code;
  tm_new_mem_map **slot;
  tm_new_mem_map elt, *elt_p;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
	 either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = tm_new_mem_hash->find_slot (&elt, INSERT);
  elt_p = *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map);
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
	{
	  /* Address escapes.  This is not thread-private.  */
	  retval = mem_non_local;
	  goto new_memory_ret;
	}

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
      if (retval != mem_thread_local
	  && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
	retval = mem_thread_local;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  /* x = foo ==> foo */
	  if (code == SSA_NAME)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = foo + n ==> foo */
	  else if (code == POINTER_PLUS_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = (cast*) foo ==> foo */
	  else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code))
	    x = gimple_assign_rhs1 (stmt);
	  /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
	  else if (code == COND_EXPR)
	    {
	      tree op1 = gimple_assign_rhs2 (stmt);
	      tree op2 = gimple_assign_rhs3 (stmt);
	      enum thread_memory_type mem;
	      retval = thread_private_new_memory (entry_block, op1);
	      if (retval == mem_non_local)
		goto new_memory_ret;
	      mem = thread_private_new_memory (entry_block, op2);
	      retval = MIN (retval, mem);
	      goto new_memory_ret;
	    }
	  else
	    {
	      retval = mem_non_local;
	      goto new_memory_ret;
	    }
	}
      else
	{
	  if (gimple_code (stmt) == GIMPLE_PHI)
	    {
	      unsigned int i;
	      enum thread_memory_type mem;
	      tree phi_result = gimple_phi_result (stmt);

	      /* If any of the ancestors are non-local, we are sure to
		 be non-local.  Otherwise we can avoid doing anything
		 and inherit what has already been generated.  */
	      retval = mem_max;
	      for (i = 0; i < gimple_phi_num_args (stmt); ++i)
		{
		  tree op = PHI_ARG_DEF (stmt, i);

		  /* Exclude self-assignment.  */
		  if (phi_result == op)
		    continue;

		  mem = thread_private_new_memory (entry_block, op);
		  if (mem == mem_non_local)
		    {
		      retval = mem;
		      goto new_memory_ret;
		    }
		  retval = MIN (retval, mem);
		}
	      goto new_memory_ret;
	    }
	  break;
	}
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}
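
/* For example (illustrative):

     __transaction_atomic
       {
	 p = malloc (8);	// mem_transaction_local
	 *p = 1;
       }

   while a malloc whose call dominates the transaction entry yields
   mem_thread_local instead; see requires_barrier below for how the
   two cases are treated.  */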
/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where STMT resides.
   NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
static bool
requires_barrier (basic_block entry_block, tree x, gimple *stmt)
{
  tree orig = x;
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);

  switch (TREE_CODE (x))
    {
    case INDIRECT_REF:
    case MEM_REF:
      {
	enum thread_memory_type ret;

	ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
	if (ret == mem_non_local)
	  return true;
	if (stmt && ret == mem_thread_local)
	  /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
	  tm_log_add (entry_block, orig, stmt);

	/* Transaction-locals require nothing at all.  For malloc, a
	   transaction restart frees the memory and we reallocate.
	   For alloca, the stack pointer gets reset by the retry and
	   we reallocate.  */
	return false;
      }

    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
	return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
	return false;
      gcc_assert (VAR_P (x));
      /* FALLTHRU */

    case PARM_DECL:
    case RESULT_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
	{
	  /* ??? This value is a pointer, but aggregate_value_p has been
	     jigged to return true which confuses needs_to_live_in_memory.
	     This ought to be cleaned up generically.

	     FIXME: Verify this still happens after the next mainline
	     merge.  Testcase ie g++.dg/tm/pr47554.C.  */
	  return false;
	}

      if (is_global_var (x))
	return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
	     tm_log_add() call, however is_call_clobbered() depends on
	     aliasing info which is not available during
	     gimplification.  Since requires_barrier() gets called
	     during lower_sequence_tm/gimplification, leave the call
	     to needs_to_live_in_memory until we eliminate
	     lower_sequence_tm altogether.  */
	  needs_to_live_in_memory (x))
	return true;
      else
	{
	  /* For local memory that doesn't escape (aka thread private
	     memory), we can either save the value at the beginning of
	     the transaction and restore on restart, or call a tm
	     function to dynamically save and restore on restart
	     (ITM_L*).  */
	  if (stmt)
	    tm_log_add (entry_block, orig, stmt);
	  return false;
	}

    default:
      return false;
    }
}
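
/* For example (illustrative): within a transaction, a store to an
   ordinary global

     g = n;		// requires_barrier returns true: TM store

   must be instrumented, whereas a load from a TREE_READONLY global
   needs no barrier, and a store through a transaction-local pointer
   (see thread_private_new_memory above) needs none either.  */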
/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */

static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
}

/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}

/* Iterate through the statements in the sequence, moving labels
   (and thus edges) of transactions from "label_norm" to "label_uninst".  */

static tree
make_tm_uninst (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		struct walk_stmt_info *)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (gtransaction *txn = dyn_cast <gtransaction *> (stmt))
    {
      *handled_ops_p = true;
      txn->label_uninst = txn->label_norm;
      txn->label_norm = NULL;
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}

/* Lower a GIMPLE_TRANSACTION statement.  */

static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
  gimple *g;
  gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi));
  unsigned int *outer_state = (unsigned int *) wi->info;
  unsigned int this_state = 0;
  struct walk_stmt_info this_wi;

  /* First, lower the body.  The scanning that we do inside gives
     us some idea of what we're dealing with.  */
  memset (&this_wi, 0, sizeof (this_wi));
  this_wi.info = (void *) &this_state;
  walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
		       lower_sequence_tm, NULL, &this_wi);

  /* If there was absolutely nothing transaction related inside the
     transaction, we may elide it.  Likewise if this is a nested
     transaction and does not contain an abort.  */
  if (this_state == 0
      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
    {
      if (outer_state)
	*outer_state |= this_state;

      gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
			     GSI_SAME_STMT);
      gimple_transaction_set_body (stmt, NULL);

      gsi_remove (gsi, true);
      wi->removed_stmt = true;
      return;
    }

  /* Wrap the body of the transaction in a try-finally node so that
     the commit call is always properly called.  */
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
  if (flag_exceptions)
    {
      tree ptr;
      gimple_seq n_seq, e_seq;

      n_seq = gimple_seq_alloc_with_stmt (g);
      e_seq = NULL;

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
			     1, integer_zero_node);
      ptr = create_tmp_var (ptr_type_node);
      gimple_call_set_lhs (g, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
			     1, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_eh_else (n_seq, e_seq);
    }

  g = gimple_build_try (gimple_transaction_body (stmt),
			gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);

  /* For a (potentially) outer transaction, create two paths.  */
  gimple_seq uninst = NULL;
  if (outer_state == NULL)
    {
      uninst = copy_gimple_seq_and_replace_locals (g);
      /* In the uninstrumented copy, reset inner transactions to have only
	 an uninstrumented code path.  */
      memset (&this_wi, 0, sizeof (this_wi));
      walk_gimple_seq (uninst, make_tm_uninst, NULL, &this_wi);
    }

  tree label1 = create_artificial_label (UNKNOWN_LOCATION);
  gsi_insert_after (gsi, gimple_build_label (label1), GSI_CONTINUE_LINKING);
  gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
  gimple_transaction_set_label_norm (stmt, label1);

  /* If the transaction calls abort or if this is an outer transaction,
     add an "over" label afterwards.  */
  tree label3 = NULL;
  if ((this_state & GTMA_HAVE_ABORT)
      || outer_state == NULL
      || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
    {
      label3 = create_artificial_label (UNKNOWN_LOCATION);
      gimple_transaction_set_label_over (stmt, label3);
    }

  if (uninst != NULL)
    {
      gsi_insert_after (gsi, gimple_build_goto (label3), GSI_CONTINUE_LINKING);

      tree label2 = create_artificial_label (UNKNOWN_LOCATION);
      gsi_insert_after (gsi, gimple_build_label (label2), GSI_CONTINUE_LINKING);
      gsi_insert_seq_after (gsi, uninst, GSI_CONTINUE_LINKING);
      gimple_transaction_set_label_uninst (stmt, label2);
    }

  if (label3 != NULL)
    gsi_insert_after (gsi, gimple_build_label (label3), GSI_CONTINUE_LINKING);

  gimple_transaction_set_body (stmt, NULL);

  /* Record the set of operations found for use later.  */
  this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
  gimple_transaction_set_subcode (stmt, this_state);
}
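
/* After this lowering, an outermost transaction has roughly the
   following shape (a sketch; compare the overview at the top of the
   file):

	GIMPLE_TRANSACTION [label_norm=L1, label_uninst=L2, label_over=L3]
	L1: try { <instrumented body> }
	    finally { __builtin___tm_commit (); }
	    goto L3;
	L2: try { <uninstrumented copy> }
	    finally { __builtin___tm_commit (); }
	L3:
*/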
/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being in a transaction.  */

static tree
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  unsigned int *state = (unsigned int *) wi->info;
  gimple *stmt = gsi_stmt (*gsi);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* Only memory reads/writes need to be instrumented.  */
      if (gimple_assign_single_p (stmt))
	examine_assign_tm (state, gsi);
      break;

    case GIMPLE_CALL:
      examine_call_tm (state, gsi);
      break;

    case GIMPLE_ASM:
      *state |= GTMA_MAY_ENTER_IRREVOCABLE;
      break;

    case GIMPLE_TRANSACTION:
      lower_transaction (gsi, wi);
      break;

    default:
      *handled_ops_p = !gimple_has_substatements (stmt);
      break;
    }

  return NULL_TREE;
}

/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being outside of a transaction.  */

static tree
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		      struct walk_stmt_info * wi)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_TRANSACTION)
    {
      *handled_ops_p = true;
      lower_transaction (gsi, wi);
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}

/* Main entry point for flattening GIMPLE_TRANSACTION constructs.  After
   this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
   been moved out, and all the data required for constructing a proper
   CFG has been recorded.  */

static unsigned int
execute_lower_tm (void)
{
  struct walk_stmt_info wi;
  gimple_seq body;

  /* Transactional clones aren't created until a later pass.  */
  gcc_assert (!decl_is_tm_clone (current_function_decl));

  body = gimple_body (current_function_decl);
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
  gimple_set_body (current_function_decl, body);

  return 0;
}

namespace {

const pass_data pass_data_lower_tm =
{
  GIMPLE_PASS, /* type */
  "tmlower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_tm : public gimple_opt_pass
{
public:
  pass_lower_tm (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return execute_lower_tm (); }

}; // class pass_lower_tm

} // anon namespace

gimple_opt_pass *
make_pass_lower_tm (gcc::context *ctxt)
{
  return new pass_lower_tm (ctxt);
}

/* Collect region information for each transaction.  */

struct tm_region
{
public:

  /* The field "transaction_stmt" is initially a gtransaction *,
     but eventually gets lowered to a gcall * (to BUILT_IN_TM_START).

     Helper method to get it as a gtransaction *, with code-checking
     in a checked-build.  */

  gtransaction *
  get_transaction_stmt () const
  {
    return as_a <gtransaction *> (transaction_stmt);
  }

public:

  /* Link to the next unnested transaction.  */
  struct tm_region *next;

  /* Link to the next inner transaction.  */
  struct tm_region *inner;

  /* Link to the next outer transaction.  */
  struct tm_region *outer;

  /* The GIMPLE_TRANSACTION statement beginning this transaction.
     After TM_MARK, this gets replaced by a call to
     BUILT_IN_TM_START.
     Hence this will be either a gtransaction * or a gcall *.  */
  gimple *transaction_stmt;

  /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
     BUILT_IN_TM_START, this field is true if the transaction is an
     outer transaction.  */
  bool original_transaction_was_outer;

  /* Return value from BUILT_IN_TM_START.  */
  tree tm_state;

  /* The entry block to this region.  This will always be the first
     block of the body of the transaction.  */
  basic_block entry_block;

  /* The first block after an expanded call to _ITM_beginTransaction.  */
  basic_block restart_block;

  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
     These blocks are still a part of the region (i.e., the border is
     inclusive).  Note that this set is only complete for paths in the CFG
     starting at ENTRY_BLOCK, and that there is no exit block recorded for
     the edge to the "over" label.  */
  bitmap exit_blocks;

  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
  bitmap irr_blocks;
};

/* True if there are pending edge statements to be committed for the
   current function being scanned in the tmmark pass.  */
bool pending_edge_inserts_p;

static struct tm_region *all_tm_regions;
static bitmap_obstack tm_obstack;
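
/* For example (illustrative): for

     __transaction_atomic {	// region A
       __transaction_atomic {}	// region B
     }
     __transaction_atomic {}	// region C

   B is linked on A->inner, while A and C are both chained on
   ALL_TM_REGIONS (the most recently created region first).  */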
/* A subroutine of tm_region_init.  Record the existence of the
   GIMPLE_TRANSACTION statement in a tree of tm_region elements.  */

static struct tm_region *
tm_region_init_0 (struct tm_region *outer, basic_block bb,
		  gtransaction *stmt)
{
  struct tm_region *region;

  region = (struct tm_region *)
    obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));

  if (outer)
    {
      region->next = outer->inner;
      outer->inner = region;
    }
  else
    {
      region->next = all_tm_regions;
      all_tm_regions = region;
    }
  region->inner = NULL;
  region->outer = outer;

  region->transaction_stmt = stmt;
  region->original_transaction_was_outer = false;
  region->tm_state = NULL;

  /* There are either one or two edges out of the block containing
     the GIMPLE_TRANSACTION, one to the actual region and one to the
     "over" label if the region contains an abort.  The former will
     always be the one marked FALLTHRU.  */
  region->entry_block = FALLTHRU_EDGE (bb)->dest;

  region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
  region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

  return region;
}

/* A subroutine of tm_region_init.  Record all the exit and
   irrevocable blocks in BB into the region's exit_blocks and
   irr_blocks bitmaps.  Returns the new region being scanned.  */

static struct tm_region *
tm_region_init_1 (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple *g;

  if (!region
      || (!region->irr_blocks && !region->exit_blocks))
    return region;

  /* Check to see if this is the end of a region by seeing if it
     contains a call to __builtin_tm_commit{,_eh}.  Note that the
     outermost region for DECL_IS_TM_CLONE need not collect this.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_CALL)
	{
	  tree fn = gimple_call_fndecl (g);
	  if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
	    {
	      if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
		   || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
		  && region->exit_blocks)
		{
		  bitmap_set_bit (region->exit_blocks, bb->index);
		  region = region->outer;
		  break;
		}
	      if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
		bitmap_set_bit (region->irr_blocks, bb->index);
	    }
	}
    }
  return region;
}

/* Collect all of the transaction regions within the current function
   and record them in ALL_TM_REGIONS.  The REGION parameter may specify
   an "outermost" region for use by tm clones.  */

static void
tm_region_init (struct tm_region *region)
{
  gimple *g;
  edge_iterator ei;
  edge e;
  basic_block bb;
  auto_vec<basic_block> queue;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
  struct tm_region *old_region;
  auto_vec<tm_region *> bb_regions;

  /* We could store this information in bb->aux, but we may get called
     through get_all_tm_blocks() from another pass that may be already
     using bb->aux.  */
*/ 2047 bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun)); 2048 2049 all_tm_regions = region; 2050 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); 2051 queue.safe_push (bb); 2052 bitmap_set_bit (visited_blocks, bb->index); 2053 bb_regions[bb->index] = region; 2054 2055 do 2056 { 2057 bb = queue.pop (); 2058 region = bb_regions[bb->index]; 2059 bb_regions[bb->index] = NULL; 2060 2061 /* Record exit and irrevocable blocks. */ 2062 region = tm_region_init_1 (region, bb); 2063 2064 /* Check for the last statement in the block beginning a new region. */ 2065 g = last_stmt (bb); 2066 old_region = region; 2067 if (g) 2068 if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g)) 2069 region = tm_region_init_0 (region, bb, trans_stmt); 2070 2071 /* Process subsequent blocks. */ 2072 FOR_EACH_EDGE (e, ei, bb->succs) 2073 if (!bitmap_bit_p (visited_blocks, e->dest->index)) 2074 { 2075 bitmap_set_bit (visited_blocks, e->dest->index); 2076 queue.safe_push (e->dest); 2077 2078 /* If the current block started a new region, make sure that only 2079 the entry block of the new region is associated with this region. 2080 Other successors are still part of the old region. */ 2081 if (old_region != region && e->dest != region->entry_block) 2082 bb_regions[e->dest->index] = old_region; 2083 else 2084 bb_regions[e->dest->index] = region; 2085 } 2086 } 2087 while (!queue.is_empty ()); 2088 BITMAP_FREE (visited_blocks); 2089 } 2090 2091 /* The "gate" function for all transactional memory expansion and optimization 2092 passes. We collect region information for each top-level transaction, and 2093 if we don't find any, we skip all of the TM passes. Each region will have 2094 all of the exit blocks recorded, and the originating statement. */ 2095 2096 static bool 2097 gate_tm_init (void) 2098 { 2099 if (!flag_tm) 2100 return false; 2101 2102 calculate_dominance_info (CDI_DOMINATORS); 2103 bitmap_obstack_initialize (&tm_obstack); 2104 2105 /* If the function is a TM_CLONE, then the entire function is the region. */ 2106 if (decl_is_tm_clone (current_function_decl)) 2107 { 2108 struct tm_region *region = (struct tm_region *) 2109 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region)); 2110 memset (region, 0, sizeof (*region)); 2111 region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); 2112 /* For a clone, the entire function is the region. But even if 2113 we don't need to record any exit blocks, we may need to 2114 record irrevocable blocks. */ 2115 region->irr_blocks = BITMAP_ALLOC (&tm_obstack); 2116 2117 tm_region_init (region); 2118 } 2119 else 2120 { 2121 tm_region_init (NULL); 2122 2123 /* If we didn't find any regions, clean up and skip the whole tree 2124 of tm-related optimizations. 
*/ 2125 if (all_tm_regions == NULL) 2126 { 2127 bitmap_obstack_release (&tm_obstack); 2128 return false; 2129 } 2130 } 2131 2132 return true; 2133 } 2134 2135 namespace { 2136 2137 const pass_data pass_data_tm_init = 2138 { 2139 GIMPLE_PASS, /* type */ 2140 "*tminit", /* name */ 2141 OPTGROUP_NONE, /* optinfo_flags */ 2142 TV_TRANS_MEM, /* tv_id */ 2143 ( PROP_ssa | PROP_cfg ), /* properties_required */ 2144 0, /* properties_provided */ 2145 0, /* properties_destroyed */ 2146 0, /* todo_flags_start */ 2147 0, /* todo_flags_finish */ 2148 }; 2149 2150 class pass_tm_init : public gimple_opt_pass 2151 { 2152 public: 2153 pass_tm_init (gcc::context *ctxt) 2154 : gimple_opt_pass (pass_data_tm_init, ctxt) 2155 {} 2156 2157 /* opt_pass methods: */ 2158 virtual bool gate (function *) { return gate_tm_init (); } 2159 2160 }; // class pass_tm_init 2161 2162 } // anon namespace 2163 2164 gimple_opt_pass * 2165 make_pass_tm_init (gcc::context *ctxt) 2166 { 2167 return new pass_tm_init (ctxt); 2168 } 2169 2170 /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region 2171 represented by REGION. */ 2172 2173 static inline void 2174 transaction_subcode_ior (struct tm_region *region, unsigned flags) 2175 { 2176 if (region && region->transaction_stmt) 2177 { 2178 gtransaction *transaction_stmt = region->get_transaction_stmt (); 2179 flags |= gimple_transaction_subcode (transaction_stmt); 2180 gimple_transaction_set_subcode (transaction_stmt, flags); 2181 } 2182 } 2183 2184 /* Construct a memory load in a transactional context. Return the 2185 gimple statement performing the load, or NULL if there is no 2186 TM_LOAD builtin of the appropriate size to do the load. 2187 2188 LOC is the location to use for the new statement(s). */ 2189 2190 static gcall * 2191 build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) 2192 { 2193 tree t, type = TREE_TYPE (rhs); 2194 gcall *gcall; 2195 2196 built_in_function code; 2197 if (type == float_type_node) 2198 code = BUILT_IN_TM_LOAD_FLOAT; 2199 else if (type == double_type_node) 2200 code = BUILT_IN_TM_LOAD_DOUBLE; 2201 else if (type == long_double_type_node) 2202 code = BUILT_IN_TM_LOAD_LDOUBLE; 2203 else 2204 { 2205 if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type))) 2206 return NULL; 2207 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type)); 2208 2209 if (TREE_CODE (type) == VECTOR_TYPE) 2210 { 2211 switch (type_size) 2212 { 2213 case 64: 2214 code = BUILT_IN_TM_LOAD_M64; 2215 break; 2216 case 128: 2217 code = BUILT_IN_TM_LOAD_M128; 2218 break; 2219 case 256: 2220 code = BUILT_IN_TM_LOAD_M256; 2221 break; 2222 default: 2223 goto unhandled_vec; 2224 } 2225 if (!builtin_decl_explicit_p (code)) 2226 goto unhandled_vec; 2227 } 2228 else 2229 { 2230 unhandled_vec: 2231 switch (type_size) 2232 { 2233 case 8: 2234 code = BUILT_IN_TM_LOAD_1; 2235 break; 2236 case 16: 2237 code = BUILT_IN_TM_LOAD_2; 2238 break; 2239 case 32: 2240 code = BUILT_IN_TM_LOAD_4; 2241 break; 2242 case 64: 2243 code = BUILT_IN_TM_LOAD_8; 2244 break; 2245 default: 2246 return NULL; 2247 } 2248 } 2249 } 2250 2251 tree decl = builtin_decl_explicit (code); 2252 gcc_assert (decl); 2253 2254 t = gimplify_addr (gsi, rhs); 2255 gcall = gimple_build_call (decl, 1, t); 2256 gimple_set_location (gcall, loc); 2257 2258 t = TREE_TYPE (TREE_TYPE (decl)); 2259 if (useless_type_conversion_p (type, t)) 2260 { 2261 gimple_call_set_lhs (gcall, lhs); 2262 gsi_insert_before (gsi, gcall, GSI_SAME_STMT); 2263 } 2264 else 2265 { 2266 gimple *g; 2267 tree 
temp; 2268 2269 temp = create_tmp_reg (t); 2270 gimple_call_set_lhs (gcall, temp); 2271 gsi_insert_before (gsi, gcall, GSI_SAME_STMT); 2272 2273 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp); 2274 g = gimple_build_assign (lhs, t); 2275 gsi_insert_before (gsi, g, GSI_SAME_STMT); 2276 } 2277 2278 return gcall; 2279 } 2280 2281 2282 /* Similarly for storing TYPE in a transactional context. */ 2283 2284 static gcall * 2285 build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) 2286 { 2287 tree t, fn, type = TREE_TYPE (rhs), simple_type; 2288 gcall *gcall; 2289 2290 built_in_function code; 2291 if (type == float_type_node) 2292 code = BUILT_IN_TM_STORE_FLOAT; 2293 else if (type == double_type_node) 2294 code = BUILT_IN_TM_STORE_DOUBLE; 2295 else if (type == long_double_type_node) 2296 code = BUILT_IN_TM_STORE_LDOUBLE; 2297 else 2298 { 2299 if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type))) 2300 return NULL; 2301 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type)); 2302 2303 if (TREE_CODE (type) == VECTOR_TYPE) 2304 { 2305 switch (type_size) 2306 { 2307 case 64: 2308 code = BUILT_IN_TM_STORE_M64; 2309 break; 2310 case 128: 2311 code = BUILT_IN_TM_STORE_M128; 2312 break; 2313 case 256: 2314 code = BUILT_IN_TM_STORE_M256; 2315 break; 2316 default: 2317 goto unhandled_vec; 2318 } 2319 if (!builtin_decl_explicit_p (code)) 2320 goto unhandled_vec; 2321 } 2322 else 2323 { 2324 unhandled_vec: 2325 switch (type_size) 2326 { 2327 case 8: 2328 code = BUILT_IN_TM_STORE_1; 2329 break; 2330 case 16: 2331 code = BUILT_IN_TM_STORE_2; 2332 break; 2333 case 32: 2334 code = BUILT_IN_TM_STORE_4; 2335 break; 2336 case 64: 2337 code = BUILT_IN_TM_STORE_8; 2338 break; 2339 default: 2340 return NULL; 2341 } 2342 } 2343 } 2344 2345 fn = builtin_decl_explicit (code); 2346 gcc_assert (fn); 2347 2348 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn)))); 2349 2350 if (TREE_CODE (rhs) == CONSTRUCTOR) 2351 { 2352 /* Handle the easy initialization to zero. */ 2353 if (!CONSTRUCTOR_ELTS (rhs)) 2354 rhs = build_int_cst (simple_type, 0); 2355 else 2356 { 2357 /* ...otherwise punt to the caller and probably use 2358 BUILT_IN_TM_MEMMOVE, because we can't wrap a 2359 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce 2360 valid gimple. */ 2361 return NULL; 2362 } 2363 } 2364 else if (!useless_type_conversion_p (simple_type, type)) 2365 { 2366 gimple *g; 2367 tree temp; 2368 2369 temp = create_tmp_reg (simple_type); 2370 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs); 2371 g = gimple_build_assign (temp, t); 2372 gimple_set_location (g, loc); 2373 gsi_insert_before (gsi, g, GSI_SAME_STMT); 2374 2375 rhs = temp; 2376 } 2377 2378 t = gimplify_addr (gsi, lhs); 2379 gcall = gimple_build_call (fn, 2, t, rhs); 2380 gimple_set_location (gcall, loc); 2381 gsi_insert_before (gsi, gcall, GSI_SAME_STMT); 2382 2383 return gcall; 2384 } 2385 2386 2387 /* Expand an assignment statement into transactional builtins. */ 2388 2389 static void 2390 expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi) 2391 { 2392 gimple *stmt = gsi_stmt (*gsi); 2393 location_t loc = gimple_location (stmt); 2394 tree lhs = gimple_assign_lhs (stmt); 2395 tree rhs = gimple_assign_rhs1 (stmt); 2396 bool store_p = requires_barrier (region->entry_block, lhs, NULL); 2397 bool load_p = requires_barrier (region->entry_block, rhs, NULL); 2398 gimple *gcall = NULL; 2399 2400 if (!load_p && !store_p) 2401 { 2402 /* Add thread private addresses to log if applicable. 
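(Passing the statement as the third argument asks requires_barrier to record any thread-private address it encounters in the TM log as a side effect, rather than merely answering the query.)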
*/ 2403 requires_barrier (region->entry_block, lhs, stmt); 2404 gsi_next (gsi); 2405 return; 2406 } 2407 2408 if (load_p) 2409 transaction_subcode_ior (region, GTMA_HAVE_LOAD); 2410 if (store_p) 2411 transaction_subcode_ior (region, GTMA_HAVE_STORE); 2412 2413 // Remove original load/store statement. 2414 gsi_remove (gsi, true); 2415 2416 // Attempt to use a simple load/store helper function. 2417 if (load_p && !store_p) 2418 gcall = build_tm_load (loc, lhs, rhs, gsi); 2419 else if (store_p && !load_p) 2420 gcall = build_tm_store (loc, lhs, rhs, gsi); 2421 2422 // If gcall has not been set, then we do not have a simple helper 2423 // function available for the type. This may be true of larger 2424 // structures, vectors, and non-standard float types. 2425 if (!gcall) 2426 { 2427 tree lhs_addr, rhs_addr, ltmp = NULL, copy_fn; 2428 2429 // If this is a type that we couldn't handle above, but it's 2430 // in a register, we must spill it to memory for the copy. 2431 if (is_gimple_reg (lhs)) 2432 { 2433 ltmp = create_tmp_var (TREE_TYPE (lhs)); 2434 lhs_addr = build_fold_addr_expr (ltmp); 2435 } 2436 else 2437 lhs_addr = gimplify_addr (gsi, lhs); 2438 if (is_gimple_reg (rhs)) 2439 { 2440 tree rtmp = create_tmp_var (TREE_TYPE (rhs)); 2441 rhs_addr = build_fold_addr_expr (rtmp); 2442 gcall = gimple_build_assign (rtmp, rhs); 2443 gsi_insert_before (gsi, gcall, GSI_SAME_STMT); 2444 } 2445 else 2446 rhs_addr = gimplify_addr (gsi, rhs); 2447 2448 // Choose the appropriate memory transfer function. 2449 if (load_p && store_p) 2450 { 2451 // ??? Figure out if there's any possible overlap between 2452 // the LHS and the RHS and if not, use MEMCPY. 2453 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMMOVE); 2454 } 2455 else if (load_p) 2456 { 2457 // Note that the store is non-transactional and cannot overlap. 2458 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RTWN); 2459 } 2460 else 2461 { 2462 // Note that the load is non-transactional and cannot overlap. 2463 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RNWT); 2464 } 2465 2466 gcall = gimple_build_call (copy_fn, 3, lhs_addr, rhs_addr, 2467 TYPE_SIZE_UNIT (TREE_TYPE (lhs))); 2468 gimple_set_location (gcall, loc); 2469 gsi_insert_before (gsi, gcall, GSI_SAME_STMT); 2470 2471 if (ltmp) 2472 { 2473 gcall = gimple_build_assign (lhs, ltmp); 2474 gsi_insert_before (gsi, gcall, GSI_SAME_STMT); 2475 } 2476 } 2477 2478 // Now that we have the load/store in its instrumented form, add 2479 // thread private addresses to the log if applicable. 2480 if (!store_p) 2481 requires_barrier (region->entry_block, lhs, gcall); 2482 } 2483 2484 2485 /* Expand a call statement as appropriate for a transaction. That is, 2486 either verify that the call does not affect the transaction, or 2487 redirect the call to a clone that handles transactions, or change 2488 the transaction state to IRREVOCABLE. Return true if the call is 2489 one of the builtins that end a transaction. 
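For example (a sketch): a direct call to memcpy () introduced by a late pass is redirected to its TM runtime replacement (__builtin__ITM_memcpyRtWt, assuming that replacement is available), whereas an indirect call through a function pointer that is not transaction_safe merely marks the region GTMA_MAY_ENTER_IRREVOCABLE.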
*/ 2490 2491 static bool 2492 expand_call_tm (struct tm_region *region, 2493 gimple_stmt_iterator *gsi) 2494 { 2495 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi)); 2496 tree lhs = gimple_call_lhs (stmt); 2497 tree fn_decl; 2498 struct cgraph_node *node; 2499 bool retval = false; 2500 2501 fn_decl = gimple_call_fndecl (stmt); 2502 2503 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY) 2504 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE)) 2505 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD); 2506 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET)) 2507 transaction_subcode_ior (region, GTMA_HAVE_STORE); 2508 2509 if (is_tm_pure_call (stmt)) 2510 return false; 2511 2512 if (fn_decl) 2513 retval = is_tm_ending_fndecl (fn_decl); 2514 if (!retval) 2515 { 2516 /* Assume all non-const/pure calls write to memory, except 2517 transaction ending builtins. */ 2518 transaction_subcode_ior (region, GTMA_HAVE_STORE); 2519 } 2520 2521 /* For indirect calls, we already generated a call into the runtime. */ 2522 if (!fn_decl) 2523 { 2524 tree fn = gimple_call_fn (stmt); 2525 2526 /* We are guaranteed never to go irrevocable on a safe or pure 2527 call, and the pure call was handled above. */ 2528 if (is_tm_safe (fn)) 2529 return false; 2530 else 2531 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); 2532 2533 return false; 2534 } 2535 2536 node = cgraph_node::get (fn_decl); 2537 /* All calls should have cgraph here. */ 2538 if (!node) 2539 { 2540 /* We can have a nodeless call here if some pass after IPA-tm 2541 added uninstrumented calls. For example, loop distribution 2542 can transform certain loop constructs into __builtin_mem* 2543 calls. In this case, see if we have a suitable TM 2544 replacement and fill in the gaps. */ 2545 gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL); 2546 enum built_in_function code = DECL_FUNCTION_CODE (fn_decl); 2547 gcc_assert (code == BUILT_IN_MEMCPY 2548 || code == BUILT_IN_MEMMOVE 2549 || code == BUILT_IN_MEMSET); 2550 2551 tree repl = find_tm_replacement_function (fn_decl); 2552 if (repl) 2553 { 2554 gimple_call_set_fndecl (stmt, repl); 2555 update_stmt (stmt); 2556 node = cgraph_node::create (repl); 2557 node->local.tm_may_enter_irr = false; 2558 return expand_call_tm (region, gsi); 2559 } 2560 gcc_unreachable (); 2561 } 2562 if (node->local.tm_may_enter_irr) 2563 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); 2564 2565 if (is_tm_abort (fn_decl)) 2566 { 2567 transaction_subcode_ior (region, GTMA_HAVE_ABORT); 2568 return true; 2569 } 2570 2571 /* Instrument the store if needed. 2572 2573 If the assignment happens inside the function call (return slot 2574 optimization), there is no instrumentation to be done, since 2575 the callee should have done the right thing. */ 2576 if (lhs && requires_barrier (region->entry_block, lhs, stmt) 2577 && !gimple_call_return_slot_opt_p (stmt)) 2578 { 2579 tree tmp = create_tmp_reg (TREE_TYPE (lhs)); 2580 location_t loc = gimple_location (stmt); 2581 edge fallthru_edge = NULL; 2582 gassign *assign_stmt; 2583 2584 /* Remember if the call was going to throw. 
*/ 2585 if (stmt_can_throw_internal (stmt)) 2586 { 2587 edge_iterator ei; 2588 edge e; 2589 basic_block bb = gimple_bb (stmt); 2590 2591 FOR_EACH_EDGE (e, ei, bb->succs) 2592 if (e->flags & EDGE_FALLTHRU) 2593 { 2594 fallthru_edge = e; 2595 break; 2596 } 2597 } 2598 2599 gimple_call_set_lhs (stmt, tmp); 2600 update_stmt (stmt); 2601 assign_stmt = gimple_build_assign (lhs, tmp); 2602 gimple_set_location (assign_stmt, loc); 2603 2604 /* We cannot throw in the middle of a BB. If the call was going 2605 to throw, place the instrumentation on the fallthru edge, so 2606 the call remains the last statement in the block. */ 2607 if (fallthru_edge) 2608 { 2609 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt); 2610 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq); 2611 expand_assign_tm (region, &fallthru_gsi); 2612 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq); 2613 pending_edge_inserts_p = true; 2614 } 2615 else 2616 { 2617 gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING); 2618 expand_assign_tm (region, gsi); 2619 } 2620 2621 transaction_subcode_ior (region, GTMA_HAVE_STORE); 2622 } 2623 2624 return retval; 2625 } 2626 2627 2628 /* Expand all statements in BB as appropriate for being inside 2629 a transaction. */ 2630 2631 static void 2632 expand_block_tm (struct tm_region *region, basic_block bb) 2633 { 2634 gimple_stmt_iterator gsi; 2635 2636 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); ) 2637 { 2638 gimple *stmt = gsi_stmt (gsi); 2639 switch (gimple_code (stmt)) 2640 { 2641 case GIMPLE_ASSIGN: 2642 /* Only memory reads/writes need to be instrumented. */ 2643 if (gimple_assign_single_p (stmt) 2644 && !gimple_clobber_p (stmt)) 2645 { 2646 expand_assign_tm (region, &gsi); 2647 continue; 2648 } 2649 break; 2650 2651 case GIMPLE_CALL: 2652 if (expand_call_tm (region, &gsi)) 2653 return; 2654 break; 2655 2656 case GIMPLE_ASM: 2657 gcc_unreachable (); 2658 2659 default: 2660 break; 2661 } 2662 if (!gsi_end_p (gsi)) 2663 gsi_next (&gsi); 2664 } 2665 } 2666 2667 /* Return the list of basic-blocks in REGION. 2668 2669 STOP_AT_IRREVOCABLE_P is true if caller is uninterested in blocks 2670 following a TM_IRREVOCABLE call. 2671 2672 INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the 2673 uninstrumented code path blocks in the list of basic blocks 2674 returned, false otherwise. 
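A typical use (a sketch; compare collect_bb2reg below) gathers just the instrumented blocks of a region:

	vec<basic_block> bbs
	  = get_tm_region_blocks (region->entry_block,
				  region->exit_blocks,
				  region->irr_blocks,
				  NULL,
				  /*stop_at_irrevocable_p=*/true,
				  /*include_uninstrumented_p=*/false);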
*/ 2675 2676 static vec<basic_block> 2677 get_tm_region_blocks (basic_block entry_block, 2678 bitmap exit_blocks, 2679 bitmap irr_blocks, 2680 bitmap all_region_blocks, 2681 bool stop_at_irrevocable_p, 2682 bool include_uninstrumented_p = true) 2683 { 2684 vec<basic_block> bbs = vNULL; 2685 unsigned i; 2686 edge e; 2687 edge_iterator ei; 2688 bitmap visited_blocks = BITMAP_ALLOC (NULL); 2689 2690 i = 0; 2691 bbs.safe_push (entry_block); 2692 bitmap_set_bit (visited_blocks, entry_block->index); 2693 2694 do 2695 { 2696 basic_block bb = bbs[i++]; 2697 2698 if (exit_blocks && 2699 bitmap_bit_p (exit_blocks, bb->index)) 2700 continue; 2701 2702 if (stop_at_irrevocable_p 2703 && irr_blocks 2704 && bitmap_bit_p (irr_blocks, bb->index)) 2705 continue; 2706 2707 FOR_EACH_EDGE (e, ei, bb->succs) 2708 if ((include_uninstrumented_p 2709 || !(e->flags & EDGE_TM_UNINSTRUMENTED)) 2710 && !bitmap_bit_p (visited_blocks, e->dest->index)) 2711 { 2712 bitmap_set_bit (visited_blocks, e->dest->index); 2713 bbs.safe_push (e->dest); 2714 } 2715 } 2716 while (i < bbs.length ()); 2717 2718 if (all_region_blocks) 2719 bitmap_ior_into (all_region_blocks, visited_blocks); 2720 2721 BITMAP_FREE (visited_blocks); 2722 return bbs; 2723 } 2724 2725 // Callback data for collect_bb2reg. 2726 struct bb2reg_stuff 2727 { 2728 vec<tm_region *> *bb2reg; 2729 bool include_uninstrumented_p; 2730 }; 2731 2732 // Callback for expand_regions, collect innermost region data for each bb. 2733 static void * 2734 collect_bb2reg (struct tm_region *region, void *data) 2735 { 2736 struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data; 2737 vec<tm_region *> *bb2reg = stuff->bb2reg; 2738 vec<basic_block> queue; 2739 unsigned int i; 2740 basic_block bb; 2741 2742 queue = get_tm_region_blocks (region->entry_block, 2743 region->exit_blocks, 2744 region->irr_blocks, 2745 NULL, 2746 /*stop_at_irr_p=*/true, 2747 stuff->include_uninstrumented_p); 2748 2749 // We expect expand_region to perform a post-order traversal of the region 2750 // tree. Therefore the last region seen for any bb is the innermost. 2751 FOR_EACH_VEC_ELT (queue, i, bb) 2752 (*bb2reg)[bb->index] = region; 2753 2754 queue.release (); 2755 return NULL; 2756 } 2757 2758 // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to 2759 // which a basic block belongs. Note that we only consider the instrumented 2760 // code paths for the region; the uninstrumented code paths are ignored if 2761 // INCLUDE_UNINSTRUMENTED_P is false. 2762 // 2763 // ??? This data is very similar to the bb_regions array that is collected 2764 // during tm_region_init. Or, rather, this data is similar to what could 2765 // be used within tm_region_init. The actual computation in tm_region_init 2766 // begins and ends with bb_regions entirely full of NULL pointers, due to 2767 // the way in which pointers are swapped in and out of the array. 2768 // 2769 // ??? Our callers expect that blocks are not shared between transactions. 2770 // When the optimizers get too smart, and blocks are shared, then during 2771 // the tm_mark phase we'll add log entries to only one of the two transactions, 2772 // and in the tm_edge phase we'll add edges to the CFG that create invalid 2773 // cycles. The symptom being SSA defs that do not dominate their uses. 2774 // Note that the optimizers were locally correct with their transformation, 2775 // as we have no info within the program that suggests that the blocks cannot 2776 // be shared. 2777 // 2778 // ??? 
There is currently a hack inside tree-ssa-pre.c to work around the 2779 // only known instance of this block sharing. 2780 2781 static vec<tm_region *> 2782 get_bb_regions_instrumented (bool traverse_clones, 2783 bool include_uninstrumented_p) 2784 { 2785 unsigned n = last_basic_block_for_fn (cfun); 2786 struct bb2reg_stuff stuff; 2787 vec<tm_region *> ret; 2788 2789 ret.create (n); 2790 ret.safe_grow_cleared (n); 2791 stuff.bb2reg = &ret; 2792 stuff.include_uninstrumented_p = include_uninstrumented_p; 2793 expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones); 2794 2795 return ret; 2796 } 2797 2798 /* Set the IN_TRANSACTION for all gimple statements that appear in a 2799 transaction. */ 2800 2801 void 2802 compute_transaction_bits (void) 2803 { 2804 struct tm_region *region; 2805 vec<basic_block> queue; 2806 unsigned int i; 2807 basic_block bb; 2808 2809 /* ?? Perhaps we need to abstract gate_tm_init further, because we 2810 certainly don't need it to calculate CDI_DOMINATOR info. */ 2811 gate_tm_init (); 2812 2813 FOR_EACH_BB_FN (bb, cfun) 2814 bb->flags &= ~BB_IN_TRANSACTION; 2815 2816 for (region = all_tm_regions; region; region = region->next) 2817 { 2818 queue = get_tm_region_blocks (region->entry_block, 2819 region->exit_blocks, 2820 region->irr_blocks, 2821 NULL, 2822 /*stop_at_irr_p=*/true); 2823 for (i = 0; queue.iterate (i, &bb); ++i) 2824 bb->flags |= BB_IN_TRANSACTION; 2825 queue.release (); 2826 } 2827 2828 if (all_tm_regions) 2829 bitmap_obstack_release (&tm_obstack); 2830 } 2831 2832 /* Replace the GIMPLE_TRANSACTION in this region with the corresponding 2833 call to BUILT_IN_TM_START. */ 2834 2835 static void * 2836 expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED) 2837 { 2838 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START); 2839 basic_block transaction_bb = gimple_bb (region->transaction_stmt); 2840 tree tm_state = region->tm_state; 2841 tree tm_state_type = TREE_TYPE (tm_state); 2842 edge abort_edge = NULL; 2843 edge inst_edge = NULL; 2844 edge uninst_edge = NULL; 2845 edge fallthru_edge = NULL; 2846 2847 // Identify the various successors of the transaction start. 2848 { 2849 edge_iterator i; 2850 edge e; 2851 FOR_EACH_EDGE (e, i, transaction_bb->succs) 2852 { 2853 if (e->flags & EDGE_TM_ABORT) 2854 abort_edge = e; 2855 else if (e->flags & EDGE_TM_UNINSTRUMENTED) 2856 uninst_edge = e; 2857 else 2858 inst_edge = e; 2859 if (e->flags & EDGE_FALLTHRU) 2860 fallthru_edge = e; 2861 } 2862 } 2863 2864 /* ??? There are plenty of bits here we're not computing. */ 2865 { 2866 int subcode = gimple_transaction_subcode (region->get_transaction_stmt ()); 2867 int flags = 0; 2868 if (subcode & GTMA_DOES_GO_IRREVOCABLE) 2869 flags |= PR_DOESGOIRREVOCABLE; 2870 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0) 2871 flags |= PR_HASNOIRREVOCABLE; 2872 /* If the transaction does not have an abort in lexical scope and is not 2873 marked as an outer transaction, then it will never abort. 
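For example (a sketch), a flat transaction whose body performs a single instrumented store and makes no calls would typically pass PR_INSTRUMENTEDCODE | PR_HASNOIRREVOCABLE | PR_HASNOABORT to _ITM_beginTransaction below.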
*/ 2874 if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0) 2875 flags |= PR_HASNOABORT; 2876 if ((subcode & GTMA_HAVE_STORE) == 0) 2877 flags |= PR_READONLY; 2878 if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION)) 2879 flags |= PR_INSTRUMENTEDCODE; 2880 if (uninst_edge) 2881 flags |= PR_UNINSTRUMENTEDCODE; 2882 if (subcode & GTMA_IS_OUTER) 2883 region->original_transaction_was_outer = true; 2884 tree t = build_int_cst (tm_state_type, flags); 2885 gcall *call = gimple_build_call (tm_start, 1, t); 2886 gimple_call_set_lhs (call, tm_state); 2887 gimple_set_location (call, gimple_location (region->transaction_stmt)); 2888 2889 // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START. 2890 gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb); 2891 gcc_assert (gsi_stmt (gsi) == region->transaction_stmt); 2892 gsi_insert_before (&gsi, call, GSI_SAME_STMT); 2893 gsi_remove (&gsi, true); 2894 region->transaction_stmt = call; 2895 } 2896 2897 // Generate log saves. 2898 if (!tm_log_save_addresses.is_empty ()) 2899 tm_log_emit_saves (region->entry_block, transaction_bb); 2900 2901 // In the beginning, we've no tests to perform on transaction restart. 2902 // Note that after this point, transaction_bb becomes the "most recent 2903 // block containing tests for the transaction". 2904 region->restart_block = region->entry_block; 2905 2906 // Generate log restores. 2907 if (!tm_log_save_addresses.is_empty ()) 2908 { 2909 basic_block test_bb = create_empty_bb (transaction_bb); 2910 basic_block code_bb = create_empty_bb (test_bb); 2911 basic_block join_bb = create_empty_bb (code_bb); 2912 add_bb_to_loop (test_bb, transaction_bb->loop_father); 2913 add_bb_to_loop (code_bb, transaction_bb->loop_father); 2914 add_bb_to_loop (join_bb, transaction_bb->loop_father); 2915 if (region->restart_block == region->entry_block) 2916 region->restart_block = test_bb; 2917 2918 tree t1 = create_tmp_reg (tm_state_type); 2919 tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES); 2920 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); 2921 gimple_stmt_iterator gsi = gsi_last_bb (test_bb); 2922 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); 2923 2924 t2 = build_int_cst (tm_state_type, 0); 2925 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); 2926 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); 2927 2928 tm_log_emit_restores (region->entry_block, code_bb); 2929 2930 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU); 2931 edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE); 2932 edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE); 2933 redirect_edge_pred (fallthru_edge, join_bb); 2934 2935 join_bb->count = test_bb->count = transaction_bb->count; 2936 2937 ei->probability = profile_probability::always (); 2938 et->probability = profile_probability::likely (); 2939 ef->probability = profile_probability::unlikely (); 2940 2941 code_bb->count = et->count (); 2942 2943 transaction_bb = join_bb; 2944 } 2945 2946 // If we have an ABORT edge, create a test to perform the abort. 
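// A sketch of the test generated below, in GIMPLE form:
//
//   t1 = tm_state & A_ABORTTRANSACTION;
//   if (t1 != 0)
//     goto <over>;	// abort_edge
//   else
//     goto <body>;	// fallthru_edge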
2947 if (abort_edge) 2948 { 2949 basic_block test_bb = create_empty_bb (transaction_bb); 2950 add_bb_to_loop (test_bb, transaction_bb->loop_father); 2951 if (region->restart_block == region->entry_block) 2952 region->restart_block = test_bb; 2953 2954 tree t1 = create_tmp_reg (tm_state_type); 2955 tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION); 2956 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); 2957 gimple_stmt_iterator gsi = gsi_last_bb (test_bb); 2958 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); 2959 2960 t2 = build_int_cst (tm_state_type, 0); 2961 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); 2962 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); 2963 2964 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU); 2965 test_bb->count = transaction_bb->count; 2966 ei->probability = profile_probability::always (); 2967 2968 // Not the abort edge. If both are live, choose one at random, as 2969 // we'll be fixing that up below. 2970 redirect_edge_pred (fallthru_edge, test_bb); 2971 fallthru_edge->flags = EDGE_FALSE_VALUE; 2972 fallthru_edge->probability = profile_probability::very_likely (); 2973 2974 // Abort/over edge. 2975 redirect_edge_pred (abort_edge, test_bb); 2976 abort_edge->flags = EDGE_TRUE_VALUE; 2977 abort_edge->probability = profile_probability::unlikely (); 2978 2979 transaction_bb = test_bb; 2980 } 2981 2982 // If we have both instrumented and uninstrumented code paths, select one. 2983 if (inst_edge && uninst_edge) 2984 { 2985 basic_block test_bb = create_empty_bb (transaction_bb); 2986 add_bb_to_loop (test_bb, transaction_bb->loop_father); 2987 if (region->restart_block == region->entry_block) 2988 region->restart_block = test_bb; 2989 2990 tree t1 = create_tmp_reg (tm_state_type); 2991 tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE); 2992 2993 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); 2994 gimple_stmt_iterator gsi = gsi_last_bb (test_bb); 2995 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); 2996 2997 t2 = build_int_cst (tm_state_type, 0); 2998 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); 2999 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); 3000 3001 // Create the edge into test_bb first, as we want to copy values 3002 // out of the fallthru edge. 3003 edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags); 3004 e->probability = fallthru_edge->probability; 3005 test_bb->count = fallthru_edge->count (); 3006 3007 // Now update the edges to the inst/uninst implementations. 3008 // For now assume that the paths are equally likely. When using HTM, 3009 // we'll try the uninst path first and fall back to the inst path if HTM 3010 // buffers are exceeded. Without HTM we start with the inst path and 3011 // use the uninst path when falling back to serial mode. 3012 redirect_edge_pred (inst_edge, test_bb); 3013 inst_edge->flags = EDGE_FALSE_VALUE; 3014 inst_edge->probability = profile_probability::even (); 3015 3016 redirect_edge_pred (uninst_edge, test_bb); 3017 uninst_edge->flags = EDGE_TRUE_VALUE; 3018 uninst_edge->probability = profile_probability::even (); 3019 } 3020 3021 // If we have no previous special cases, and we have PHIs at the beginning 3022 // of the atomic region, this means we have a loop at the beginning of the 3023 // atomic region that shares the first block. This can cause problems with 3024 // the transaction restart abnormal edges to be added in the tm_edges pass. 
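// (The restart edges added there would then be edges into a block with
// PHI nodes, each of which would require a PHI argument for the new edge.)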
3025 // Solve this by adding a new empty block to receive the abnormal edges. 3026 if (region->restart_block == region->entry_block 3027 && phi_nodes (region->entry_block)) 3028 { 3029 basic_block empty_bb = create_empty_bb (transaction_bb); 3030 region->restart_block = empty_bb; 3031 add_bb_to_loop (empty_bb, transaction_bb->loop_father); 3032 3033 redirect_edge_pred (fallthru_edge, empty_bb); 3034 make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU); 3035 } 3036 3037 return NULL; 3038 } 3039 3040 /* Generate the temporary to be used for the return value of 3041 BUILT_IN_TM_START. */ 3042 3043 static void * 3044 generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED) 3045 { 3046 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START); 3047 region->tm_state = 3048 create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state"); 3049 3050 // Reset the subcode, post optimizations. We'll fill this in 3051 // again as we process blocks. 3052 if (region->exit_blocks) 3053 { 3054 gtransaction *transaction_stmt = region->get_transaction_stmt (); 3055 unsigned int subcode = gimple_transaction_subcode (transaction_stmt); 3056 3057 if (subcode & GTMA_DOES_GO_IRREVOCABLE) 3058 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE 3059 | GTMA_MAY_ENTER_IRREVOCABLE 3060 | GTMA_HAS_NO_INSTRUMENTATION); 3061 else 3062 subcode &= GTMA_DECLARATION_MASK; 3063 gimple_transaction_set_subcode (transaction_stmt, subcode); 3064 } 3065 3066 return NULL; 3067 } 3068 3069 // Propagate flags from inner transactions outwards. 3070 static void 3071 propagate_tm_flags_out (struct tm_region *region) 3072 { 3073 if (region == NULL) 3074 return; 3075 propagate_tm_flags_out (region->inner); 3076 3077 if (region->outer && region->outer->transaction_stmt) 3078 { 3079 unsigned s 3080 = gimple_transaction_subcode (region->get_transaction_stmt ()); 3081 s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE 3082 | GTMA_MAY_ENTER_IRREVOCABLE); 3083 s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ()); 3084 gimple_transaction_set_subcode (region->outer->get_transaction_stmt (), 3085 s); 3086 } 3087 3088 propagate_tm_flags_out (region->next); 3089 } 3090 3091 /* Entry point to the MARK phase of TM expansion. Here we replace 3092 transactional memory statements with calls to builtins, and function 3093 calls with their transactional clones (if available). But we don't 3094 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */ 3095 3096 static unsigned int 3097 execute_tm_mark (void) 3098 { 3099 pending_edge_inserts_p = false; 3100 3101 expand_regions (all_tm_regions, generate_tm_state, NULL, 3102 /*traverse_clones=*/true); 3103 3104 tm_log_init (); 3105 3106 vec<tm_region *> bb_regions 3107 = get_bb_regions_instrumented (/*traverse_clones=*/true, 3108 /*include_uninstrumented_p=*/false); 3109 struct tm_region *r; 3110 unsigned i; 3111 3112 // Expand memory operations into calls into the runtime. 3113 // This collects log entries as well. 3114 FOR_EACH_VEC_ELT (bb_regions, i, r) 3115 { 3116 if (r != NULL) 3117 { 3118 if (r->transaction_stmt) 3119 { 3120 unsigned sub 3121 = gimple_transaction_subcode (r->get_transaction_stmt ()); 3122 3123 /* If we're sure to go irrevocable, there won't be 3124 anything to expand, since the run-time will go 3125 irrevocable right away. 
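Only the uninstrumented code path is expected to run in that case.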
*/ 3126 if (sub & GTMA_DOES_GO_IRREVOCABLE 3127 && sub & GTMA_MAY_ENTER_IRREVOCABLE) 3128 continue; 3129 } 3130 expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i)); 3131 } 3132 } 3133 3134 bb_regions.release (); 3135 3136 // Propagate flags from inner transactions outwards. 3137 propagate_tm_flags_out (all_tm_regions); 3138 3139 // Expand GIMPLE_TRANSACTIONs into calls into the runtime. 3140 expand_regions (all_tm_regions, expand_transaction, NULL, 3141 /*traverse_clones=*/false); 3142 3143 tm_log_emit (); 3144 tm_log_delete (); 3145 3146 if (pending_edge_inserts_p) 3147 gsi_commit_edge_inserts (); 3148 free_dominance_info (CDI_DOMINATORS); 3149 return 0; 3150 } 3151 3152 namespace { 3153 3154 const pass_data pass_data_tm_mark = 3155 { 3156 GIMPLE_PASS, /* type */ 3157 "tmmark", /* name */ 3158 OPTGROUP_NONE, /* optinfo_flags */ 3159 TV_TRANS_MEM, /* tv_id */ 3160 ( PROP_ssa | PROP_cfg ), /* properties_required */ 3161 0, /* properties_provided */ 3162 0, /* properties_destroyed */ 3163 0, /* todo_flags_start */ 3164 TODO_update_ssa, /* todo_flags_finish */ 3165 }; 3166 3167 class pass_tm_mark : public gimple_opt_pass 3168 { 3169 public: 3170 pass_tm_mark (gcc::context *ctxt) 3171 : gimple_opt_pass (pass_data_tm_mark, ctxt) 3172 {} 3173 3174 /* opt_pass methods: */ 3175 virtual unsigned int execute (function *) { return execute_tm_mark (); } 3176 3177 }; // class pass_tm_mark 3178 3179 } // anon namespace 3180 3181 gimple_opt_pass * 3182 make_pass_tm_mark (gcc::context *ctxt) 3183 { 3184 return new pass_tm_mark (ctxt); 3185 } 3186 3187 3188 /* Create an abnormal edge from STMT at iter, splitting the block 3189 as necessary. Adjust *PNEXT as needed for the split block. */ 3190 3191 static inline void 3192 split_bb_make_tm_edge (gimple *stmt, basic_block dest_bb, 3193 gimple_stmt_iterator iter, gimple_stmt_iterator *pnext) 3194 { 3195 basic_block bb = gimple_bb (stmt); 3196 if (!gsi_one_before_end_p (iter)) 3197 { 3198 edge e = split_block (bb, stmt); 3199 *pnext = gsi_start_bb (e->dest); 3200 } 3201 edge e = make_edge (bb, dest_bb, EDGE_ABNORMAL); 3202 if (e) 3203 e->probability = profile_probability::guessed_never (); 3204 3205 // Record the need for the edge for the benefit of the rtl passes. 3206 if (cfun->gimple_df->tm_restart == NULL) 3207 cfun->gimple_df->tm_restart 3208 = hash_table<tm_restart_hasher>::create_ggc (31); 3209 3210 struct tm_restart_node dummy; 3211 dummy.stmt = stmt; 3212 dummy.label_or_list = gimple_block_label (dest_bb); 3213 3214 tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy, 3215 INSERT); 3216 struct tm_restart_node *n = *slot; 3217 if (n == NULL) 3218 { 3219 n = ggc_alloc<tm_restart_node> (); 3220 *n = dummy; 3221 } 3222 else 3223 { 3224 tree old = n->label_or_list; 3225 if (TREE_CODE (old) == LABEL_DECL) 3226 old = tree_cons (NULL, old, NULL); 3227 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old); 3228 } 3229 } 3230 3231 /* Split block BB as necessary for every builtin function we added, and 3232 wire up the abnormal back edges implied by the transaction restart. */ 3233 3234 static void 3235 expand_block_edges (struct tm_region *const region, basic_block bb) 3236 { 3237 gimple_stmt_iterator gsi, next_gsi; 3238 3239 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi) 3240 { 3241 gimple *stmt = gsi_stmt (gsi); 3242 gcall *call_stmt; 3243 3244 next_gsi = gsi; 3245 gsi_next (&next_gsi); 3246 3247 // ??? Shouldn't we split for any non-pure, non-irrevocable function? 
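// For now we only split around calls into the TM runtime, i.e. calls
// flagged ECF_TM_BUILTIN (for example __builtin__ITM_commitTransaction,
// or the load/store barriers added by the tmmark pass above).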
3248 call_stmt = dyn_cast <gcall *> (stmt); 3249 if ((!call_stmt) 3250 || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0) 3251 continue; 3252 3253 if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt)) 3254 == BUILT_IN_TM_ABORT) 3255 { 3256 // If we have a ``__transaction_cancel [[outer]]'', there is only 3257 // one abnormal edge: to the transaction marked OUTER. 3258 // All compiler-generated instances of BUILT_IN_TM_ABORT have a 3259 // constant argument, which we can examine here. Users invoking 3260 // TM_ABORT directly get what they deserve. 3261 tree arg = gimple_call_arg (call_stmt, 0); 3262 if (TREE_CODE (arg) == INTEGER_CST 3263 && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0 3264 && !decl_is_tm_clone (current_function_decl)) 3265 { 3266 // Find the GTMA_IS_OUTER transaction. 3267 for (struct tm_region *o = region; o; o = o->outer) 3268 if (o->original_transaction_was_outer) 3269 { 3270 split_bb_make_tm_edge (call_stmt, o->restart_block, 3271 gsi, &next_gsi); 3272 break; 3273 } 3274 3275 // Otherwise, the front-end should have semantically checked 3276 // outer aborts, but in either case the target region is not 3277 // within this function. 3278 continue; 3279 } 3280 3281 // Non-outer TM aborts have an abnormal edge to the inner-most 3282 // transaction, the one being aborted. 3283 split_bb_make_tm_edge (call_stmt, region->restart_block, gsi, 3284 &next_gsi); 3285 } 3286 3287 // All TM builtins have an abnormal edge to the outer-most transaction. 3288 // We never restart inner transactions. For tm clones, we know a priori 3289 // that the outer-most transaction is outside the function. 3290 if (decl_is_tm_clone (current_function_decl)) 3291 continue; 3292 3293 if (cfun->gimple_df->tm_restart == NULL) 3294 cfun->gimple_df->tm_restart 3295 = hash_table<tm_restart_hasher>::create_ggc (31); 3296 3299 for (struct tm_region *o = region; o; o = o->outer) 3300 if (!o->outer) 3301 { 3302 split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi); 3303 break; 3304 } 3305 3306 // Delete any tail-call annotation that may have been added. 3307 // The tail-call pass may have mis-identified the commit as being 3308 // a candidate because we had not yet added this restart edge. 3309 gimple_call_set_tail (call_stmt, false); 3310 } 3311 } 3312 3313 /* Entry point to the final expansion of transactional nodes. 
*/ 3314 3315 namespace { 3316 3317 const pass_data pass_data_tm_edges = 3318 { 3319 GIMPLE_PASS, /* type */ 3320 "tmedge", /* name */ 3321 OPTGROUP_NONE, /* optinfo_flags */ 3322 TV_TRANS_MEM, /* tv_id */ 3323 ( PROP_ssa | PROP_cfg ), /* properties_required */ 3324 0, /* properties_provided */ 3325 0, /* properties_destroyed */ 3326 0, /* todo_flags_start */ 3327 TODO_update_ssa, /* todo_flags_finish */ 3328 }; 3329 3330 class pass_tm_edges : public gimple_opt_pass 3331 { 3332 public: 3333 pass_tm_edges (gcc::context *ctxt) 3334 : gimple_opt_pass (pass_data_tm_edges, ctxt) 3335 {} 3336 3337 /* opt_pass methods: */ 3338 virtual unsigned int execute (function *); 3339 3340 }; // class pass_tm_edges 3341 3342 unsigned int 3343 pass_tm_edges::execute (function *fun) 3344 { 3345 vec<tm_region *> bb_regions 3346 = get_bb_regions_instrumented (/*traverse_clones=*/false, 3347 /*include_uninstrumented_p=*/true); 3348 struct tm_region *r; 3349 unsigned i; 3350 3351 FOR_EACH_VEC_ELT (bb_regions, i, r) 3352 if (r != NULL) 3353 expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i)); 3354 3355 bb_regions.release (); 3356 3357 /* We've got to release the dominance info now, to indicate that it 3358 must be rebuilt completely. Otherwise we'll crash trying to update 3359 the SSA web in the TODO section following this pass. */ 3360 free_dominance_info (CDI_DOMINATORS); 3361 /* We've also wrecked loops badly by inserting abnormal edges. */ 3362 loops_state_set (LOOPS_NEED_FIXUP); 3363 bitmap_obstack_release (&tm_obstack); 3364 all_tm_regions = NULL; 3365 3366 return 0; 3367 } 3368 3369 } // anon namespace 3370 3371 gimple_opt_pass * 3372 make_pass_tm_edges (gcc::context *ctxt) 3373 { 3374 return new pass_tm_edges (ctxt); 3375 } 3376 3377 /* Helper function for expand_regions. Expand REGION and recurse to 3378 the inner region. Call CALLBACK on each region. CALLBACK returns 3379 NULL to continue the traversal, otherwise a non-null value which 3380 this function will return as well. TRAVERSE_CLONES is true if we 3381 should traverse transactional clones. */ 3382 3383 static void * 3384 expand_regions_1 (struct tm_region *region, 3385 void *(*callback)(struct tm_region *, void *), 3386 void *data, 3387 bool traverse_clones) 3388 { 3389 void *retval = NULL; 3390 if (region->exit_blocks 3391 || (traverse_clones && decl_is_tm_clone (current_function_decl))) 3392 { 3393 retval = callback (region, data); 3394 if (retval) 3395 return retval; 3396 } 3397 if (region->inner) 3398 { 3399 retval = expand_regions (region->inner, callback, data, traverse_clones); 3400 if (retval) 3401 return retval; 3402 } 3403 return retval; 3404 } 3405 3406 /* Traverse the regions enclosed and including REGION. Execute 3407 CALLBACK for each region, passing DATA. CALLBACK returns NULL to 3408 continue the traversal, otherwise a non-null value which this 3409 function will return as well. TRAVERSE_CLONES is true if we should 3410 traverse transactional clones. */ 3411 3412 static void * 3413 expand_regions (struct tm_region *region, 3414 void *(*callback)(struct tm_region *, void *), 3415 void *data, 3416 bool traverse_clones) 3417 { 3418 void *retval = NULL; 3419 while (region) 3420 { 3421 retval = expand_regions_1 (region, callback, data, traverse_clones); 3422 if (retval) 3423 return retval; 3424 region = region->next; 3425 } 3426 return retval; 3427 } 3428 3429 3430 /* A unique TM memory operation. */ 3431 struct tm_memop 3432 { 3433 /* Unique ID that all memory operations to the same location have. 
*/ 3434 unsigned int value_id; 3435 /* Address of load/store. */ 3436 tree addr; 3437 }; 3438 3439 /* TM memory operation hashtable helpers. */ 3440 3441 struct tm_memop_hasher : free_ptr_hash <tm_memop> 3442 { 3443 static inline hashval_t hash (const tm_memop *); 3444 static inline bool equal (const tm_memop *, const tm_memop *); 3445 }; 3446 3447 /* Htab support. Return a hash value for a `tm_memop'. */ 3448 inline hashval_t 3449 tm_memop_hasher::hash (const tm_memop *mem) 3450 { 3451 tree addr = mem->addr; 3452 /* We drill down to the SSA_NAME/DECL for the hash, but equality is 3453 actually done with operand_equal_p (see tm_memop_eq). */ 3454 if (TREE_CODE (addr) == ADDR_EXPR) 3455 addr = TREE_OPERAND (addr, 0); 3456 return iterative_hash_expr (addr, 0); 3457 } 3458 3459 /* Htab support. Return true if two tm_memop's are the same. */ 3460 inline bool 3461 tm_memop_hasher::equal (const tm_memop *mem1, const tm_memop *mem2) 3462 { 3463 return operand_equal_p (mem1->addr, mem2->addr, 0); 3464 } 3465 3466 /* Sets for solving data flow equations in the memory optimization pass. */ 3467 struct tm_memopt_bitmaps 3468 { 3469 /* Stores available to this BB upon entry. Basically, stores that 3470 dominate this BB. */ 3471 bitmap store_avail_in; 3472 /* Stores available at the end of this BB. */ 3473 bitmap store_avail_out; 3474 bitmap store_antic_in; 3475 bitmap store_antic_out; 3476 /* Reads available to this BB upon entry. Basically, reads that 3477 dominate this BB. */ 3478 bitmap read_avail_in; 3479 /* Reads available at the end of this BB. */ 3480 bitmap read_avail_out; 3481 /* Reads performed in this BB. */ 3482 bitmap read_local; 3483 /* Writes performed in this BB. */ 3484 bitmap store_local; 3485 3486 /* Temporary storage for pass. */ 3487 /* Is the current BB in the worklist? */ 3488 bool avail_in_worklist_p; 3489 /* Have we visited this BB? */ 3490 bool visited_p; 3491 }; 3492 3493 static bitmap_obstack tm_memopt_obstack; 3494 3495 /* Unique counter for TM loads and stores. Loads and stores of the 3496 same address get the same ID. */ 3497 static unsigned int tm_memopt_value_id; 3498 static hash_table<tm_memop_hasher> *tm_memopt_value_numbers; 3499 3500 #define STORE_AVAIL_IN(BB) \ 3501 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in 3502 #define STORE_AVAIL_OUT(BB) \ 3503 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out 3504 #define STORE_ANTIC_IN(BB) \ 3505 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in 3506 #define STORE_ANTIC_OUT(BB) \ 3507 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out 3508 #define READ_AVAIL_IN(BB) \ 3509 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in 3510 #define READ_AVAIL_OUT(BB) \ 3511 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out 3512 #define READ_LOCAL(BB) \ 3513 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local 3514 #define STORE_LOCAL(BB) \ 3515 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local 3516 #define AVAIL_IN_WORKLIST_P(BB) \ 3517 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p 3518 #define BB_VISITED_P(BB) \ 3519 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p 3520 3521 /* Given a TM load/store in STMT, return the value number for the address 3522 it accesses. 
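For example (a sketch): a store __builtin__ITM_WU4 (&x, 1) and a later load __builtin__ITM_RU4 (&x) receive the same value number, because the hashing and equality functions above look only at the address operand.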
*/ 3523 3524 static unsigned int 3525 tm_memopt_value_number (gimple *stmt, enum insert_option op) 3526 { 3527 struct tm_memop tmpmem, *mem; 3528 tm_memop **slot; 3529 3530 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt)); 3531 tmpmem.addr = gimple_call_arg (stmt, 0); 3532 slot = tm_memopt_value_numbers->find_slot (&tmpmem, op); 3533 if (*slot) 3534 mem = *slot; 3535 else if (op == INSERT) 3536 { 3537 mem = XNEW (struct tm_memop); 3538 *slot = mem; 3539 mem->value_id = tm_memopt_value_id++; 3540 mem->addr = tmpmem.addr; 3541 } 3542 else 3543 gcc_unreachable (); 3544 return mem->value_id; 3545 } 3546 3547 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */ 3548 3549 static void 3550 tm_memopt_accumulate_memops (basic_block bb) 3551 { 3552 gimple_stmt_iterator gsi; 3553 3554 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) 3555 { 3556 gimple *stmt = gsi_stmt (gsi); 3557 bitmap bits; 3558 unsigned int loc; 3559 3560 if (is_tm_store (stmt)) 3561 bits = STORE_LOCAL (bb); 3562 else if (is_tm_load (stmt)) 3563 bits = READ_LOCAL (bb); 3564 else 3565 continue; 3566 3567 loc = tm_memopt_value_number (stmt, INSERT); 3568 bitmap_set_bit (bits, loc); 3569 if (dump_file) 3570 { 3571 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=", 3572 is_tm_load (stmt) ? "LOAD" : "STORE", loc, 3573 gimple_bb (stmt)->index); 3574 print_generic_expr (dump_file, gimple_call_arg (stmt, 0)); 3575 fprintf (dump_file, "\n"); 3576 } 3577 } 3578 } 3579 3580 /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */ 3581 3582 static void 3583 dump_tm_memopt_set (const char *set_name, bitmap bits) 3584 { 3585 unsigned i; 3586 bitmap_iterator bi; 3587 const char *comma = ""; 3588 3589 fprintf (dump_file, "TM memopt: %s: [", set_name); 3590 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi) 3591 { 3592 hash_table<tm_memop_hasher>::iterator hi; 3593 struct tm_memop *mem = NULL; 3594 3595 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */ 3596 FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi) 3597 if (mem->value_id == i) 3598 break; 3599 gcc_assert (mem->value_id == i); 3600 fprintf (dump_file, "%s", comma); 3601 comma = ", "; 3602 print_generic_expr (dump_file, mem->addr); 3603 } 3604 fprintf (dump_file, "]\n"); 3605 } 3606 3607 /* Prettily dump all of the memopt sets in BLOCKS. */ 3608 3609 static void 3610 dump_tm_memopt_sets (vec<basic_block> blocks) 3611 { 3612 size_t i; 3613 basic_block bb; 3614 3615 for (i = 0; blocks.iterate (i, &bb); ++i) 3616 { 3617 fprintf (dump_file, "------------BB %d---------\n", bb->index); 3618 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb)); 3619 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb)); 3620 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb)); 3621 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb)); 3622 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb)); 3623 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb)); 3624 } 3625 } 3626 3627 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */ 3628 3629 static void 3630 tm_memopt_compute_avin (basic_block bb) 3631 { 3632 edge e; 3633 unsigned ix; 3634 3635 /* Seed with the AVOUT of any predecessor. */ 3636 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++) 3637 { 3638 e = EDGE_PRED (bb, ix); 3639 /* Make sure we have already visited this BB, and is thus 3640 initialized. 3641 3642 If e->src->aux is NULL, this predecessor is actually on an 3643 enclosing transaction. 
We only care about the current 3644 transaction, so ignore it. */ 3645 if (e->src->aux && BB_VISITED_P (e->src)) 3646 { 3647 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); 3648 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); 3649 break; 3650 } 3651 } 3652 3653 for (; ix < EDGE_COUNT (bb->preds); ix++) 3654 { 3655 e = EDGE_PRED (bb, ix); 3656 if (e->src->aux && BB_VISITED_P (e->src)) 3657 { 3658 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); 3659 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); 3660 } 3661 } 3662 3663 BB_VISITED_P (bb) = true; 3664 } 3665 3666 /* Compute the STORE_ANTIC_IN for the basic block BB. */ 3667 3668 static void 3669 tm_memopt_compute_antin (basic_block bb) 3670 { 3671 edge e; 3672 unsigned ix; 3673 3674 /* Seed with the ANTIC_OUT of any successor. */ 3675 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++) 3676 { 3677 e = EDGE_SUCC (bb, ix); 3678 /* Make sure we have already visited this BB, and is thus 3679 initialized. */ 3680 if (BB_VISITED_P (e->dest)) 3681 { 3682 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); 3683 break; 3684 } 3685 } 3686 3687 for (; ix < EDGE_COUNT (bb->succs); ix++) 3688 { 3689 e = EDGE_SUCC (bb, ix); 3690 if (BB_VISITED_P (e->dest)) 3691 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); 3692 } 3693 3694 BB_VISITED_P (bb) = true; 3695 } 3696 3697 /* Compute the AVAIL sets for every basic block in BLOCKS. 3698 3699 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows: 3700 3701 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb]) 3702 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors]) 3703 3704 This is basically what we do in lcm's compute_available(), but here 3705 we calculate two sets of sets (one for STOREs and one for READs), 3706 and we work on a region instead of the entire CFG. 3707 3708 REGION is the TM region. 3709 BLOCKS are the basic blocks in the region. */ 3710 3711 static void 3712 tm_memopt_compute_available (struct tm_region *region, 3713 vec<basic_block> blocks) 3714 { 3715 edge e; 3716 basic_block *worklist, *qin, *qout, *qend, bb; 3717 unsigned int qlen, i; 3718 edge_iterator ei; 3719 bool changed; 3720 3721 /* Allocate a worklist array/queue. Entries are only added to the 3722 list if they were not already on the list. So the size is 3723 bounded by the number of basic blocks in the region. */ 3724 qlen = blocks.length () - 1; 3725 qin = qout = worklist = 3726 XNEWVEC (basic_block, qlen); 3727 3728 /* Put every block in the region on the worklist. */ 3729 for (i = 0; blocks.iterate (i, &bb); ++i) 3730 { 3731 /* Seed AVAIL_OUT with the LOCAL set. */ 3732 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb)); 3733 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb)); 3734 3735 AVAIL_IN_WORKLIST_P (bb) = true; 3736 /* No need to insert the entry block, since it has an AVIN of 3737 null, and an AVOUT that has already been seeded in. */ 3738 if (bb != region->entry_block) 3739 *qin++ = bb; 3740 } 3741 3742 /* The entry block has been initialized with the local sets. */ 3743 BB_VISITED_P (region->entry_block) = true; 3744 3745 qin = worklist; 3746 qend = &worklist[qlen]; 3747 3748 /* Iterate until the worklist is empty. */ 3749 while (qlen) 3750 { 3751 /* Take the first entry off the worklist. */ 3752 bb = *qout++; 3753 qlen--; 3754 3755 if (qout >= qend) 3756 qout = worklist; 3757 3758 /* This block can be added to the worklist again if necessary. 
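That happens at the bottom of the loop, when the AVAIL_OUT of one of its predecessors changes.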
*/ 3759 AVAIL_IN_WORKLIST_P (bb) = false; 3760 tm_memopt_compute_avin (bb); 3761 3762 /* Note: We do not add the LOCAL sets here because we already 3763 seeded the AVAIL_OUT sets with them. */ 3764 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb)); 3765 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb)); 3766 if (changed 3767 && (region->exit_blocks == NULL 3768 || !bitmap_bit_p (region->exit_blocks, bb->index))) 3769 /* If the out state of this block changed, then we need to add 3770 its successors to the worklist if they are not already in. */ 3771 FOR_EACH_EDGE (e, ei, bb->succs) 3772 if (!AVAIL_IN_WORKLIST_P (e->dest) 3773 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) 3774 { 3775 *qin++ = e->dest; 3776 AVAIL_IN_WORKLIST_P (e->dest) = true; 3777 qlen++; 3778 3779 if (qin >= qend) 3780 qin = worklist; 3781 } 3782 } 3783 3784 free (worklist); 3785 3786 if (dump_file) 3787 dump_tm_memopt_sets (blocks); 3788 } 3789 3790 /* Compute ANTIC sets for every basic block in BLOCKS. 3791 3792 We compute STORE_ANTIC_OUT as follows: 3793 3794 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb]) 3795 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors]) 3796 3797 REGION is the TM region. 3798 BLOCKS are the basic blocks in the region. */ 3799 3800 static void 3801 tm_memopt_compute_antic (struct tm_region *region, 3802 vec<basic_block> blocks) 3803 { 3804 edge e; 3805 basic_block *worklist, *qin, *qout, *qend, bb; 3806 unsigned int qlen; 3807 int i; 3808 edge_iterator ei; 3809 3810 /* Allocate a worklist array/queue. Entries are only added to the 3811 list if they were not already on the list. So the size is 3812 bounded by the number of basic blocks in the region. */ 3813 qin = qout = worklist = XNEWVEC (basic_block, blocks.length ()); 3814 3815 for (qlen = 0, i = blocks.length () - 1; i >= 0; --i) 3816 { 3817 bb = blocks[i]; 3818 3819 /* Seed ANTIC_OUT with the LOCAL set. */ 3820 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb)); 3821 3822 /* Put every block in the region on the worklist. */ 3823 AVAIL_IN_WORKLIST_P (bb) = true; 3824 /* No need to insert exit blocks, since their ANTIC_IN is NULL, 3825 and their ANTIC_OUT has already been seeded in. */ 3826 if (region->exit_blocks 3827 && !bitmap_bit_p (region->exit_blocks, bb->index)) 3828 { 3829 qlen++; 3830 *qin++ = bb; 3831 } 3832 } 3833 3834 /* The exit blocks have been initialized with the local sets. */ 3835 if (region->exit_blocks) 3836 { 3837 unsigned int i; 3838 bitmap_iterator bi; 3839 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi) 3840 BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true; 3841 } 3842 3843 qin = worklist; 3844 qend = &worklist[qlen]; 3845 3846 /* Iterate until the worklist is empty. */ 3847 while (qlen) 3848 { 3849 /* Take the first entry off the worklist. */ 3850 bb = *qout++; 3851 qlen--; 3852 3853 if (qout >= qend) 3854 qout = worklist; 3855 3856 /* This block can be added to the worklist again if necessary. */ 3857 AVAIL_IN_WORKLIST_P (bb) = false; 3858 tm_memopt_compute_antin (bb); 3859 3860 /* Note: We do not add the LOCAL sets here because we already 3861 seeded the ANTIC_OUT sets with them. */ 3862 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb)) 3863 && bb != region->entry_block) 3864 /* If the out state of this block changed, then we need to add 3865 its predecessors to the worklist if they are not already in. 
	FOR_EACH_EDGE (e, ei, bb->preds)
	  if (!AVAIL_IN_WORKLIST_P (e->src))
	    {
	      *qin++ = e->src;
	      AVAIL_IN_WORKLIST_P (e->src) = true;
	      qlen++;

	      if (qin >= qend)
		qin = worklist;
	    }
    }

  free (worklist);

  if (dump_file)
    dump_tm_memopt_sets (blocks);
}

/* Offsets of load variants from TM_LOAD.  For example,
   BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
   See gtm-builtins.def.  */
#define TRANSFORM_RAR 1
#define TRANSFORM_RAW 2
#define TRANSFORM_RFW 3
/* Offsets of store variants from TM_STORE.  */
#define TRANSFORM_WAR 1
#define TRANSFORM_WAW 2
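/* An illustrative sketch of how these variants get chosen (the example
   is not from the original sources).  Within a single transaction:

     t1 = TM_LOAD (&x);	    first access: x merely enters READ_AVAIL
     TM_STORE (&x, t2);	    read available: becomes TM_STORE_WAR
     t3 = TM_LOAD (&x);	    store available: becomes TM_LOAD_RAW

   Once a location's store is available, later loads may use the
   read-after-write entry points, and the runtime may be able to skip
   conflict-detection and logging work it has already done for that
   location.  */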
/* Inform about a load/store optimization.  */

static void
dump_tm_memopt_transform (gimple *stmt)
{
  if (dump_file)
    {
      fprintf (dump_file, "TM memopt: transforming: ");
      print_gimple_stmt (dump_file, stmt, 0);
      fprintf (dump_file, "\n");
    }
}

/* Perform a read/write optimization.  Replaces the TM builtin in STMT
   by a builtin that is OFFSET entries down in the builtins table in
   gtm-builtins.def.  */

static void
tm_memopt_transform_stmt (unsigned int offset,
			  gcall *stmt,
			  gimple_stmt_iterator *gsi)
{
  tree fn = gimple_call_fn (stmt);
  gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
  TREE_OPERAND (fn, 0)
    = builtin_decl_explicit ((enum built_in_function)
			     (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
			      + offset));
  gimple_call_set_fn (stmt, fn);
  gsi_replace (gsi, stmt, true);
  dump_tm_memopt_transform (stmt);
}

/* Perform the actual TM memory optimization transformations in the
   basic blocks in BLOCKS.  */

static void
tm_memopt_transform_blocks (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;
  gimple_stmt_iterator gsi;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  bitmap read_avail = READ_AVAIL_IN (bb);
	  bitmap store_avail = STORE_AVAIL_IN (bb);
	  bitmap store_antic = STORE_ANTIC_OUT (bb);
	  unsigned int loc;

	  if (is_tm_simple_load (stmt))
	    {
	      gcall *call_stmt = as_a <gcall *> (stmt);
	      loc = tm_memopt_value_number (stmt, NO_INSERT);
	      if (store_avail && bitmap_bit_p (store_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi);
	      else if (store_antic && bitmap_bit_p (store_antic, loc))
		{
		  tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi);
		  bitmap_set_bit (store_avail, loc);
		}
	      else if (read_avail && bitmap_bit_p (read_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi);
	      else
		bitmap_set_bit (read_avail, loc);
	    }
	  else if (is_tm_simple_store (stmt))
	    {
	      gcall *call_stmt = as_a <gcall *> (stmt);
	      loc = tm_memopt_value_number (stmt, NO_INSERT);
	      if (store_avail && bitmap_bit_p (store_avail, loc))
		tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi);
	      else
		{
		  if (read_avail && bitmap_bit_p (read_avail, loc))
		    tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi);
		  bitmap_set_bit (store_avail, loc);
		}
	    }
	}
    }
}

/* Return a new set of bitmaps for a BB.  */

static struct tm_memopt_bitmaps *
tm_memopt_init_sets (void)
{
  struct tm_memopt_bitmaps *b
    = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
  b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
  return b;
}

/* Free sets computed for each BB.  */

static void
tm_memopt_free_sets (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    bb->aux = NULL;
}

/* Clear the visited bit for every basic block in BLOCKS.  */

static void
tm_memopt_clear_visited (vec<basic_block> blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; blocks.iterate (i, &bb); ++i)
    BB_VISITED_P (bb) = false;
}

/* Replace TM load/stores with hints for the runtime.  We handle
   things like read-after-write, write-after-read, read-after-read,
   read-for-write, etc.  */

static unsigned int
execute_tm_memopt (void)
{
  struct tm_region *region;
  vec<basic_block> bbs;

  tm_memopt_value_id = 0;
  tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10);

  for (region = all_tm_regions; region; region = region->next)
    {
      /* All the TM stores/loads in the current region.  */
      size_t i;
      basic_block bb;

      bitmap_obstack_initialize (&tm_memopt_obstack);

      /* Save all BBs for the current region.  */
      bbs = get_tm_region_blocks (region->entry_block,
				  region->exit_blocks,
				  region->irr_blocks,
				  NULL,
				  false);

      /* Collect all the memory operations.  */
      for (i = 0; bbs.iterate (i, &bb); ++i)
	{
	  bb->aux = tm_memopt_init_sets ();
	  tm_memopt_accumulate_memops (bb);
	}

      /* Solve data flow equations and transform each block accordingly.  */
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_available (region, bbs);
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_antic (region, bbs);
      tm_memopt_transform_blocks (bbs);

      tm_memopt_free_sets (bbs);
      bbs.release ();
      bitmap_obstack_release (&tm_memopt_obstack);
      tm_memopt_value_numbers->empty ();
    }

  delete tm_memopt_value_numbers;
  tm_memopt_value_numbers = NULL;
  return 0;
}

namespace {

const pass_data pass_data_tm_memopt =
{
  GIMPLE_PASS, /* type */
  "tmmemopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_tm_memopt : public gimple_opt_pass
{
public:
  pass_tm_memopt (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tm_memopt, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm && optimize > 0; }
  virtual unsigned int execute (function *) { return execute_tm_memopt (); }

}; // class pass_tm_memopt

} // anon namespace

gimple_opt_pass *
make_pass_tm_memopt (gcc::context *ctxt)
{
  return new pass_tm_memopt (ctxt);
}


/* Interprocedural analysis for the creation of transactional clones.
   The aim of this pass is to find which functions are referenced in
   a non-irrevocable transaction context, and for those over which
   we have control (or user directive), create a version of the
   function which uses only the transactional interface to reference
   protected memories.  This analysis proceeds in several steps:

     (1) Collect the set of all possible transactional clones:

	(a) For all local public functions marked tm_callable, push
	    them onto the tm_callee queue.

	(b) For all local functions, scan for calls in transaction blocks.
	    Push the caller and callee onto the tm_caller and tm_callee
	    queues.  Count the number of callers for each callee.

	(c) For each local function on the callee list, assume we will
	    create a transactional clone.  Push *all* calls onto the
	    callee queues; count the number of clone callers separately
	    from the number of original callers.

     (2) Propagate irrevocable status up the dominator tree:

	(a) Any external function on the callee list that is not marked
	    tm_callable is irrevocable.  Push all callers of such onto
	    a worklist.

	(b) For each function on the worklist, mark each block that
	    contains an irrevocable call.  Use the AND operator to
	    propagate that mark up the dominator tree.

	(c) If we reach the entry block for a possible transactional
	    clone, then the transactional clone is irrevocable, and
	    we should not create the clone after all.  Push all
	    callers onto the worklist.

	(d) Place tm_irrevocable calls at the beginning of the relevant
	    blocks.  Special case here is the entry block for the entire
	    transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
	    the library to begin the region in serial mode.  Decrement
	    the call count for all callees in the irrevocable region.

     (3) Create the transactional clones:

	Any tm_callee that still has a non-zero call count is cloned.  */
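/* An illustrative sketch (not from the original sources):

     static int g;
     static void f (void) { g++; }
     void h (void) { __transaction_atomic { f (); } }

   Step (1b) sees the call to f within h's transaction and queues f as
   a clone candidate with one caller.  f is local, available, and
   contains nothing irrevocable, so step (3) creates its transactional
   clone, and the call inside h is later redirected to that clone.  */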
/* This structure is stored in the AUX field of each cgraph_node.  */
struct tm_ipa_cg_data
{
  /* The clone of the function that got created.  */
  struct cgraph_node *clone;

  /* The tm regions in the normal function.  */
  struct tm_region *all_tm_regions;

  /* The blocks of the normal/clone functions that contain irrevocable
     calls, or blocks that are post-dominated by irrevocable calls.  */
  bitmap irrevocable_blocks_normal;
  bitmap irrevocable_blocks_clone;

  /* The blocks of the normal function that are involved in transactions.  */
  bitmap transaction_blocks_normal;

  /* The number of callers to the transactional clone of this function
     from normal and transactional clones respectively.  */
  unsigned tm_callers_normal;
  unsigned tm_callers_clone;

  /* True if all calls to this function's transactional clone
     are irrevocable.  Also automatically true if the function
     has no transactional clone.  */
  bool is_irrevocable;

  /* Flags indicating the presence of this function in various queues.  */
  bool in_callee_queue;
  bool in_worklist;

  /* Flags indicating the kind of scan desired while in the worklist.  */
  bool want_irr_scan_normal;
};

typedef vec<cgraph_node *> cgraph_node_queue;

/* Return the ipa data associated with NODE, allocating zeroed memory
   if necessary.  TRAVERSE_ALIASES is true if we must traverse aliases
   and set *NODE accordingly.  */

static struct tm_ipa_cg_data *
get_cg_data (struct cgraph_node **node, bool traverse_aliases)
{
  struct tm_ipa_cg_data *d;

  if (traverse_aliases && (*node)->alias)
    *node = (*node)->get_alias_target ();

  d = (struct tm_ipa_cg_data *) (*node)->aux;

  if (d == NULL)
    {
      d = (struct tm_ipa_cg_data *)
	obstack_alloc (&tm_obstack.obstack, sizeof (*d));
      (*node)->aux = (void *) d;
      memset (d, 0, sizeof (*d));
    }

  return d;
}

/* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
   it is already present.  */

static void
maybe_push_queue (struct cgraph_node *node,
		  cgraph_node_queue *queue_p, bool *in_queue_p)
{
  if (!*in_queue_p)
    {
      *in_queue_p = true;
      queue_p->safe_push (node);
    }
}

/* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
   Queue all callees within block BB.  */

static void
ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
			 basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl)
	    {
	      struct tm_ipa_cg_data *d;
	      unsigned *pcallers;
	      struct cgraph_node *node;

	      if (is_tm_ending_fndecl (fndecl))
		continue;
	      if (find_tm_replacement_function (fndecl))
		continue;

	      node = cgraph_node::get (fndecl);
	      gcc_assert (node != NULL);
	      d = get_cg_data (&node, true);

	      pcallers = (for_clone ? &d->tm_callers_clone
			  : &d->tm_callers_normal);
	      *pcallers += 1;

	      maybe_push_queue (node, callees_p, &d->in_callee_queue);
	    }
	}
    }
}

/* Scan all calls in NODE that are within a transaction region,
   and push the resulting nodes into the callee queue.  */

static void
ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
			       cgraph_node_queue *callees_p)
{
  d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
  d->all_tm_regions = all_tm_regions;

  for (tm_region *r = all_tm_regions; r; r = r->next)
    {
      vec<basic_block> bbs;
      basic_block bb;
      unsigned i;

      bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
				  d->transaction_blocks_normal, false, false);

      FOR_EACH_VEC_ELT (bbs, i, bb)
	ipa_tm_scan_calls_block (callees_p, bb, false);

      bbs.release ();
    }
}

/* Scan all calls in NODE as if this is the transactional clone,
   and push the destinations into the callee queue.  */

static void
ipa_tm_scan_calls_clone (struct cgraph_node *node,
			 cgraph_node_queue *callees_p)
{
  struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
  basic_block bb;

  FOR_EACH_BB_FN (bb, fn)
    ipa_tm_scan_calls_block (callees_p, bb, true);
}

/* The function NODE has been detected to be irrevocable.  Push all
   of its callers onto WORKLIST for the purpose of re-scanning them.  */

static void
ipa_tm_note_irrevocable (struct cgraph_node *node,
			 cgraph_node_queue *worklist_p)
{
  struct tm_ipa_cg_data *d = get_cg_data (&node, true);
  struct cgraph_edge *e;

  d->is_irrevocable = true;

  for (e = node->callers; e ; e = e->next_caller)
    {
      basic_block bb;
      struct cgraph_node *caller;

      /* Don't examine recursive calls.  */
      if (e->caller == node)
	continue;
      /* Even if we think we can go irrevocable, believe the user
	 above all.  */
      if (is_tm_safe_or_pure (e->caller->decl))
	continue;

      caller = e->caller;
      d = get_cg_data (&caller, true);

      /* Check if the callee is in a transactional region.  If so,
	 schedule the function for normal re-scan as well.  */
      bb = gimple_bb (e->call_stmt);
      gcc_assert (bb != NULL);
      if (d->transaction_blocks_normal
	  && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
	d->want_irr_scan_normal = true;

      maybe_push_queue (caller, worklist_p, &d->in_worklist);
    }
}

/* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
   within the block is irrevocable.  */

static bool
ipa_tm_scan_irr_block (basic_block bb)
{
  gimple_stmt_iterator gsi;
  tree fn;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
	{
	case GIMPLE_ASSIGN:
	  if (gimple_assign_single_p (stmt))
	    {
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);
	      if (volatile_lvalue_p (lhs) || volatile_lvalue_p (rhs))
		return true;
	    }
	  break;

	case GIMPLE_CALL:
	  {
	    tree lhs = gimple_call_lhs (stmt);
	    if (lhs && volatile_lvalue_p (lhs))
	      return true;

	    if (is_tm_pure_call (stmt))
	      break;

	    fn = gimple_call_fn (stmt);

	    /* Functions with the attribute are by definition irrevocable.  */
	    if (is_tm_irrevocable (fn))
	      return true;

	    /* For direct function calls, go ahead and check for replacement
	       functions, or transitive irrevocable functions.  For indirect
	       functions, we'll ask the runtime.  */
	    if (TREE_CODE (fn) == ADDR_EXPR)
	      {
		struct tm_ipa_cg_data *d;
		struct cgraph_node *node;

		fn = TREE_OPERAND (fn, 0);
		if (is_tm_ending_fndecl (fn))
		  break;
		if (find_tm_replacement_function (fn))
		  break;

		node = cgraph_node::get (fn);
		d = get_cg_data (&node, true);

		/* Return true if irrevocable, but above all, believe
		   the user.  */
		if (d->is_irrevocable
		    && !is_tm_safe_or_pure (fn))
		  return true;
	      }
	    break;
	  }

	case GIMPLE_ASM:
	  /* ??? The Approved Method of indicating that an inline
	     assembly statement is not relevant to the transaction
	     is to wrap it in a __tm_waiver block.  This is not
	     yet implemented, so we can't check for it.  */
	  if (is_tm_safe (current_function_decl))
	    {
	      tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
	      SET_EXPR_LOCATION (t, gimple_location (stmt));
	      error ("%Kasm not allowed in %<transaction_safe%> function", t);
	    }
	  return true;

	default:
	  break;
	}
    }

  return false;
}

/* For each of the blocks seeded within PQUEUE, walk the CFG looking
   for new irrevocable blocks, marking them in NEW_IRR.  Don't bother
   scanning past OLD_IRR or EXIT_BLOCKS.  */

static bool
ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
			bitmap old_irr, bitmap exit_blocks)
{
  bool any_new_irr = false;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  do
    {
      basic_block bb = pqueue->pop ();

      /* Don't re-scan blocks we know already are irrevocable.  */
      if (old_irr && bitmap_bit_p (old_irr, bb->index))
	continue;

      if (ipa_tm_scan_irr_block (bb))
	{
	  bitmap_set_bit (new_irr, bb->index);
	  any_new_irr = true;
	}
      else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
	{
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (!bitmap_bit_p (visited_blocks, e->dest->index))
	      {
		bitmap_set_bit (visited_blocks, e->dest->index);
		pqueue->safe_push (e->dest);
	      }
	}
    }
  while (!pqueue->is_empty ());

  BITMAP_FREE (visited_blocks);

  return any_new_irr;
}

/* Propagate the irrevocable property both up and down the dominator tree.
   ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
   are the exit blocks of the TM region; OLD_IRR are the results of a
   previous scan of the dominator tree which has been fully propagated;
   NEW_IRR is the set of new blocks which are gaining the irrevocable
   property during the current scan.  */

static void
ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
		      bitmap old_irr, bitmap exit_blocks)
{
  vec<basic_block> bbs;
  bitmap all_region_blocks;

  /* If this block is in the old set, no need to rescan.  */
  if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
    return;

  all_region_blocks = BITMAP_ALLOC (&tm_obstack);
  bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
			      all_region_blocks, false);
  do
    {
      basic_block bb = bbs.pop ();
      bool this_irr = bitmap_bit_p (new_irr, bb->index);
      bool all_son_irr = false;
      edge_iterator ei;
      edge e;

      /* Propagate up.  If all of my successors are irrevocable, then I
	 am too, provided there is at least one of them.  */
      if (!this_irr)
	{
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (!bitmap_bit_p (new_irr, e->dest->index))
		{
		  all_son_irr = false;
		  break;
		}
	      else
		all_son_irr = true;
	    }
	  if (all_son_irr)
	    {
	      /* Add block to new_irr if it hasn't already been processed.  */
	      if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
		{
		  bitmap_set_bit (new_irr, bb->index);
		  this_irr = true;
		}
	    }
	}

      /* Propagate down to everyone we immediately dominate.  */
      if (this_irr)
	{
	  basic_block son;
	  for (son = first_dom_son (CDI_DOMINATORS, bb);
	       son;
	       son = next_dom_son (CDI_DOMINATORS, son))
	    {
	      /* Make sure block is actually in a TM region, and it
		 isn't already in old_irr.  */
	      if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
		  && bitmap_bit_p (all_region_blocks, son->index))
		bitmap_set_bit (new_irr, son->index);
	    }
	}
    }
  while (!bbs.is_empty ());

  BITMAP_FREE (all_region_blocks);
  bbs.release ();
}
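/* An illustrative sketch (not from the original sources): if both arms
   of a conditional inside the region contain irrevocable calls, the
   upward pass marks the test block as well, since every one of its
   successors is irrevocable; the downward pass then marks the blocks
   that irrevocable block dominates within the region, since those can
   only execute after the transaction has already gone irrevocable.  */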
/* Decrement the tm_callers count (the normal or the clone count, as
   selected by FOR_CLONE) for every transactional-clone callee called
   within block BB.  */

static void
ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl)
	    {
	      struct tm_ipa_cg_data *d;
	      unsigned *pcallers;
	      struct cgraph_node *tnode;

	      if (is_tm_ending_fndecl (fndecl))
		continue;
	      if (find_tm_replacement_function (fndecl))
		continue;

	      tnode = cgraph_node::get (fndecl);
	      d = get_cg_data (&tnode, true);

	      pcallers = (for_clone ? &d->tm_callers_clone
			  : &d->tm_callers_normal);

	      gcc_assert (*pcallers > 0);
	      *pcallers -= 1;
	    }
	}
    }
}

/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
   as well as other irrevocable actions such as inline assembly.  Mark all
   such blocks as irrevocable and decrement the number of calls to
   transactional clones.  Return true if, for the transactional clone, the
   entire function is irrevocable.  */

static bool
ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
{
  struct tm_ipa_cg_data *d;
  bitmap new_irr, old_irr;
  bool ret = false;

  /* Builtin operators (operator new, and such).  */
  if (DECL_STRUCT_FUNCTION (node->decl) == NULL
      || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
    return false;

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  d = get_cg_data (&node, true);
  auto_vec<basic_block, 10> queue;
  new_irr = BITMAP_ALLOC (&tm_obstack);

  /* Scan each tm region, propagating irrevocable status through the tree.  */
  if (for_clone)
    {
      old_irr = d->irrevocable_blocks_clone;
      queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
	{
	  ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
				new_irr, old_irr, NULL);
	  ret = bitmap_bit_p (new_irr,
			      single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
	}
    }
  else
    {
      struct tm_region *region;

      old_irr = d->irrevocable_blocks_normal;
      for (region = d->all_tm_regions; region; region = region->next)
	{
	  queue.quick_push (region->entry_block);
	  if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
				      region->exit_blocks))
	    ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
				  region->exit_blocks);
	}
    }

  /* If we found any new irrevocable blocks, reduce the call count for
     transactional clones within the irrevocable blocks.  Save the new
     set of irrevocable blocks for next time.  */
  if (!bitmap_empty_p (new_irr))
    {
      bitmap_iterator bmi;
      unsigned i;

      EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
	ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
				       for_clone);

      if (old_irr)
	{
	  bitmap_ior_into (old_irr, new_irr);
	  BITMAP_FREE (new_irr);
	}
      else if (for_clone)
	d->irrevocable_blocks_clone = new_irr;
      else
	d->irrevocable_blocks_normal = new_irr;

      if (dump_file && new_irr)
	{
	  const char *dname;
	  bitmap_iterator bmi;
	  unsigned i;

	  dname = lang_hooks.decl_printable_name (current_function_decl, 2);
	  EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
	    fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
	}
    }
  else
    BITMAP_FREE (new_irr);

  pop_cfun ();

  return ret;
}

/* Return true if, for the transactional clone of NODE, any call
   may enter irrevocable mode.  */

static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  tree decl;
  unsigned flags;

  d = get_cg_data (&node, true);
  decl = node->decl;
  flags = flags_from_decl_or_type (decl);

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags & ECF_TM_BUILTIN)
    return false;

  /* Filter out all functions that are marked.  */
  if (flags & ECF_TM_PURE)
    return false;
  if (is_tm_safe (decl))
    return false;
  if (is_tm_irrevocable (decl))
    return true;
  if (is_tm_callable (decl))
    return true;
  if (find_tm_replacement_function (decl))
    return true;

  /* If we aren't seeing the final version of the function we don't
     know what it will contain at runtime.  */
  if (node->get_availability () < AVAIL_AVAILABLE)
    return true;

  /* If the function must go irrevocable, then of course true.  */
  if (d->is_irrevocable)
    return true;

  /* If there are any blocks marked irrevocable, then the function
     as a whole may enter irrevocable.  */
  if (d->irrevocable_blocks_clone)
    return true;

  /* We may have previously marked this function as tm_may_enter_irr;
     see pass_diagnose_tm_blocks.  */
  if (node->local.tm_may_enter_irr)
    return true;

  /* Recurse on the main body for aliases.  In general, this will
     result in one of the bits above being set so that we will not
     have to recurse next time.  */
  if (node->alias)
    return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));

  /* What remains is unmarked local functions without items that force
     the function to go irrevocable.  */
  return false;
}

/* Diagnose calls from transaction_safe functions to unmarked
   functions that are determined to not be safe.  */

static void
ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  for (e = node->callees; e ; e = e->next_callee)
    if (!is_tm_callable (e->callee->decl)
	&& e->callee->local.tm_may_enter_irr)
      error_at (gimple_location (e->call_stmt),
		"unsafe function call %qD within "
		"%<transaction_safe%> function", e->callee->decl);
}

/* Diagnose calls from atomic transactions to unmarked functions
   that are determined to not be safe.  */

static void
ipa_tm_diagnose_transaction (struct cgraph_node *node,
			     struct tm_region *all_tm_regions)
{
  struct tm_region *r;

  for (r = all_tm_regions; r ; r = r->next)
    if (gimple_transaction_subcode (r->get_transaction_stmt ())
	& GTMA_IS_RELAXED)
      {
	/* Atomic transactions can be nested inside relaxed.  */
	if (r->inner)
	  ipa_tm_diagnose_transaction (node, r->inner);
      }
    else
      {
	vec<basic_block> bbs;
	gimple_stmt_iterator gsi;
	basic_block bb;
	size_t i;

	bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
				    r->irr_blocks, NULL, false);

	for (i = 0; bbs.iterate (i, &bb); ++i)
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple *stmt = gsi_stmt (gsi);
	      tree fndecl;

	      if (gimple_code (stmt) == GIMPLE_ASM)
		{
		  error_at (gimple_location (stmt),
			    "asm not allowed in atomic transaction");
		  continue;
		}

	      if (!is_gimple_call (stmt))
		continue;
	      fndecl = gimple_call_fndecl (stmt);

	      /* Indirect function calls have been diagnosed already.  */
	      if (!fndecl)
		continue;

	      /* Stop at the end of the transaction.  */
	      if (is_tm_ending_fndecl (fndecl))
		{
		  if (bitmap_bit_p (r->exit_blocks, bb->index))
		    break;
		  continue;
		}

	      /* Marked functions have been diagnosed already.  */
	      if (is_tm_pure_call (stmt))
		continue;
	      if (is_tm_callable (fndecl))
		continue;

	      if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
		error_at (gimple_location (stmt),
			  "unsafe function call %qD within "
			  "atomic transaction", fndecl);
	    }

	bbs.release ();
      }
}

/* Return a transactional mangled name for the DECL_ASSEMBLER_NAME
   OLD_ASM_ID.  The result is an IDENTIFIER_NODE, so the caller need
   not free it.  */

static tree
tm_mangle (tree old_asm_id)
{
  const char *old_asm_name;
  char *tm_name;
  void *alloc = NULL;
  struct demangle_component *dc;
  tree new_asm_id;

  /* Determine if the symbol is already a valid C++ mangled name.  Do this
     even for C, which might be interfacing with C++ code via appropriately
     ugly identifiers.  */
  /* ??? We could probably do just as well checking for "_Z" and be done.  */
  old_asm_name = IDENTIFIER_POINTER (old_asm_id);
  dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);

  if (dc == NULL)
    {
      char length[8];

    do_unencoded:
      sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
      tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
    }
  else
    {
      old_asm_name += 2;	/* Skip _Z */

      switch (dc->type)
	{
	case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
	case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
	  /* Don't play silly games, you!  */
	  goto do_unencoded;

	case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
	  /* I'd really like to know if we can ever be passed one of
	     these from the C++ front end.  The Logical Thing would
	     seem that hidden-alias should be outer-most, so that we
	     get hidden-alias of a transaction-clone and not vice-versa.  */
	  old_asm_name += 2;
	  break;

	default:
	  break;
	}

      tm_name = concat ("_ZGTt", old_asm_name, NULL);
    }
  free (alloc);

  new_asm_id = get_identifier (tm_name);
  free (tm_name);

  return new_asm_id;
}
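/* For illustration (these results follow directly from the code above):
   a C++ mangled name keeps its encoding and gains the transaction-clone
   prefix, so "_Z3foov" becomes "_ZGTt3foov", while a plain C identifier
   is length-prefixed like a <source-name>, so "foo" becomes
   "_ZGTt3foo".  */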
/* Mark NODE as forced to be output, as if its address had been taken.  */

static inline void
ipa_tm_mark_force_output_node (struct cgraph_node *node)
{
  node->mark_force_output ();
  node->analyzed = true;
}

/* Mark NODE as required by the ABI, so it is kept even if unreferenced.  */

static inline void
ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
{
  node->forced_by_abi = true;
  node->analyzed = true;
}

/* Callback data for ipa_tm_create_version_alias.  */
struct create_version_alias_info
{
  struct cgraph_node *old_node;
  tree new_decl;
};

/* A subroutine of ipa_tm_create_version, called via
   cgraph_for_node_and_aliases.  Create new tm clones for each of
   the existing aliases.  */
static bool
ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
{
  struct create_version_alias_info *info
    = (struct create_version_alias_info *) data;
  tree old_decl, new_decl, tm_name;
  struct cgraph_node *new_node;

  if (!node->cpp_implicit_alias)
    return false;

  old_decl = node->decl;
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
			 TREE_CODE (old_decl), tm_name,
			 TREE_TYPE (old_decl));

  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);

  /* Based loosely on C++'s make_alias_for().  */
  TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
  DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
  DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
  TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
  DECL_EXTERNAL (new_decl) = 0;
  DECL_ARTIFICIAL (new_decl) = 1;
  TREE_ADDRESSABLE (new_decl) = 1;
  TREE_USED (new_decl) = 1;
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (decl_comdat_group_id (old_decl)));

  new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
  new_node->tm_clone = true;
  new_node->externally_visible = info->old_node->externally_visible;
  new_node->no_reorder = info->old_node->no_reorder;
  /* ?? Do not traverse aliases here.  */
  get_cg_data (&node, false)->clone = new_node;

  record_tm_clone_pair (old_decl, new_decl);

  if (info->old_node->force_output
      || info->old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (info->old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);
  return false;
}

/* Create a copy of the function (possibly declaration only) of OLD_NODE,
   appropriate for the transactional clone.  */

static void
ipa_tm_create_version (struct cgraph_node *old_node)
{
  tree new_decl, old_decl, tm_name;
  struct cgraph_node *new_node;

  old_decl = old_node->decl;
  new_decl = copy_node (old_decl);

  /* DECL_ASSEMBLER_NAME needs to be set before we call
     cgraph_copy_node_for_versioning below, because cgraph_node will
     fill the assembler_name_hash.  */
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (DECL_COMDAT_GROUP (old_decl)));

  gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
  new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
  new_node->local.local = false;
  new_node->externally_visible = old_node->externally_visible;
  new_node->lowered = true;
  new_node->tm_clone = 1;
  if (!old_node->implicit_section)
    new_node->set_section (old_node->get_section ());
  get_cg_data (&old_node, true)->clone = new_node;

  if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
    {
      /* Remap extern inline to static inline.  */
      /* ??? Is it worth trying to use make_decl_one_only?  */
      if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
	{
	  DECL_EXTERNAL (new_decl) = 0;
	  TREE_PUBLIC (new_decl) = 0;
	  DECL_WEAK (new_decl) = 0;
	}

      tree_function_versioning (old_decl, new_decl,
				NULL, false, NULL,
				false, NULL, NULL);
    }

  record_tm_clone_pair (old_decl, new_decl);

  symtab->call_cgraph_insertion_hooks (new_node);
  if (old_node->force_output
      || old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);

  /* Do the same thing, but for any aliases of the original node.  */
  {
    struct create_version_alias_info data;
    data.old_node = old_node;
    data.new_decl = new_decl;
    old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
						  &data, true);
  }
}

/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB.  */
static void
ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
			basic_block bb)
{
  gimple_stmt_iterator gsi;
  gcall *g;

  transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
			 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));

  split_block_after_labels (bb);
  gsi = gsi_after_labels (bb);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create
		       (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
		     g, gimple_bb (g)->count);
}

/* Construct a call to TM_GETTMCLONE and insert it before GSI.  */

static bool
ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
			       struct tm_region *region,
			       gimple_stmt_iterator *gsi, gcall *stmt)
{
  tree gettm_fn, ret, old_fn, callfn;
  gcall *g;
  gassign *g2;
  bool safe;

  old_fn = gimple_call_fn (stmt);

  if (TREE_CODE (old_fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (old_fn, 0);
      tree clone = get_tm_clone_pair (fndecl);

      /* By transforming the call into a TM_GETTMCLONE, we are
	 technically taking the address of the original function and
	 its clone.  Explain this so inlining will know this function
	 is needed.  */
      cgraph_node::get (fndecl)->mark_address_taken ();
      if (clone)
	cgraph_node::get (clone)->mark_address_taken ();
    }

  safe = is_tm_safe (TREE_TYPE (old_fn));
  gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
				    : BUILT_IN_TM_GETTMCLONE_IRR);
  ret = create_tmp_var (ptr_type_node);

  if (!safe)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  /* Discard OBJ_TYPE_REF, since we weren't able to fold it.  */
  if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
    old_fn = OBJ_TYPE_REF_EXPR (old_fn);

  g = gimple_build_call (gettm_fn, 1, old_fn);
  ret = make_ssa_name (ret, g);
  gimple_call_set_lhs (g, ret);

  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create (gettm_fn), g,
		     gimple_bb (g)->count);

  /* Cast return value from tm_gettmclone* into appropriate function
     pointer.  */
  callfn = create_tmp_var (TREE_TYPE (old_fn));
  g2 = gimple_build_assign (callfn,
			    fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
  callfn = make_ssa_name (callfn, g2);
  gimple_assign_set_lhs (g2, callfn);
  gsi_insert_before (gsi, g2, GSI_SAME_STMT);

  /* ??? This is a hack to preserve the NOTHROW bit on the call,
     which we would have derived from the decl.  Failure to save
     this bit means we might have to split the basic block.  */
  if (gimple_call_nothrow_p (stmt))
    gimple_call_set_nothrow (stmt, true);

  gimple_call_set_fn (stmt, callfn);

  /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
     for a call statement.  Fix it.  */
  {
    tree lhs = gimple_call_lhs (stmt);
    tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
    if (lhs
	&& !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
      {
	tree temp;

	temp = create_tmp_reg (rettype);
	gimple_call_set_lhs (stmt, temp);

	g2 = gimple_build_assign (lhs,
				  fold_build1 (VIEW_CONVERT_EXPR,
					       TREE_TYPE (lhs), temp));
	gsi_insert_after (gsi, g2, GSI_SAME_STMT);
      }
  }

  update_stmt (stmt);
  cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
  if (e && e->indirect_info)
    e->indirect_info->polymorphic = false;

  return true;
}
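/* An illustrative sketch (not from the original sources): an indirect
   call "fnptr (x);" inside a transaction becomes, roughly,

     clone = _ITM_getTMCloneOrIrrevocable (fnptr);
     callfn = (fntype) clone;
     callfn (x);

   assuming the usual libitm entry point name; the runtime looks the
   function up in its clone table at execution time and, for the IRR
   variant, enters serial-irrevocable mode if no clone is registered.  */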
/* Helper function for ipa_tm_transform_calls*.  Given a call
   statement in GSI which resides inside transaction REGION, redirect
   the call to either its wrapper function, or its clone.  */

static void
ipa_tm_transform_calls_redirect (struct cgraph_node *node,
				 struct tm_region *region,
				 gimple_stmt_iterator *gsi,
				 bool *need_ssa_rename_p)
{
  gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
  struct cgraph_node *new_node;
  struct cgraph_edge *e = node->get_edge (stmt);
  tree fndecl = gimple_call_fndecl (stmt);

  /* For indirect calls, pass the address through the runtime.  */
  if (fndecl == NULL)
    {
      *need_ssa_rename_p |=
	ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
      return;
    }

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
    return;

  /* Fixup recursive calls inside clones.  */
  /* ??? Why did cgraph_copy_node_for_versioning update the call edges
     for recursion but not update the call statements themselves?  */
  if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
    {
      gimple_call_set_fndecl (stmt, current_function_decl);
      return;
    }

  /* If there is a replacement, use it.  */
  fndecl = find_tm_replacement_function (fndecl);
  if (fndecl)
    {
      new_node = cgraph_node::get_create (fndecl);

      /* ??? Mark all transaction_wrap functions tm_may_enter_irr.

	 We can't do this earlier in record_tm_replacement because
	 cgraph_remove_unreachable_nodes is called before we inject
	 references to the node.  Further, we can't do this in some
	 nice central place in ipa_tm_execute because we don't have
	 the exact list of wrapper functions that would be used.
	 Marking more wrappers than necessary results in the creation
	 of unnecessary cgraph_nodes, which can cause some of the
	 other IPA passes to crash.

	 We do need to mark these nodes so that we get the proper
	 result in expand_call_tm.  */
      /* ??? This seems broken.  How is it that we're marking the
	 CALLEE as may_enter_irr?  Surely we should be marking the
	 CALLER.  Also note that find_tm_replacement_function also
	 contains mappings into the TM runtime, e.g. memcpy.  These
	 we know won't go irrevocable.  */
      new_node->local.tm_may_enter_irr = 1;
    }
  else
    {
      struct tm_ipa_cg_data *d;
      struct cgraph_node *tnode = e->callee;

      d = get_cg_data (&tnode, true);
      new_node = d->clone;

      /* As we've already skipped pure calls and appropriate builtins,
	 and we've already marked irrevocable blocks, if we can't come
	 up with a static replacement, then ask the runtime.  */
      if (new_node == NULL)
	{
	  *need_ssa_rename_p |=
	    ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
	  return;
	}

      fndecl = new_node->decl;
    }

  e->redirect_callee (new_node);
  gimple_call_set_fndecl (stmt, fndecl);
}

/* Helper function for ipa_tm_transform_calls.  For a given BB,
   install calls to tm_irrevocable when IRR_BLOCKS are reached,
   redirect other calls to the generated transactional clone.  */

static bool
ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
			  basic_block bb, bitmap irr_blocks)
{
  gimple_stmt_iterator gsi;
  bool need_ssa_rename = false;

  if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
    {
      ipa_tm_insert_irr_call (node, region, bb);
      return true;
    }

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if (!is_gimple_call (stmt))
	continue;
      if (is_tm_pure_call (stmt))
	continue;

      /* Redirect edges to the appropriate replacement or clone.  */
      ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
    }

  return need_ssa_rename;
}

/* Walk the CFG for REGION, beginning at BB.  Install calls to
   tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
   the generated transactional clone.  */

static bool
ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
			basic_block bb, bitmap irr_blocks)
{
  bool need_ssa_rename = false;
  edge e;
  edge_iterator ei;
  auto_vec<basic_block> queue;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  queue.safe_push (bb);
  do
    {
      bb = queue.pop ();

      need_ssa_rename |=
	ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);

      if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
	continue;

      if (region && bitmap_bit_p (region->exit_blocks, bb->index))
	continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    queue.safe_push (e->dest);
	  }
    }
  while (!queue.is_empty ());

  BITMAP_FREE (visited_blocks);

  return need_ssa_rename;
}

/* Transform the calls within the TM regions within NODE.  */

static void
ipa_tm_transform_transaction (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  struct tm_region *region;
  bool need_ssa_rename = false;

  d = get_cg_data (&node, true);

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  for (region = d->all_tm_regions; region; region = region->next)
    {
      /* If we're sure to go irrevocable, don't transform anything.  */
      if (d->irrevocable_blocks_normal
	  && bitmap_bit_p (d->irrevocable_blocks_normal,
			   region->entry_block->index))
	{
	  transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
				   | GTMA_MAY_ENTER_IRREVOCABLE
				   | GTMA_HAS_NO_INSTRUMENTATION);
	  continue;
	}

      need_ssa_rename |=
	ipa_tm_transform_calls (node, region, region->entry_block,
				d->irrevocable_blocks_normal);
    }

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}

/* Transform the calls within the transactional clone of NODE.  */

static void
ipa_tm_transform_clone (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  bool need_ssa_rename;

  d = get_cg_data (&node, true);

  /* If this function makes no calls and has no irrevocable blocks,
     then there's nothing to do.  */
  /* ??? Remove non-aborting top-level transactions.  */
  if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
    return;

  push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  need_ssa_rename =
    ipa_tm_transform_calls (d->clone, NULL,
			    single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
			    d->irrevocable_blocks_clone);

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}

/* Main entry point for the transactional memory IPA pass.  */

static unsigned int
ipa_tm_execute (void)
{
  cgraph_node_queue tm_callees = cgraph_node_queue ();
  /* List of functions that will go irrevocable.  */
  cgraph_node_queue irr_worklist = cgraph_node_queue ();

  struct cgraph_node *node;
  struct tm_ipa_cg_data *d;
  enum availability a;
  unsigned int i;

  cgraph_node::checking_verify_cgraph_nodes ();

  bitmap_obstack_initialize (&tm_obstack);
  initialize_original_copy_tables ();

  /* For all local functions marked tm_callable, queue them.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (is_tm_callable (node->decl)
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	d = get_cg_data (&node, true);
	maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
      }

  /* For all local reachable functions...  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	/* ... marked tm_pure, record that fact for the runtime by
	   indicating that the pure function is its own tm_callable.
	   No need to do this if the function's address can't be taken.  */
	if (is_tm_pure (node->decl))
	  {
	    if (!node->local.local)
	      record_tm_clone_pair (node->decl, node->decl);
	    continue;
	  }

	push_cfun (DECL_STRUCT_FUNCTION (node->decl));
	calculate_dominance_info (CDI_DOMINATORS);

	tm_region_init (NULL);
	if (all_tm_regions)
	  {
	    d = get_cg_data (&node, true);

	    /* Scan for calls that are in each transaction, and
	       generate the uninstrumented code path.  */
	    ipa_tm_scan_calls_transaction (d, &tm_callees);

	    /* Put it in the worklist so we can scan the function
	       later (ipa_tm_scan_irr_function) and mark the
	       irrevocable blocks.  */
	    maybe_push_queue (node, &irr_worklist, &d->in_worklist);
	    d->want_irr_scan_normal = true;
	  }

	pop_cfun ();
      }

  /* For every local function on the callee list, scan as if we will be
     creating a transactional clone, queueing all new functions we find
     along the way.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      a = node->get_availability ();
      d = get_cg_data (&node, true);

      /* Put it in the worklist so we can scan the function later
	 (ipa_tm_scan_irr_function) and mark the irrevocable
	 blocks.  */
      maybe_push_queue (node, &irr_worklist, &d->in_worklist);

      /* Some callees cannot be arbitrarily cloned.  These will always be
	 irrevocable.  Mark these now, so that we need not scan them.  */
      if (is_tm_irrevocable (node->decl))
	ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a <= AVAIL_NOT_AVAILABLE
	       && !is_tm_safe_or_pure (node->decl))
	ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a >= AVAIL_INTERPOSABLE)
	{
	  if (!tree_versionable_function_p (node->decl))
	    ipa_tm_note_irrevocable (node, &irr_worklist);
	  else if (!d->is_irrevocable)
	    {
	      /* If this is an alias, make sure its base is queued as well.
		 We need not scan the callees now, as the base will do.  */
	      if (node->alias)
		{
		  node = cgraph_node::get (node->thunk.alias);
		  d = get_cg_data (&node, true);
		  maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
		  continue;
		}

	      /* Add all nodes called by this function into
		 tm_callees as well.  */
	      ipa_tm_scan_calls_clone (node, &tm_callees);
	    }
	}
    }

  /* Iterate scans until no more work to be done.  Prefer not to use
     vec::pop because the worklist tends to follow a breadth-first
     search of the callgraph, which should allow convergence with a
     minimum number of scans.  But we also don't want the worklist
     array to grow without bound, so we shift the array up periodically.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      if (i > 256 && i == irr_worklist.length () / 8)
	{
	  irr_worklist.block_remove (0, i);
	  i = 0;
	}

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;

      if (d->want_irr_scan_normal)
	{
	  d->want_irr_scan_normal = false;
	  ipa_tm_scan_irr_function (node, false);
	}
      if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
	ipa_tm_note_irrevocable (node, &irr_worklist);
    }

  /* For every function on the callee list, collect the tm_may_enter_irr
     bit on the node.  */
  irr_worklist.truncate (0);
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (ipa_tm_mayenterirr_function (node))
	{
	  d = get_cg_data (&node, true);
	  gcc_assert (d->in_worklist == false);
	  maybe_push_queue (node, &irr_worklist, &d->in_worklist);
	}
    }

  /* Propagate the tm_may_enter_irr bit to callers until stable.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      struct cgraph_node *caller;
      struct cgraph_edge *e;
      struct ipa_ref *ref;

      if (i > 256 && i == irr_worklist.length () / 8)
	{
	  irr_worklist.block_remove (0, i);
	  i = 0;
	}

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;
      node->local.tm_may_enter_irr = true;

      /* Propagate back to normal callers.  */
      for (e = node->callers; e ; e = e->next_caller)
	{
	  caller = e->caller;
	  if (!is_tm_safe_or_pure (caller->decl)
	      && !caller->local.tm_may_enter_irr)
	    {
	      d = get_cg_data (&caller, true);
	      maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
	    }
	}

      /* Propagate back to referring aliases as well.  */
      FOR_EACH_ALIAS (node, ref)
	{
	  caller = dyn_cast<cgraph_node *> (ref->referring);
	  if (!caller->local.tm_may_enter_irr)
	    {
	      /* ?? Do not traverse aliases here.  */
	      d = get_cg_data (&caller, false);
	      maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
	    }
	}
    }

  /* Now validate all tm_safe functions, and all atomic regions in
     other functions.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	d = get_cg_data (&node, true);
	if (is_tm_safe (node->decl))
	  ipa_tm_diagnose_tm_safe (node);
	else if (d->all_tm_regions)
	  ipa_tm_diagnose_transaction (node, d->all_tm_regions);
      }

  /* Create clones.  Do those that are not irrevocable and have a
     positive call count.  Do those publicly visible functions that
     the user directed us to clone.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      bool doit = false;

      node = tm_callees[i];
      if (node->cpp_implicit_alias)
	continue;

      a = node->get_availability ();
      d = get_cg_data (&node, true);

      if (a <= AVAIL_NOT_AVAILABLE)
	doit = is_tm_callable (node->decl);
      else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
	doit = true;
      else if (!d->is_irrevocable
	       && d->tm_callers_normal + d->tm_callers_clone > 0)
	doit = true;

      if (doit)
	ipa_tm_create_version (node);
    }

  /* Redirect calls to the new clones, and insert irrevocable marks.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (node->analyzed)
	{
	  d = get_cg_data (&node, true);
	  if (d->clone)
	    ipa_tm_transform_clone (node);
	}
    }
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	d = get_cg_data (&node, true);
	if (d->all_tm_regions)
	  ipa_tm_transform_transaction (node);
      }

  /* Free and clear all data structures.  */
  tm_callees.release ();
  irr_worklist.release ();
  bitmap_obstack_release (&tm_obstack);
  free_original_copy_tables ();

  FOR_EACH_FUNCTION (node)
    node->aux = NULL;

  cgraph_node::checking_verify_cgraph_nodes ();

  return 0;
}

namespace {

const pass_data pass_data_ipa_tm =
{
  SIMPLE_IPA_PASS, /* type */
  "tmipa", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_tm : public simple_ipa_opt_pass
{
public:
  pass_ipa_tm (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return ipa_tm_execute (); }

}; // class pass_ipa_tm

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_tm (gcc::context *ctxt)
{
  return new pass_ipa_tm (ctxt);
}

#include "gt-trans-mem.h"