/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "pointer-set.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "tree-inline.h"
#include "tree-iterator.h"
#include "tree-pass.h"
#include "timevar.h"
#include "langhooks.h"
#include "ggc.h"
#include "diagnostic-core.h"
#include "gimple.h"
#include "target.h"

/* In some instances a tree and a gimple need to be stored in the same
   table, i.e. in hash tables.  This is a structure to do this.  */
typedef union {tree *tp; tree t; gimple g;} treemple;

/* Nonzero if we are using EH to handle cleanups.  */
static int using_eh_for_cleanups_p = 0;

void
using_eh_for_cleanups (void)
{
  using_eh_for_cleanups_p = 1;
}

/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */

/* Add statement T in function IFUN to landing pad NUM.  */

void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  struct throw_stmt_node *n;
  void **slot;

  gcc_assert (num != 0);

  n = ggc_alloc_throw_stmt_node ();
  n->stmt = t;
  n->lp_nr = num;

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
                                                    struct_ptr_eq,
                                                    ggc_free));

  slot = htab_find_slot (get_eh_throw_stmt_table (ifun), n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}

/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
        lp = gen_eh_landing_pad (region);
      else
        gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}


/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  struct throw_stmt_node dummy;
  void **slot;

  if (!get_eh_throw_stmt_table (ifun))
    return false;

  dummy.stmt = t;
  slot = htab_find_slot (get_eh_throw_stmt_table (ifun), &dummy,
                         NO_INSERT);
  if (slot)
    {
      htab_clear_slot (get_eh_throw_stmt_table (ifun), slot);
      return true;
    }
  else
    return false;
}


/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  struct throw_stmt_node *p, n;

  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  n.stmt = t;
  p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
  return p ? p->lp_nr : 0;
}

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gimple parent;
};

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static htab_t finally_tree;

/* Record a CHILD/PARENT link in the finally tree.  */

static void
record_in_finally_tree (treemple child, gimple parent)
{
  struct finally_tree_node *n;
  void **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = htab_find_slot (finally_tree, n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple stmt, gimple region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gimple region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple stmt, gimple region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (stmt);
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (stmt), region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
      collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }
}


/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = (struct finally_tree_node *) htab_find (finally_tree, &n);
      if (!p)
        return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  int is_label;
};
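
/* As an illustration (the label name here is made up), for

     try { ... goto out; ... } finally { F; }
   out:

   the escaping goto gets a queue entry: its REPL_STMT is lowered to a
   branch into the finally code (plus a finally_tmp assignment in the
   switch implementation below), while CONT_STMT holds the original
   "goto out;", re-emitted once the finally code has run.  */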

/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gimple try_finally_expr;
  gimple top_p;

  /* While lowering, a top_p is usually expanded into multiple statements,
     so we need the following field to store them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  struct pointer_map_t *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  VEC(tree,heap) *dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;
  void **slot;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
        if (tf->goto_queue[i].stmt.g == stmt.g)
          return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = pointer_map_create ();
      for (i = 0; i < tf->goto_queue_active; i++)
        {
          slot = pointer_map_insert (tf->goto_queue_map,
                                     tf->goto_queue[i].stmt.g);
          gcc_assert (*slot == NULL);
          *slot = &tf->goto_queue[i];
        }
    }

  slot = pointer_map_contains (tf->goto_queue_map, stmt.g);
  if (slot != NULL)
    return ((struct goto_queue_node *) *slot)->repl_stmt;

  return NULL;
}

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
                                gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}

/* The real work of replace_goto_queue.  Returns with TSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
                      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
        {
          gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
          gsi_remove (gsi, false);
          return;
        }
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler (stmt), tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      replace_goto_queue_stmt_list (gimple_eh_else_n_body (stmt), tf);
      replace_goto_queue_stmt_list (gimple_eh_else_e_body (stmt), tf);
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (eh_seq, tf);
}

/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
        = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->is_label = is_label;
}

/* Record label LABEL in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array)
    {
      tf->dest_array = VEC_alloc (tree, heap, 10);
      VEC_quick_push (tree, tf->dest_array, label);
      index = 0;
    }
  else
    {
      int n = VEC_length (tree, tf->dest_array);
      for (index = 0; index < n; ++index)
        if (VEC_index (tree, tf->dest_array, index) == label)
          break;
      if (index == n)
        VEC_safe_push (tree, heap, tf->dest_array, label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      new_stmt.tp = gimple_op_ptr (stmt, 2);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt));
      new_stmt.tp = gimple_op_ptr (stmt, 3);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt));
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false);
      break;

    default:
      gcc_unreachable ();
    }
}


#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

       int x;
       int foo (void)
       {
         x = 0;
         try {
           return x;
         } finally {
           x++;
         }
       }

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (!q->repl_stmt)
    q->repl_stmt = gimple_seq_alloc ();

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
                     struct leh_tf_state *tf)
{
  gimple x;

  gcc_assert (q->is_label);
  if (!q->repl_stmt)
    q->repl_stmt = gimple_seq_alloc ();

  q->cont_stmt = gimple_build_goto (VEC_index (tree, tf->dest_array, q->index));

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  gimple x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}

/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
        break;
      region = region->outer;
      if (region == NULL)
        break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
	try { body; } catch { stuff; }
   to
	normal_sequence:
	  body;
	  over:
	eh_sequence:
	  landing_pad:
	  stuff;
	  goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gimple tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
        over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state)
{
  gimple region = NULL;
  gimple_seq new_seq;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}

/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline gimple
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return x;
    }
  return NULL;
}

/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.
   First, we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

	try {
	  finally_block;
	} catch {
	  if (fintmp == eh_edge)
	    protect_cleanup_actions;
	}

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
                               struct leh_state *this_state,
                               struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x, eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state);
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
                        GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, x);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}

/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
                              struct leh_tf_state *tf)
{
  tree lab;
  gimple x, eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
        }
    }
  else
    {
      lower_eh_constructs_1 (state, finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          emit_post_landing_pad (&eh_seq, tf->region);

          x = gimple_build_goto (lab);
          gimple_seq_add_stmt (&eh_seq, x);
        }
    }
}

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  gimple x;
  gimple_seq finally;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  x = get_eh_else (finally);
  if (x)
    {
      if (tf->may_throw)
        finally = gimple_eh_else_e_body (x);
      else
        finally = gimple_eh_else_n_body (x);
    }

  lower_eh_constructs_1 (state, finally);

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
         the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (VEC_index (tree, tf->dest_array, 0) == tf->fallthru_label)
        {
          /* Reachable by goto to fallthru label only.  Redirect it
             to the new label (already created, sadly), and do not
             emit the final branch out, or the fallthru label.  */
          tf->fallthru_label = NULL;
          return;
        }
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x, eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state);
      lower_eh_constructs_1 (state, seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
         since it is only emitted once.  */
      if (eh_else)
        seq = gimple_eh_else_e_body (eh_else);
      else
        seq = lower_try_finally_dup_block (finally, state);
      lower_eh_constructs_1 (state, seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
        struct goto_queue_node *q;
        tree label;
      } *labels;

      return_index = VEC_length (tree, tf->dest_array);
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
        {
          index = q->index < 0 ? return_index : q->index;

          if (!labels[index].q)
            labels[index].q = q;
        }

      for (index = 0; index < return_index + 1; index++)
        {
          tree lab;

          q = labels[index].q;
          if (! q)
            continue;

          lab = labels[index].label
            = create_artificial_label (tf_loc);

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);

          x = gimple_build_label (lab);
          gimple_seq_add_stmt (&new_stmt, x);

          seq = lower_try_finally_dup_block (finally, state);
          lower_eh_constructs_1 (state, seq);
          gimple_seq_add_seq (&new_stmt, seq);

          gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
          maybe_record_in_goto_queue (state, q->cont_stmt);
        }

      for (q = tf->goto_queue; q < qe; q++)
        {
          tree lab;

          index = q->index < 0 ? return_index : q->index;

          if (labels[index].q == q)
            continue;

          lab = labels[index].label;

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);
        }

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */
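
/* Schematically (the label and variable names below are illustrative
   only), each incoming edge I is rewritten to set a selector before
   branching to the shared finally block:

	finally_tmp = I; goto finally_label;
	...
      finally_label:
	finally-body;
	switch (finally_tmp)
	  {
	  case fallthru_index: goto fallthru_label;
	  case I: goto original_destination_I;
	  ...
	  }
*/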

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  VEC (tree,heap) *case_label_vec;
  gimple_seq switch_body;
  gimple x, eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  struct pointer_map_t *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  switch_body = gimple_seq_alloc ();
  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = VEC_length (tree, tf->dest_array);
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use VEC_quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec = VEC_alloc (tree, heap, ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node,
                                              fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      VEC_quick_push (tree, case_label_vec, last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
          emit_resx (&eh_seq, tf->region);
        }

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      VEC_quick_push (tree, case_label_vec, last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod;
      int switch_id;
      unsigned int case_index;

      mod = gimple_seq_alloc ();

      if (q->index < 0)
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node,
                                                  return_index));
          gimple_seq_add_stmt (&mod, x);
          do_return_redirection (q, finally_label, mod);
          switch_id = return_index;
        }
      else
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node,
                                                  q->index));
          gimple_seq_add_stmt (&mod, x);
          do_goto_redirection (q, finally_label, mod, tf);
          switch_id = q->index;
        }

      case_index = j + q->index;
      if (VEC_length (tree, case_label_vec) <= case_index
          || !VEC_index (tree, case_label_vec, case_index))
        {
          tree case_lab;
          void **slot;
          tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
                                       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
            cont_map = pointer_map_create ();
          slot = pointer_map_insert (cont_map, case_lab);
          *slot = q->cont_stmt;
          VEC_quick_push (tree, case_label_vec, case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;
      void **slot;

      last_case = VEC_index (tree, case_label_vec, j);

      gcc_assert (last_case);
      gcc_assert (cont_map);

      slot = pointer_map_contains (cont_map, last_case);
      gcc_assert (slot);
      cont_stmt = *(gimple *) slot;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    pointer_map_destroy (cont_map);

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch_vec (finally_tmp, last_case,
                                         case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}

/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  gimple eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
        return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
            return false;
        }
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = count_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
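
/* A worked instance of the heuristic above, to make the trade-off
   concrete (the numbers are illustrative): a finally block of 10 insns
   with 3 destinations gives f_estimate = (10 + 1) * 3 = 33 and
   sw_estimate = 10 + 2 * 3 = 16.  At -O1 we duplicate, since 33 < 40;
   when optimizing for size we emit the switch, since 33 >= 16.  */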

/* REG is the enclosing region for a possible cleanup region, or the region
   itself.  Returns TRUE if such a region would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */

static bool
cleanup_is_dead_in (eh_region reg)
{
  while (reg && reg->type == ERT_CLEANUP)
    reg = reg->outer;
  return (reg && reg->type == ERT_MUST_NOT_THROW);
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gimple tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p && !cleanup_is_dead_in (state->cur_region))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));

  /* Determine if the try block is escaped through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = VEC_length (tree, this_tf.dest_array);
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
                                    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     block, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  VEC_free (tree, heap, this_tf.dest_array);
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    pointer_map_destroy (this_tf.goto_queue_map);

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
        eh_seq = old_eh_seq;
      else
        {
          gimple_seq new_eh_seq = eh_seq;
          eh_seq = old_eh_seq;
          gimple_seq_add_seq (&eh_seq, new_eh_seq);
        }
    }

  return this_tf.top_p_seq;
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */

static gimple_seq
lower_catch (struct leh_state *state, gimple tp)
{
  eh_region try_region = NULL;
  struct leh_state this_state = *state;
  gimple_stmt_iterator gsi;
  tree out_label;
  gimple_seq new_seq;
  gimple x;
  location_t try_catch_loc = gimple_location (tp);

  if (flag_exceptions)
    {
      try_region = gen_eh_region_try (state->cur_region);
      this_state.cur_region = try_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));

  if (!eh_region_may_contain_throw (try_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  emit_eh_dispatch (&new_seq, try_region);
  emit_resx (&new_seq, try_region);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = try_region;

  out_label = NULL;
  for (gsi = gsi_start (gimple_try_cleanup (tp));
       !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      eh_catch c;
      gimple gcatch;
      gimple_seq handler;

      gcatch = gsi_stmt (gsi);
      c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));

      handler = gimple_catch_handler (gcatch);
      lower_eh_constructs_1 (&this_state, handler);

      c->label = create_artificial_label (UNKNOWN_LOCATION);
      x = gimple_build_label (c->label);
      gimple_seq_add_stmt (&new_seq, x);

      gimple_seq_add_seq (&new_seq, handler);

      if (gimple_seq_may_fallthru (new_seq))
        {
          if (!out_label)
            out_label = create_artificial_label (try_catch_loc);

          x = gimple_build_goto (out_label);
          gimple_seq_add_stmt (&new_seq, x);
        }
      if (!c->type_list)
        break;
    }

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, try_region, out_label);
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
   region trees that record all the magic.  */

static gimple_seq
lower_eh_filter (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  gimple inner, x;
  gimple_seq new_seq;

  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));

  if (flag_exceptions)
    {
      this_region = gen_eh_region_allowed (state->cur_region,
                                           gimple_eh_filter_types (inner));
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));

  if (!eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  this_state.cur_region = state->cur_region;
  this_state.ehp_region = this_region;

  emit_eh_dispatch (&new_seq, this_region);
  emit_resx (&new_seq, this_region);

  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
  x = gimple_build_label (this_region->u.allowed.label);
  gimple_seq_add_stmt (&new_seq, x);

  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure (inner));
  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, this_region, NULL);
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
   plus the exception region trees that record all the magic.  */

static gimple_seq
lower_eh_must_not_throw (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;

  if (flag_exceptions)
    {
      gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
      eh_region this_region;

      this_region = gen_eh_region_must_not_throw (state->cur_region);
      this_region->u.must_not_throw.failure_decl
        = gimple_eh_must_not_throw_fndecl (inner);
      this_region->u.must_not_throw.failure_loc = gimple_location (tp);

      /* In order to get mangling applied to this decl, we must mark it
         used now.  Otherwise, pass_ipa_free_lang_data won't think it
         needs to happen.  */
      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;

      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));

  return gimple_try_eval (tp);
}

/* Implement a cleanup expression.  This is similar to try-finally,
   except that we only execute the cleanup block for exception edges.  */

static gimple_seq
lower_cleanup (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  struct leh_tf_state fake_tf;
  gimple_seq result;
  bool cleanup_dead = cleanup_is_dead_in (state->cur_region);

  if (flag_exceptions && !cleanup_dead)
    {
      this_region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval (tp));

  if (cleanup_dead || !eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  /* Build enough of a try-finally state so that we can reuse
     honor_protect_cleanup_actions.  */
  memset (&fake_tf, 0, sizeof (fake_tf));
  fake_tf.top_p = fake_tf.try_finally_expr = tp;
  fake_tf.outer = state;
  fake_tf.region = this_region;
  fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
  fake_tf.may_throw = true;

  honor_protect_cleanup_actions (state, NULL, &fake_tf);

  if (fake_tf.may_throw)
    {
      /* In this case honor_protect_cleanup_actions had nothing to do,
         and we should process this normally.  */
      lower_eh_constructs_1 (state, gimple_try_cleanup (tp));
      result = frob_into_branch_around (tp, this_region,
                                        fake_tf.fallthru_label);
    }
  else
    {
      /* In this case honor_protect_cleanup_actions did nearly all of
         the work.  All we have left is to append the fallthru_label.  */

      result = gimple_try_eval (tp);
      if (fake_tf.fallthru_label)
        {
          gimple x = gimple_build_label (fake_tf.fallthru_label);
          gimple_seq_add_stmt (&result, x);
        }
    }
  return result;
}

/* Main loop for lowering eh constructs.  Also moves gsi to the next
   statement.  */

static void
lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
{
  gimple_seq replace;
  gimple x;
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
        tree fndecl = gimple_call_fndecl (stmt);
        tree rhs, lhs;

        if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
          switch (DECL_FUNCTION_CODE (fndecl))
            {
            case BUILT_IN_EH_POINTER:
              /* The front end may have generated a call to
                 __builtin_eh_pointer (0) within a catch region.
Replace 1922 this zero argument with the current catch region number. */ 1923 if (state->ehp_region) 1924 { 1925 tree nr = build_int_cst (integer_type_node, 1926 state->ehp_region->index); 1927 gimple_call_set_arg (stmt, 0, nr); 1928 } 1929 else 1930 { 1931 /* The user has done something silly. Remove it. */ 1932 rhs = null_pointer_node; 1933 goto do_replace; 1934 } 1935 break; 1936 1937 case BUILT_IN_EH_FILTER: 1938 /* ??? This should never appear, but since it's a builtin it 1939 is accessible to abuse by users. Just remove it and 1940 replace the use with the arbitrary value zero. */ 1941 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0); 1942 do_replace: 1943 lhs = gimple_call_lhs (stmt); 1944 x = gimple_build_assign (lhs, rhs); 1945 gsi_insert_before (gsi, x, GSI_SAME_STMT); 1946 /* FALLTHRU */ 1947 1948 case BUILT_IN_EH_COPY_VALUES: 1949 /* Likewise this should not appear. Remove it. */ 1950 gsi_remove (gsi, true); 1951 return; 1952 1953 default: 1954 break; 1955 } 1956 } 1957 /* FALLTHRU */ 1958 1959 case GIMPLE_ASSIGN: 1960 /* If the stmt can throw, use a new temporary for the assignment 1961 to a LHS. This makes sure the old value of the LHS is 1962 available on the EH edge. Only do so for statements that 1963 potentially fall thru (e.g. no noreturn calls), otherwise 1964 this new assignment might create fake fallthru regions. */ 1965 if (stmt_could_throw_p (stmt) 1966 && gimple_has_lhs (stmt) 1967 && gimple_stmt_may_fallthru (stmt) 1968 && !tree_could_throw_p (gimple_get_lhs (stmt)) 1969 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt)))) 1970 { 1971 tree lhs = gimple_get_lhs (stmt); 1972 tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL); 1973 gimple s = gimple_build_assign (lhs, tmp); 1974 gimple_set_location (s, gimple_location (stmt)); 1975 gimple_set_block (s, gimple_block (stmt)); 1976 gimple_set_lhs (stmt, tmp); 1977 if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE 1978 || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE) 1979 DECL_GIMPLE_REG_P (tmp) = 1; 1980 gsi_insert_after (gsi, s, GSI_SAME_STMT); 1981 } 1982 /* Look for things that can throw exceptions, and record them. */ 1983 if (state->cur_region && stmt_could_throw_p (stmt)) 1984 { 1985 record_stmt_eh_region (state->cur_region, stmt); 1986 note_eh_region_may_contain_throw (state->cur_region); 1987 } 1988 break; 1989 1990 case GIMPLE_COND: 1991 case GIMPLE_GOTO: 1992 case GIMPLE_RETURN: 1993 maybe_record_in_goto_queue (state, stmt); 1994 break; 1995 1996 case GIMPLE_SWITCH: 1997 verify_norecord_switch_expr (state, stmt); 1998 break; 1999 2000 case GIMPLE_TRY: 2001 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY) 2002 replace = lower_try_finally (state, stmt); 2003 else 2004 { 2005 x = gimple_seq_first_stmt (gimple_try_cleanup (stmt)); 2006 if (!x) 2007 { 2008 replace = gimple_try_eval (stmt); 2009 lower_eh_constructs_1 (state, replace); 2010 } 2011 else 2012 switch (gimple_code (x)) 2013 { 2014 case GIMPLE_CATCH: 2015 replace = lower_catch (state, stmt); 2016 break; 2017 case GIMPLE_EH_FILTER: 2018 replace = lower_eh_filter (state, stmt); 2019 break; 2020 case GIMPLE_EH_MUST_NOT_THROW: 2021 replace = lower_eh_must_not_throw (state, stmt); 2022 break; 2023 case GIMPLE_EH_ELSE: 2024 /* This code is only valid with GIMPLE_TRY_FINALLY. */ 2025 gcc_unreachable (); 2026 default: 2027 replace = lower_cleanup (state, stmt); 2028 break; 2029 } 2030 } 2031 2032 /* Remove the old stmt and insert the transformed sequence 2033 instead.
*/ 2034 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT); 2035 gsi_remove (gsi, true); 2036 2037 /* Return since we don't want gsi_next (). */ 2038 return; 2039 2040 case GIMPLE_EH_ELSE: 2041 /* We should be eliminating this in lower_try_finally et al. */ 2042 gcc_unreachable (); 2043 2044 default: 2045 /* A type, a decl, or some kind of statement that we're not 2046 interested in. Don't walk them. */ 2047 break; 2048 } 2049 2050 gsi_next (gsi); 2051 } 2052 2053 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */ 2054 2055 static void 2056 lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq) 2057 { 2058 gimple_stmt_iterator gsi; 2059 for (gsi = gsi_start (seq); !gsi_end_p (gsi);) 2060 lower_eh_constructs_2 (state, &gsi); 2061 } 2062 2063 static unsigned int 2064 lower_eh_constructs (void) 2065 { 2066 struct leh_state null_state; 2067 gimple_seq bodyp; 2068 2069 bodyp = gimple_body (current_function_decl); 2070 if (bodyp == NULL) 2071 return 0; 2072 2073 finally_tree = htab_create (31, struct_ptr_hash, struct_ptr_eq, free); 2074 eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL); 2075 memset (&null_state, 0, sizeof (null_state)); 2076 2077 collect_finally_tree_1 (bodyp, NULL); 2078 lower_eh_constructs_1 (&null_state, bodyp); 2079 2080 /* We assume there's a return statement, or something, at the end of 2081 the function, and thus plopping the EH sequence afterward won't 2082 change anything. */ 2083 gcc_assert (!gimple_seq_may_fallthru (bodyp)); 2084 gimple_seq_add_seq (&bodyp, eh_seq); 2085 2086 /* We assume that since BODYP already existed, adding EH_SEQ to it 2087 didn't change its value, and we don't have to re-set the function. */ 2088 gcc_assert (bodyp == gimple_body (current_function_decl)); 2089 2090 htab_delete (finally_tree); 2091 BITMAP_FREE (eh_region_may_contain_throw_map); 2092 eh_seq = NULL; 2093 2094 /* If this function needs a language specific EH personality routine 2095 and the frontend didn't already set one, do so now. */ 2096 if (function_needs_eh_personality (cfun) == eh_personality_lang 2097 && !DECL_FUNCTION_PERSONALITY (current_function_decl)) 2098 DECL_FUNCTION_PERSONALITY (current_function_decl) 2099 = lang_hooks.eh_personality (); 2100 2101 return 0; 2102 } 2103 2104 struct gimple_opt_pass pass_lower_eh = 2105 { 2106 { 2107 GIMPLE_PASS, 2108 "eh", /* name */ 2109 NULL, /* gate */ 2110 lower_eh_constructs, /* execute */ 2111 NULL, /* sub */ 2112 NULL, /* next */ 2113 0, /* static_pass_number */ 2114 TV_TREE_EH, /* tv_id */ 2115 PROP_gimple_lcf, /* properties_required */ 2116 PROP_gimple_leh, /* properties_provided */ 2117 0, /* properties_destroyed */ 2118 0, /* todo_flags_start */ 2119 0 /* todo_flags_finish */ 2120 } 2121 }; 2122 2123 /* Create the multiple edges from an EH_DISPATCH statement to all of 2124 the possible handlers for its EH region. Return true if there's 2125 no fallthru edge; false if there is. */ 2126 2127 bool 2128 make_eh_dispatch_edges (gimple stmt) 2129 { 2130 eh_region r; 2131 eh_catch c; 2132 basic_block src, dst; 2133 2134 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt)); 2135 src = gimple_bb (stmt); 2136 2137 switch (r->type) 2138 { 2139 case ERT_TRY: 2140 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) 2141 { 2142 dst = label_to_block (c->label); 2143 make_edge (src, dst, 0); 2144 2145 /* A catch-all handler doesn't have a fallthru.
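   (Its type_list is NULL, so it matches any exception that reaches the
   dispatch; no handler after it can be reached, hence there is no
   fallthru edge and we can stop making edges here.)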
*/ 2146 if (c->type_list == NULL) 2147 return false; 2148 } 2149 break; 2150 2151 case ERT_ALLOWED_EXCEPTIONS: 2152 dst = label_to_block (r->u.allowed.label); 2153 make_edge (src, dst, 0); 2154 break; 2155 2156 default: 2157 gcc_unreachable (); 2158 } 2159 2160 return true; 2161 } 2162 2163 /* Create the single EH edge from STMT to its nearest landing pad, 2164 if there is such a landing pad within the current function. */ 2165 2166 void 2167 make_eh_edges (gimple stmt) 2168 { 2169 basic_block src, dst; 2170 eh_landing_pad lp; 2171 int lp_nr; 2172 2173 lp_nr = lookup_stmt_eh_lp (stmt); 2174 if (lp_nr <= 0) 2175 return; 2176 2177 lp = get_eh_landing_pad_from_number (lp_nr); 2178 gcc_assert (lp != NULL); 2179 2180 src = gimple_bb (stmt); 2181 dst = label_to_block (lp->post_landing_pad); 2182 make_edge (src, dst, EDGE_EH); 2183 } 2184 2185 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree; 2186 do not actually perform the final edge redirection. 2187 2188 CHANGE_REGION is true when we're being called from cleanup_empty_eh and 2189 we intend to change the destination EH region as well; this means 2190 EH_LANDING_PAD_NR must already be set on the destination block label. 2191 If false, we're being called from generic cfg manipulation code and we 2192 should preserve our place within the region tree. */ 2193 2194 static void 2195 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region) 2196 { 2197 eh_landing_pad old_lp, new_lp; 2198 basic_block old_bb; 2199 gimple throw_stmt; 2200 int old_lp_nr, new_lp_nr; 2201 tree old_label, new_label; 2202 edge_iterator ei; 2203 edge e; 2204 2205 old_bb = edge_in->dest; 2206 old_label = gimple_block_label (old_bb); 2207 old_lp_nr = EH_LANDING_PAD_NR (old_label); 2208 gcc_assert (old_lp_nr > 0); 2209 old_lp = get_eh_landing_pad_from_number (old_lp_nr); 2210 2211 throw_stmt = last_stmt (edge_in->src); 2212 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr); 2213 2214 new_label = gimple_block_label (new_bb); 2215 2216 /* Look for an existing region that might be using NEW_BB already. */ 2217 new_lp_nr = EH_LANDING_PAD_NR (new_label); 2218 if (new_lp_nr) 2219 { 2220 new_lp = get_eh_landing_pad_from_number (new_lp_nr); 2221 gcc_assert (new_lp); 2222 2223 /* Unless CHANGE_REGION is true, the new and old landing pad 2224 had better be associated with the same EH region. */ 2225 gcc_assert (change_region || new_lp->region == old_lp->region); 2226 } 2227 else 2228 { 2229 new_lp = NULL; 2230 gcc_assert (!change_region); 2231 } 2232 2233 /* Notice when we redirect the last EH edge away from OLD_BB. */ 2234 FOR_EACH_EDGE (e, ei, old_bb->preds) 2235 if (e != edge_in && (e->flags & EDGE_EH)) 2236 break; 2237 2238 if (new_lp) 2239 { 2240 /* NEW_LP already exists. If there are still edges into OLD_LP, 2241 there's nothing to do with the EH tree. If there are no more 2242 edges into OLD_LP, then we want to remove OLD_LP as it is unused. 2243 If CHANGE_REGION is true, then our caller is expecting to remove 2244 the landing pad. */ 2245 if (e == NULL && !change_region) 2246 remove_eh_landing_pad (old_lp); 2247 } 2248 else 2249 { 2250 /* No correct landing pad exists. If there are no more edges 2251 into OLD_LP, then we can simply re-use the existing landing pad. 2252 Otherwise, we have to create a new landing pad. 
*/ 2253 if (e == NULL) 2254 { 2255 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0; 2256 new_lp = old_lp; 2257 } 2258 else 2259 new_lp = gen_eh_landing_pad (old_lp->region); 2260 new_lp->post_landing_pad = new_label; 2261 EH_LANDING_PAD_NR (new_label) = new_lp->index; 2262 } 2263 2264 /* Maybe move the throwing statement to the new region. */ 2265 if (old_lp != new_lp) 2266 { 2267 remove_stmt_from_eh_lp (throw_stmt); 2268 add_stmt_to_eh_lp (throw_stmt, new_lp->index); 2269 } 2270 } 2271 2272 /* Redirect EH edge E to NEW_BB. */ 2273 2274 edge 2275 redirect_eh_edge (edge edge_in, basic_block new_bb) 2276 { 2277 redirect_eh_edge_1 (edge_in, new_bb, false); 2278 return ssa_redirect_edge (edge_in, new_bb); 2279 } 2280 2281 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the 2282 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB. 2283 The actual edge update will happen in the caller. */ 2284 2285 void 2286 redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb) 2287 { 2288 tree new_lab = gimple_block_label (new_bb); 2289 bool any_changed = false; 2290 basic_block old_bb; 2291 eh_region r; 2292 eh_catch c; 2293 2294 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt)); 2295 switch (r->type) 2296 { 2297 case ERT_TRY: 2298 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) 2299 { 2300 old_bb = label_to_block (c->label); 2301 if (old_bb == e->dest) 2302 { 2303 c->label = new_lab; 2304 any_changed = true; 2305 } 2306 } 2307 break; 2308 2309 case ERT_ALLOWED_EXCEPTIONS: 2310 old_bb = label_to_block (r->u.allowed.label); 2311 gcc_assert (old_bb == e->dest); 2312 r->u.allowed.label = new_lab; 2313 any_changed = true; 2314 break; 2315 2316 default: 2317 gcc_unreachable (); 2318 } 2319 2320 gcc_assert (any_changed); 2321 } 2322 2323 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */ 2324 2325 bool 2326 operation_could_trap_helper_p (enum tree_code op, 2327 bool fp_operation, 2328 bool honor_trapv, 2329 bool honor_nans, 2330 bool honor_snans, 2331 tree divisor, 2332 bool *handled) 2333 { 2334 *handled = true; 2335 switch (op) 2336 { 2337 case TRUNC_DIV_EXPR: 2338 case CEIL_DIV_EXPR: 2339 case FLOOR_DIV_EXPR: 2340 case ROUND_DIV_EXPR: 2341 case EXACT_DIV_EXPR: 2342 case CEIL_MOD_EXPR: 2343 case FLOOR_MOD_EXPR: 2344 case ROUND_MOD_EXPR: 2345 case TRUNC_MOD_EXPR: 2346 case RDIV_EXPR: 2347 if (honor_snans || honor_trapv) 2348 return true; 2349 if (fp_operation) 2350 return flag_trapping_math; 2351 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor)) 2352 return true; 2353 return false; 2354 2355 case LT_EXPR: 2356 case LE_EXPR: 2357 case GT_EXPR: 2358 case GE_EXPR: 2359 case LTGT_EXPR: 2360 /* Some floating point comparisons may trap. */ 2361 return honor_nans; 2362 2363 case EQ_EXPR: 2364 case NE_EXPR: 2365 case UNORDERED_EXPR: 2366 case ORDERED_EXPR: 2367 case UNLT_EXPR: 2368 case UNLE_EXPR: 2369 case UNGT_EXPR: 2370 case UNGE_EXPR: 2371 case UNEQ_EXPR: 2372 return honor_snans; 2373 2374 case CONVERT_EXPR: 2375 case FIX_TRUNC_EXPR: 2376 /* Conversion of floating point might trap. */ 2377 return honor_nans; 2378 2379 case NEGATE_EXPR: 2380 case ABS_EXPR: 2381 case CONJ_EXPR: 2382 /* These operations don't trap with floating point. */ 2383 if (honor_trapv) 2384 return true; 2385 return false; 2386 2387 case PLUS_EXPR: 2388 case MINUS_EXPR: 2389 case MULT_EXPR: 2390 /* Any floating arithmetic may trap. 
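   For instance, with -ftrapping-math an IEEE overflow, underflow or
   inexact result may raise a floating-point exception, and with -ftrapv
   a signed add, subtract or multiply traps on overflow; both situations
   are conservatively treated as trapping here.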
*/ 2391 if (fp_operation && flag_trapping_math) 2392 return true; 2393 if (honor_trapv) 2394 return true; 2395 return false; 2396 2397 case COMPLEX_EXPR: 2398 case CONSTRUCTOR: 2399 /* Constructing an object cannot trap. */ 2400 return false; 2401 2402 default: 2403 /* Any floating arithmetic may trap. */ 2404 if (fp_operation && flag_trapping_math) 2405 return true; 2406 2407 *handled = false; 2408 return false; 2409 } 2410 } 2411 2412 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied 2413 on floating-point values. HONOR_TRAPV is true if OP is applied on integer 2414 type operands that may trap. If OP is a division operator, DIVISOR contains 2415 the value of the divisor. */ 2416 2417 bool 2418 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv, 2419 tree divisor) 2420 { 2421 bool honor_nans = (fp_operation && flag_trapping_math 2422 && !flag_finite_math_only); 2423 bool honor_snans = fp_operation && flag_signaling_nans != 0; 2424 bool handled; 2425 2426 if (TREE_CODE_CLASS (op) != tcc_comparison 2427 && TREE_CODE_CLASS (op) != tcc_unary 2428 && TREE_CODE_CLASS (op) != tcc_binary) 2429 return false; 2430 2431 return operation_could_trap_helper_p (op, fp_operation, honor_trapv, 2432 honor_nans, honor_snans, divisor, 2433 &handled); 2434 } 2435 2436 /* Return true if EXPR can trap, as in dereferencing an invalid pointer 2437 location or floating point arithmetic. C.f. the rtl version, may_trap_p. 2438 This routine expects only GIMPLE lhs or rhs input. */ 2439 2440 bool 2441 tree_could_trap_p (tree expr) 2442 { 2443 enum tree_code code; 2444 bool fp_operation = false; 2445 bool honor_trapv = false; 2446 tree t, base, div = NULL_TREE; 2447 2448 if (!expr) 2449 return false; 2450 2451 code = TREE_CODE (expr); 2452 t = TREE_TYPE (expr); 2453 2454 if (t) 2455 { 2456 if (COMPARISON_CLASS_P (expr)) 2457 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0))); 2458 else 2459 fp_operation = FLOAT_TYPE_P (t); 2460 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t); 2461 } 2462 2463 if (TREE_CODE_CLASS (code) == tcc_binary) 2464 div = TREE_OPERAND (expr, 1); 2465 if (operation_could_trap_p (code, fp_operation, honor_trapv, div)) 2466 return true; 2467 2468 restart: 2469 switch (code) 2470 { 2471 case TARGET_MEM_REF: 2472 if (TREE_CODE (TMR_BASE (expr)) == ADDR_EXPR 2473 && !TMR_INDEX (expr) && !TMR_INDEX2 (expr)) 2474 return false; 2475 return !TREE_THIS_NOTRAP (expr); 2476 2477 case COMPONENT_REF: 2478 case REALPART_EXPR: 2479 case IMAGPART_EXPR: 2480 case BIT_FIELD_REF: 2481 case VIEW_CONVERT_EXPR: 2482 case WITH_SIZE_EXPR: 2483 expr = TREE_OPERAND (expr, 0); 2484 code = TREE_CODE (expr); 2485 goto restart; 2486 2487 case ARRAY_RANGE_REF: 2488 base = TREE_OPERAND (expr, 0); 2489 if (tree_could_trap_p (base)) 2490 return true; 2491 if (TREE_THIS_NOTRAP (expr)) 2492 return false; 2493 return !range_in_array_bounds_p (expr); 2494 2495 case ARRAY_REF: 2496 base = TREE_OPERAND (expr, 0); 2497 if (tree_could_trap_p (base)) 2498 return true; 2499 if (TREE_THIS_NOTRAP (expr)) 2500 return false; 2501 return !in_array_bounds_p (expr); 2502 2503 case MEM_REF: 2504 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR) 2505 return false; 2506 /* Fallthru. */ 2507 case INDIRECT_REF: 2508 return !TREE_THIS_NOTRAP (expr); 2509 2510 case ASM_EXPR: 2511 return TREE_THIS_VOLATILE (expr); 2512 2513 case CALL_EXPR: 2514 t = get_callee_fndecl (expr); 2515 /* Assume that calls to weak functions may trap. 
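   A weak symbol may be left undefined at link time and then resolve to
   a null address, in which case the call itself could fault; the
   FUNCTION_DECL case below refines this when we can prove a definition
   exists.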
*/ 2516 if (!t || !DECL_P (t)) 2517 return true; 2518 if (DECL_WEAK (t)) 2519 return tree_could_trap_p (t); 2520 return false; 2521 2522 case FUNCTION_DECL: 2523 /* Assume that accesses to weak functions may trap, unless we know 2524 they are certainly defined in the current TU or in some other 2525 LTO partition. */ 2526 if (DECL_WEAK (expr)) 2527 { 2528 struct cgraph_node *node; 2529 if (!DECL_EXTERNAL (expr)) 2530 return false; 2531 node = cgraph_function_node (cgraph_get_node (expr), NULL); 2532 if (node && node->in_other_partition) 2533 return false; 2534 return true; 2535 } 2536 return false; 2537 2538 case VAR_DECL: 2539 /* Assume that accesses to weak vars may trap, unless we know 2540 they are certainly defined in the current TU or in some other 2541 LTO partition. */ 2542 if (DECL_WEAK (expr)) 2543 { 2544 struct varpool_node *node; 2545 if (!DECL_EXTERNAL (expr)) 2546 return false; 2547 node = varpool_variable_node (varpool_get_node (expr), NULL); 2548 if (node && node->in_other_partition) 2549 return false; 2550 return true; 2551 } 2552 return false; 2553 2554 default: 2555 return false; 2556 } 2557 } 2558 2559 2560 /* Helper for stmt_could_throw_p. Return true if STMT (assumed to be 2561 an assignment or a conditional) may throw. */ 2562 2563 static bool 2564 stmt_could_throw_1_p (gimple stmt) 2565 { 2566 enum tree_code code = gimple_expr_code (stmt); 2567 bool honor_nans = false; 2568 bool honor_snans = false; 2569 bool fp_operation = false; 2570 bool honor_trapv = false; 2571 tree t; 2572 size_t i; 2573 bool handled, ret; 2574 2575 if (TREE_CODE_CLASS (code) == tcc_comparison 2576 || TREE_CODE_CLASS (code) == tcc_unary 2577 || TREE_CODE_CLASS (code) == tcc_binary) 2578 { 2579 if (is_gimple_assign (stmt) 2580 && TREE_CODE_CLASS (code) == tcc_comparison) 2581 t = TREE_TYPE (gimple_assign_rhs1 (stmt)); 2582 else if (gimple_code (stmt) == GIMPLE_COND) 2583 t = TREE_TYPE (gimple_cond_lhs (stmt)); 2584 else 2585 t = gimple_expr_type (stmt); 2586 fp_operation = FLOAT_TYPE_P (t); 2587 if (fp_operation) 2588 { 2589 honor_nans = flag_trapping_math && !flag_finite_math_only; 2590 honor_snans = flag_signaling_nans != 0; 2591 } 2592 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t)) 2593 honor_trapv = true; 2594 } 2595 2596 /* Check if the main expression may trap. */ 2597 t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL; 2598 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv, 2599 honor_nans, honor_snans, t, 2600 &handled); 2601 if (handled) 2602 return ret; 2603 2604 /* If the expression does not trap, see if any of the individual operands may 2605 trap. */ 2606 for (i = 0; i < gimple_num_ops (stmt); i++) 2607 if (tree_could_trap_p (gimple_op (stmt, i))) 2608 return true; 2609 2610 return false; 2611 } 2612 2613 2614 /* Return true if statement STMT could throw an exception. */ 2615 2616 bool 2617 stmt_could_throw_p (gimple stmt) 2618 { 2619 if (!flag_exceptions) 2620 return false; 2621 2622 /* The only statements that can throw an exception are assignments, 2623 conditionals, calls, resx, and asms.
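   Of these, a resx always can; a call can unless it is marked nothrow;
   and assignments, conditionals and asms can only under
   -fnon-call-exceptions (cfun->can_throw_non_call_exceptions), with
   asms further restricted to volatile ones.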
*/ 2624 switch (gimple_code (stmt)) 2625 { 2626 case GIMPLE_RESX: 2627 return true; 2628 2629 case GIMPLE_CALL: 2630 return !gimple_call_nothrow_p (stmt); 2631 2632 case GIMPLE_ASSIGN: 2633 case GIMPLE_COND: 2634 if (!cfun->can_throw_non_call_exceptions) 2635 return false; 2636 return stmt_could_throw_1_p (stmt); 2637 2638 case GIMPLE_ASM: 2639 if (!cfun->can_throw_non_call_exceptions) 2640 return false; 2641 return gimple_asm_volatile_p (stmt); 2642 2643 default: 2644 return false; 2645 } 2646 } 2647 2648 2649 /* Return true if expression T could throw an exception. */ 2650 2651 bool 2652 tree_could_throw_p (tree t) 2653 { 2654 if (!flag_exceptions) 2655 return false; 2656 if (TREE_CODE (t) == MODIFY_EXPR) 2657 { 2658 if (cfun->can_throw_non_call_exceptions 2659 && tree_could_trap_p (TREE_OPERAND (t, 0))) 2660 return true; 2661 t = TREE_OPERAND (t, 1); 2662 } 2663 2664 if (TREE_CODE (t) == WITH_SIZE_EXPR) 2665 t = TREE_OPERAND (t, 0); 2666 if (TREE_CODE (t) == CALL_EXPR) 2667 return (call_expr_flags (t) & ECF_NOTHROW) == 0; 2668 if (cfun->can_throw_non_call_exceptions) 2669 return tree_could_trap_p (t); 2670 return false; 2671 } 2672 2673 /* Return true if STMT can throw an exception that is not caught within 2674 the current function (CFUN). */ 2675 2676 bool 2677 stmt_can_throw_external (gimple stmt) 2678 { 2679 int lp_nr; 2680 2681 if (!stmt_could_throw_p (stmt)) 2682 return false; 2683 2684 lp_nr = lookup_stmt_eh_lp (stmt); 2685 return lp_nr == 0; 2686 } 2687 2688 /* Return true if STMT can throw an exception that is caught within 2689 the current function (CFUN). */ 2690 2691 bool 2692 stmt_can_throw_internal (gimple stmt) 2693 { 2694 int lp_nr; 2695 2696 if (!stmt_could_throw_p (stmt)) 2697 return false; 2698 2699 lp_nr = lookup_stmt_eh_lp (stmt); 2700 return lp_nr > 0; 2701 } 2702 2703 /* Given a statement STMT in IFUN, if STMT can no longer throw, then 2704 remove any entry it might have from the EH table. Return true if 2705 any change was made. */ 2706 2707 bool 2708 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt) 2709 { 2710 if (stmt_could_throw_p (stmt)) 2711 return false; 2712 return remove_stmt_from_eh_lp_fn (ifun, stmt); 2713 } 2714 2715 /* Likewise, but always use the current function. */ 2716 2717 bool 2718 maybe_clean_eh_stmt (gimple stmt) 2719 { 2720 return maybe_clean_eh_stmt_fn (cfun, stmt); 2721 } 2722 2723 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced 2724 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT 2725 in the table if it should be in there. Return TRUE if a replacement was 2726 done that may require an EH edge purge. */ 2727 2728 bool 2729 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt) 2730 { 2731 int lp_nr = lookup_stmt_eh_lp (old_stmt); 2732 2733 if (lp_nr != 0) 2734 { 2735 bool new_stmt_could_throw = stmt_could_throw_p (new_stmt); 2736 2737 if (new_stmt == old_stmt && new_stmt_could_throw) 2738 return false; 2739 2740 remove_stmt_from_eh_lp (old_stmt); 2741 if (new_stmt_could_throw) 2742 { 2743 add_stmt_to_eh_lp (new_stmt, lp_nr); 2744 return false; 2745 } 2746 else 2747 return true; 2748 } 2749 2750 return false; 2751 } 2752 2753 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT 2754 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP 2755 operand is the return value of duplicate_eh_regions.
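   Recall the lp_nr encoding used throughout: positive numbers name
   landing pads, negative numbers name MUST_NOT_THROW regions, and zero
   means the statement is unrecorded.  MAP translates an old landing pad
   or region to its duplicate in NEW_FUN; DEFAULT_LP_NR supplies the
   landing pad for statements that were unrecorded in OLD_FUN.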
*/ 2756 2757 bool 2758 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt, 2759 struct function *old_fun, gimple old_stmt, 2760 struct pointer_map_t *map, int default_lp_nr) 2761 { 2762 int old_lp_nr, new_lp_nr; 2763 void **slot; 2764 2765 if (!stmt_could_throw_p (new_stmt)) 2766 return false; 2767 2768 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt); 2769 if (old_lp_nr == 0) 2770 { 2771 if (default_lp_nr == 0) 2772 return false; 2773 new_lp_nr = default_lp_nr; 2774 } 2775 else if (old_lp_nr > 0) 2776 { 2777 eh_landing_pad old_lp, new_lp; 2778 2779 old_lp = VEC_index (eh_landing_pad, old_fun->eh->lp_array, old_lp_nr); 2780 slot = pointer_map_contains (map, old_lp); 2781 new_lp = (eh_landing_pad) *slot; 2782 new_lp_nr = new_lp->index; 2783 } 2784 else 2785 { 2786 eh_region old_r, new_r; 2787 2788 old_r = VEC_index (eh_region, old_fun->eh->region_array, -old_lp_nr); 2789 slot = pointer_map_contains (map, old_r); 2790 new_r = (eh_region) *slot; 2791 new_lp_nr = -new_r->index; 2792 } 2793 2794 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr); 2795 return true; 2796 } 2797 2798 /* Similar, but both OLD_STMT and NEW_STMT are within the current function, 2799 and thus no remapping is required. */ 2800 2801 bool 2802 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt) 2803 { 2804 int lp_nr; 2805 2806 if (!stmt_could_throw_p (new_stmt)) 2807 return false; 2808 2809 lp_nr = lookup_stmt_eh_lp (old_stmt); 2810 if (lp_nr == 0) 2811 return false; 2812 2813 add_stmt_to_eh_lp (new_stmt, lp_nr); 2814 return true; 2815 } 2816 2817 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of 2818 GIMPLE_TRY) that are similar enough to be considered the same. Currently 2819 this only handles handlers consisting of a single call, as that's the 2820 important case for C++: a destructor call for a particular object showing 2821 up in multiple handlers. */ 2822 2823 static bool 2824 same_handler_p (gimple_seq oneh, gimple_seq twoh) 2825 { 2826 gimple_stmt_iterator gsi; 2827 gimple ones, twos; 2828 unsigned int ai; 2829 2830 gsi = gsi_start (oneh); 2831 if (!gsi_one_before_end_p (gsi)) 2832 return false; 2833 ones = gsi_stmt (gsi); 2834 2835 gsi = gsi_start (twoh); 2836 if (!gsi_one_before_end_p (gsi)) 2837 return false; 2838 twos = gsi_stmt (gsi); 2839 2840 if (!is_gimple_call (ones) 2841 || !is_gimple_call (twos) 2842 || gimple_call_lhs (ones) 2843 || gimple_call_lhs (twos) 2844 || gimple_call_chain (ones) 2845 || gimple_call_chain (twos) 2846 || !gimple_call_same_target_p (ones, twos) 2847 || gimple_call_num_args (ones) != gimple_call_num_args (twos)) 2848 return false; 2849 2850 for (ai = 0; ai < gimple_call_num_args (ones); ++ai) 2851 if (!operand_equal_p (gimple_call_arg (ones, ai), 2852 gimple_call_arg (twos, ai), 0)) 2853 return false; 2854 2855 return true; 2856 } 2857 2858 /* Optimize 2859 try { A() } finally { try { ~B() } catch { ~A() } } 2860 try { ... } finally { ~A() } 2861 into 2862 try { A() } catch { ~B() } 2863 try { ~B() ... } finally { ~A() } 2864 2865 This occurs frequently in C++, where A is a local variable and B is a 2866 temporary used in the initializer for A. 
*/ 2867 2868 static void 2869 optimize_double_finally (gimple one, gimple two) 2870 { 2871 gimple oneh; 2872 gimple_stmt_iterator gsi; 2873 2874 gsi = gsi_start (gimple_try_cleanup (one)); 2875 if (!gsi_one_before_end_p (gsi)) 2876 return; 2877 2878 oneh = gsi_stmt (gsi); 2879 if (gimple_code (oneh) != GIMPLE_TRY 2880 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH) 2881 return; 2882 2883 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two))) 2884 { 2885 gimple_seq seq = gimple_try_eval (oneh); 2886 2887 gimple_try_set_cleanup (one, seq); 2888 gimple_try_set_kind (one, GIMPLE_TRY_CATCH); 2889 seq = copy_gimple_seq_and_replace_locals (seq); 2890 gimple_seq_add_seq (&seq, gimple_try_eval (two)); 2891 gimple_try_set_eval (two, seq); 2892 } 2893 } 2894 2895 /* Perform EH refactoring optimizations that are simpler to do when code 2896 flow has been lowered but EH structures haven't. */ 2897 2898 static void 2899 refactor_eh_r (gimple_seq seq) 2900 { 2901 gimple_stmt_iterator gsi; 2902 gimple one, two; 2903 2904 one = NULL; 2905 two = NULL; 2906 gsi = gsi_start (seq); 2907 while (1) 2908 { 2909 one = two; 2910 if (gsi_end_p (gsi)) 2911 two = NULL; 2912 else 2913 two = gsi_stmt (gsi); 2914 if (one 2915 && two 2916 && gimple_code (one) == GIMPLE_TRY 2917 && gimple_code (two) == GIMPLE_TRY 2918 && gimple_try_kind (one) == GIMPLE_TRY_FINALLY 2919 && gimple_try_kind (two) == GIMPLE_TRY_FINALLY) 2920 optimize_double_finally (one, two); 2921 if (one) 2922 switch (gimple_code (one)) 2923 { 2924 case GIMPLE_TRY: 2925 refactor_eh_r (gimple_try_eval (one)); 2926 refactor_eh_r (gimple_try_cleanup (one)); 2927 break; 2928 case GIMPLE_CATCH: 2929 refactor_eh_r (gimple_catch_handler (one)); 2930 break; 2931 case GIMPLE_EH_FILTER: 2932 refactor_eh_r (gimple_eh_filter_failure (one)); 2933 break; 2934 case GIMPLE_EH_ELSE: 2935 refactor_eh_r (gimple_eh_else_n_body (one)); 2936 refactor_eh_r (gimple_eh_else_e_body (one)); 2937 break; 2938 default: 2939 break; 2940 } 2941 if (two) 2942 gsi_next (&gsi); 2943 else 2944 break; 2945 } 2946 } 2947 2948 static unsigned 2949 refactor_eh (void) 2950 { 2951 refactor_eh_r (gimple_body (current_function_decl)); 2952 return 0; 2953 } 2954 2955 static bool 2956 gate_refactor_eh (void) 2957 { 2958 return flag_exceptions != 0; 2959 } 2960 2961 struct gimple_opt_pass pass_refactor_eh = 2962 { 2963 { 2964 GIMPLE_PASS, 2965 "ehopt", /* name */ 2966 gate_refactor_eh, /* gate */ 2967 refactor_eh, /* execute */ 2968 NULL, /* sub */ 2969 NULL, /* next */ 2970 0, /* static_pass_number */ 2971 TV_TREE_EH, /* tv_id */ 2972 PROP_gimple_lcf, /* properties_required */ 2973 0, /* properties_provided */ 2974 0, /* properties_destroyed */ 2975 0, /* todo_flags_start */ 2976 0 /* todo_flags_finish */ 2977 } 2978 }; 2979 2980 /* At the end of gimple optimization, we can lower RESX. 
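   Roughly, a resx is lowered in one of three ways: with no source
   region left it degenerates into a trap; with a destination region
   within this function we copy the exception pointer and filter values
   across (or branch to a call of the MUST_NOT_THROW failure routine)
   and convert the EH edge to a fallthru; and with no destination the
   exception escapes the function, so we call _Unwind_Resume (or
   __cxa_end_cleanup on ARM EABI).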
*/ 2981 2982 static bool 2983 lower_resx (basic_block bb, gimple stmt, struct pointer_map_t *mnt_map) 2984 { 2985 int lp_nr; 2986 eh_region src_r, dst_r; 2987 gimple_stmt_iterator gsi; 2988 gimple x; 2989 tree fn, src_nr; 2990 bool ret = false; 2991 2992 lp_nr = lookup_stmt_eh_lp (stmt); 2993 if (lp_nr != 0) 2994 dst_r = get_eh_region_from_lp_number (lp_nr); 2995 else 2996 dst_r = NULL; 2997 2998 src_r = get_eh_region_from_number (gimple_resx_region (stmt)); 2999 gsi = gsi_last_bb (bb); 3000 3001 if (src_r == NULL) 3002 { 3003 /* We can wind up with no source region when pass_cleanup_eh shows 3004 that there are no entries into an eh region and deletes it, but 3005 then the block that contains the resx isn't removed. This can 3006 happen without optimization when the switch statement created by 3007 lower_try_finally_switch isn't simplified to remove the eh case. 3008 3009 Resolve this by expanding the resx node to a trap. */ 3010 3011 fn = builtin_decl_implicit (BUILT_IN_TRAP); 3012 x = gimple_build_call (fn, 0); 3013 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3014 3015 while (EDGE_COUNT (bb->succs) > 0) 3016 remove_edge (EDGE_SUCC (bb, 0)); 3017 } 3018 else if (dst_r) 3019 { 3020 /* When we have a destination region, we resolve this by copying 3021 the excptr and filter values into place, and changing the edge 3022 to immediately after the landing pad. */ 3023 edge e; 3024 3025 if (lp_nr < 0) 3026 { 3027 basic_block new_bb; 3028 void **slot; 3029 tree lab; 3030 3031 /* We are resuming into a MUST_NOT_THROW region. Expand a call to 3032 the failure decl into a new block, if needed. */ 3033 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW); 3034 3035 slot = pointer_map_contains (mnt_map, dst_r); 3036 if (slot == NULL) 3037 { 3038 gimple_stmt_iterator gsi2; 3039 3040 new_bb = create_empty_bb (bb); 3041 lab = gimple_block_label (new_bb); 3042 gsi2 = gsi_start_bb (new_bb); 3043 3044 fn = dst_r->u.must_not_throw.failure_decl; 3045 x = gimple_build_call (fn, 0); 3046 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc); 3047 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING); 3048 3049 slot = pointer_map_insert (mnt_map, dst_r); 3050 *slot = lab; 3051 } 3052 else 3053 { 3054 lab = (tree) *slot; 3055 new_bb = label_to_block (lab); 3056 } 3057 3058 gcc_assert (EDGE_COUNT (bb->succs) == 0); 3059 e = make_edge (bb, new_bb, EDGE_FALLTHRU); 3060 e->count = bb->count; 3061 e->probability = REG_BR_PROB_BASE; 3062 } 3063 else 3064 { 3065 edge_iterator ei; 3066 tree dst_nr = build_int_cst (integer_type_node, dst_r->index); 3067 3068 fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES); 3069 src_nr = build_int_cst (integer_type_node, src_r->index); 3070 x = gimple_build_call (fn, 2, dst_nr, src_nr); 3071 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3072 3073 /* Update the flags for the outgoing edge. */ 3074 e = single_succ_edge (bb); 3075 gcc_assert (e->flags & EDGE_EH); 3076 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU; 3077 3078 /* If there are no more EH users of the landing pad, delete it. */ 3079 FOR_EACH_EDGE (e, ei, e->dest->preds) 3080 if (e->flags & EDGE_EH) 3081 break; 3082 if (e == NULL) 3083 { 3084 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr); 3085 remove_eh_landing_pad (lp); 3086 } 3087 } 3088 3089 ret = true; 3090 } 3091 else 3092 { 3093 tree var; 3094 3095 /* When we don't have a destination region, this exception escapes 3096 up the call chain. We resolve this by generating a call to the 3097 _Unwind_Resume library function.
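   That is, we emit, as a sketch (ptr_tmp stands for a fresh SSA
   temporary):

     ptr_tmp = __builtin_eh_pointer (<index of SRC_R>);
     _Unwind_Resume (ptr_tmp);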
*/ 3098 3099 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup 3100 with no arguments for C++ and Java. Check for that. */ 3101 if (src_r->use_cxa_end_cleanup) 3102 { 3103 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP); 3104 x = gimple_build_call (fn, 0); 3105 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3106 } 3107 else 3108 { 3109 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER); 3110 src_nr = build_int_cst (integer_type_node, src_r->index); 3111 x = gimple_build_call (fn, 1, src_nr); 3112 var = create_tmp_var (ptr_type_node, NULL); 3113 var = make_ssa_name (var, x); 3114 gimple_call_set_lhs (x, var); 3115 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3116 3117 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME); 3118 x = gimple_build_call (fn, 1, var); 3119 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3120 } 3121 3122 gcc_assert (EDGE_COUNT (bb->succs) == 0); 3123 } 3124 3125 gsi_remove (&gsi, true); 3126 3127 return ret; 3128 } 3129 3130 static unsigned 3131 execute_lower_resx (void) 3132 { 3133 basic_block bb; 3134 struct pointer_map_t *mnt_map; 3135 bool dominance_invalidated = false; 3136 bool any_rewritten = false; 3137 3138 mnt_map = pointer_map_create (); 3139 3140 FOR_EACH_BB (bb) 3141 { 3142 gimple last = last_stmt (bb); 3143 if (last && is_gimple_resx (last)) 3144 { 3145 dominance_invalidated |= lower_resx (bb, last, mnt_map); 3146 any_rewritten = true; 3147 } 3148 } 3149 3150 pointer_map_destroy (mnt_map); 3151 3152 if (dominance_invalidated) 3153 { 3154 free_dominance_info (CDI_DOMINATORS); 3155 free_dominance_info (CDI_POST_DOMINATORS); 3156 } 3157 3158 return any_rewritten ? TODO_update_ssa_only_virtuals : 0; 3159 } 3160 3161 static bool 3162 gate_lower_resx (void) 3163 { 3164 return flag_exceptions != 0; 3165 } 3166 3167 struct gimple_opt_pass pass_lower_resx = 3168 { 3169 { 3170 GIMPLE_PASS, 3171 "resx", /* name */ 3172 gate_lower_resx, /* gate */ 3173 execute_lower_resx, /* execute */ 3174 NULL, /* sub */ 3175 NULL, /* next */ 3176 0, /* static_pass_number */ 3177 TV_TREE_EH, /* tv_id */ 3178 PROP_gimple_lcf, /* properties_required */ 3179 0, /* properties_provided */ 3180 0, /* properties_destroyed */ 3181 0, /* todo_flags_start */ 3182 TODO_verify_flow /* todo_flags_finish */ 3183 } 3184 }; 3185 3186 /* Try to optimize var = {v} {CLOBBER} stmts followed just by 3187 external throw. */ 3188 3189 static void 3190 optimize_clobbers (basic_block bb) 3191 { 3192 gimple_stmt_iterator gsi = gsi_last_bb (bb); 3193 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi)) 3194 { 3195 gimple stmt = gsi_stmt (gsi); 3196 if (is_gimple_debug (stmt)) 3197 continue; 3198 if (!gimple_clobber_p (stmt) 3199 || TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME) 3200 return; 3201 unlink_stmt_vdef (stmt); 3202 gsi_remove (&gsi, true); 3203 release_defs (stmt); 3204 } 3205 } 3206 3207 /* Try to sink var = {v} {CLOBBER} stmts followed just by 3208 internal throw to successor BB. */ 3209 3210 static int 3211 sink_clobbers (basic_block bb) 3212 { 3213 edge e; 3214 edge_iterator ei; 3215 gimple_stmt_iterator gsi, dgsi; 3216 basic_block succbb; 3217 bool any_clobbers = false; 3218 3219 /* Only optimize if BB has a single EH successor and 3220 all predecessor edges are EH too. */ 3221 if (!single_succ_p (bb) 3222 || (single_succ_edge (bb)->flags & EDGE_EH) == 0) 3223 return 0; 3224 3225 FOR_EACH_EDGE (e, ei, bb->preds) 3226 { 3227 if ((e->flags & EDGE_EH) == 0) 3228 return 0; 3229 } 3230 3231 /* And BB contains only CLOBBER stmts before the final 3232 RESX. 
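   That is, we expect a block of roughly the shape

     <post_landing_pad>: a = {v} {CLOBBER}; b = {v} {CLOBBER}; resx N;

   and we move the clobbers down into the single EH successor so the
   end-of-life marks survive there instead of in this block.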
*/ 3233 gsi = gsi_last_bb (bb); 3234 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi)) 3235 { 3236 gimple stmt = gsi_stmt (gsi); 3237 if (is_gimple_debug (stmt)) 3238 continue; 3239 if (gimple_code (stmt) == GIMPLE_LABEL) 3240 break; 3241 if (!gimple_clobber_p (stmt) 3242 || TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME) 3243 return 0; 3244 any_clobbers = true; 3245 } 3246 if (!any_clobbers) 3247 return 0; 3248 3249 succbb = single_succ (bb); 3250 dgsi = gsi_after_labels (succbb); 3251 gsi = gsi_last_bb (bb); 3252 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi)) 3253 { 3254 gimple stmt = gsi_stmt (gsi); 3255 tree vdef; 3256 if (is_gimple_debug (stmt)) 3257 continue; 3258 if (gimple_code (stmt) == GIMPLE_LABEL) 3259 break; 3260 unlink_stmt_vdef (stmt); 3261 gsi_remove (&gsi, false); 3262 vdef = gimple_vdef (stmt); 3263 if (vdef && TREE_CODE (vdef) == SSA_NAME) 3264 { 3265 vdef = SSA_NAME_VAR (vdef); 3266 mark_sym_for_renaming (vdef); 3267 gimple_set_vdef (stmt, vdef); 3268 gimple_set_vuse (stmt, vdef); 3269 } 3270 release_defs (stmt); 3271 gsi_insert_before (&dgsi, stmt, GSI_SAME_STMT); 3272 } 3273 3274 return TODO_update_ssa_only_virtuals; 3275 } 3276 3277 /* At the end of inlining, we can lower EH_DISPATCH. Return true when 3278 we have found some duplicate labels and removed some edges. */ 3279 3280 static bool 3281 lower_eh_dispatch (basic_block src, gimple stmt) 3282 { 3283 gimple_stmt_iterator gsi; 3284 int region_nr; 3285 eh_region r; 3286 tree filter, fn; 3287 gimple x; 3288 bool redirected = false; 3289 3290 region_nr = gimple_eh_dispatch_region (stmt); 3291 r = get_eh_region_from_number (region_nr); 3292 3293 gsi = gsi_last_bb (src); 3294 3295 switch (r->type) 3296 { 3297 case ERT_TRY: 3298 { 3299 VEC (tree, heap) *labels = NULL; 3300 tree default_label = NULL; 3301 eh_catch c; 3302 edge_iterator ei; 3303 edge e; 3304 struct pointer_set_t *seen_values = pointer_set_create (); 3305 3306 /* Collect the labels for a switch. Zero the post_landing_pad 3307 field because we'll no longer have anything keeping these labels 3308 in existence and the optimizer will be free to merge these 3309 blocks at will. */ 3310 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) 3311 { 3312 tree tp_node, flt_node, lab = c->label; 3313 bool have_label = false; 3314 3315 c->label = NULL; 3316 tp_node = c->type_list; 3317 flt_node = c->filter_list; 3318 3319 if (tp_node == NULL) 3320 { 3321 default_label = lab; 3322 break; 3323 } 3324 do 3325 { 3326 /* Filter out duplicate labels that arise when this handler 3327 is shadowed by an earlier one. When no labels are 3328 attached to the handler anymore, we remove 3329 the corresponding edge and then we delete unreachable 3330 blocks at the end of this pass. */ 3331 if (! pointer_set_contains (seen_values, TREE_VALUE (flt_node))) 3332 { 3333 tree t = build_case_label (TREE_VALUE (flt_node), 3334 NULL, lab); 3335 VEC_safe_push (tree, heap, labels, t); 3336 pointer_set_insert (seen_values, TREE_VALUE (flt_node)); 3337 have_label = true; 3338 } 3339 3340 tp_node = TREE_CHAIN (tp_node); 3341 flt_node = TREE_CHAIN (flt_node); 3342 } 3343 while (tp_node); 3344 if (! have_label) 3345 { 3346 remove_edge (find_edge (src, label_to_block (lab))); 3347 redirected = true; 3348 } 3349 } 3350 3351 /* Clean up the edge flags. */ 3352 FOR_EACH_EDGE (e, ei, src->succs) 3353 { 3354 if (e->flags & EDGE_FALLTHRU) 3355 { 3356 /* If there was no catch-all, use the fallthru edge.
*/ 3357 if (default_label == NULL) 3358 default_label = gimple_block_label (e->dest); 3359 e->flags &= ~EDGE_FALLTHRU; 3360 } 3361 } 3362 gcc_assert (default_label != NULL); 3363 3364 /* Don't generate a switch if there's only a default case. 3365 This is common in the form of try { A; } catch (...) { B; }. */ 3366 if (labels == NULL) 3367 { 3368 e = single_succ_edge (src); 3369 e->flags |= EDGE_FALLTHRU; 3370 } 3371 else 3372 { 3373 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER); 3374 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node, 3375 region_nr)); 3376 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL); 3377 filter = make_ssa_name (filter, x); 3378 gimple_call_set_lhs (x, filter); 3379 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3380 3381 /* Turn the default label into a default case. */ 3382 default_label = build_case_label (NULL, NULL, default_label); 3383 sort_case_labels (labels); 3384 3385 x = gimple_build_switch_vec (filter, default_label, labels); 3386 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3387 3388 VEC_free (tree, heap, labels); 3389 } 3390 pointer_set_destroy (seen_values); 3391 } 3392 break; 3393 3394 case ERT_ALLOWED_EXCEPTIONS: 3395 { 3396 edge b_e = BRANCH_EDGE (src); 3397 edge f_e = FALLTHRU_EDGE (src); 3398 3399 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER); 3400 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node, 3401 region_nr)); 3402 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL); 3403 filter = make_ssa_name (filter, x); 3404 gimple_call_set_lhs (x, filter); 3405 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3406 3407 r->u.allowed.label = NULL; 3408 x = gimple_build_cond (EQ_EXPR, filter, 3409 build_int_cst (TREE_TYPE (filter), 3410 r->u.allowed.filter), 3411 NULL_TREE, NULL_TREE); 3412 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3413 3414 b_e->flags = b_e->flags | EDGE_TRUE_VALUE; 3415 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE; 3416 } 3417 break; 3418 3419 default: 3420 gcc_unreachable (); 3421 } 3422 3423 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. 
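   For an ERT_TRY region with two typed handlers the result is roughly

     <filter_tmp> = __builtin_eh_filter (<region_nr>);
     switch (<filter_tmp>)
       {
       case <filter of T1>: goto <handler 1>;
       case <filter of T2>: goto <handler 2>;
       default: goto <catch-all or fallthru label>;
       }

   while an ERT_ALLOWED_EXCEPTIONS region instead tests the filter with
   a single conditional branch.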
*/ 3424 gsi_remove (&gsi, true); 3425 return redirected; 3426 } 3427 3428 static unsigned 3429 execute_lower_eh_dispatch (void) 3430 { 3431 basic_block bb; 3432 int flags = 0; 3433 bool redirected = false; 3434 3435 assign_filter_values (); 3436 3437 FOR_EACH_BB (bb) 3438 { 3439 gimple last = last_stmt (bb); 3440 if (last == NULL) 3441 continue; 3442 if (gimple_code (last) == GIMPLE_EH_DISPATCH) 3443 { 3444 redirected |= lower_eh_dispatch (bb, last); 3445 flags |= TODO_update_ssa_only_virtuals; 3446 } 3447 else if (gimple_code (last) == GIMPLE_RESX) 3448 { 3449 if (stmt_can_throw_external (last)) 3450 optimize_clobbers (bb); 3451 else 3452 flags |= sink_clobbers (bb); 3453 } 3454 } 3455 3456 if (redirected) 3457 delete_unreachable_blocks (); 3458 return flags; 3459 } 3460 3461 static bool 3462 gate_lower_eh_dispatch (void) 3463 { 3464 return cfun->eh->region_tree != NULL; 3465 } 3466 3467 struct gimple_opt_pass pass_lower_eh_dispatch = 3468 { 3469 { 3470 GIMPLE_PASS, 3471 "ehdisp", /* name */ 3472 gate_lower_eh_dispatch, /* gate */ 3473 execute_lower_eh_dispatch, /* execute */ 3474 NULL, /* sub */ 3475 NULL, /* next */ 3476 0, /* static_pass_number */ 3477 TV_TREE_EH, /* tv_id */ 3478 PROP_gimple_lcf, /* properties_required */ 3479 0, /* properties_provided */ 3480 0, /* properties_destroyed */ 3481 0, /* todo_flags_start */ 3482 TODO_verify_flow /* todo_flags_finish */ 3483 } 3484 }; 3485 3486 /* Walk statements, see what regions are really referenced and remove 3487 those that are unused. */ 3488 3489 static void 3490 remove_unreachable_handlers (void) 3491 { 3492 sbitmap r_reachable, lp_reachable; 3493 eh_region region; 3494 eh_landing_pad lp; 3495 basic_block bb; 3496 int lp_nr, r_nr; 3497 3498 r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array)); 3499 lp_reachable 3500 = sbitmap_alloc (VEC_length (eh_landing_pad, cfun->eh->lp_array)); 3501 sbitmap_zero (r_reachable); 3502 sbitmap_zero (lp_reachable); 3503 3504 FOR_EACH_BB (bb) 3505 { 3506 gimple_stmt_iterator gsi; 3507 3508 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) 3509 { 3510 gimple stmt = gsi_stmt (gsi); 3511 lp_nr = lookup_stmt_eh_lp (stmt); 3512 3513 /* Negative LP numbers are MUST_NOT_THROW regions which 3514 are not considered BB enders. */ 3515 if (lp_nr < 0) 3516 SET_BIT (r_reachable, -lp_nr); 3517 3518 /* Positive LP numbers are real landing pads, and are BB enders. */ 3519 else if (lp_nr > 0) 3520 { 3521 gcc_assert (gsi_one_before_end_p (gsi)); 3522 region = get_eh_region_from_lp_number (lp_nr); 3523 SET_BIT (r_reachable, region->index); 3524 SET_BIT (lp_reachable, lp_nr); 3525 } 3526 3527 /* Avoid removing regions referenced from RESX/EH_DISPATCH.
*/ 3528 switch (gimple_code (stmt)) 3529 { 3530 case GIMPLE_RESX: 3531 SET_BIT (r_reachable, gimple_resx_region (stmt)); 3532 break; 3533 case GIMPLE_EH_DISPATCH: 3534 SET_BIT (r_reachable, gimple_eh_dispatch_region (stmt)); 3535 break; 3536 default: 3537 break; 3538 } 3539 } 3540 } 3541 3542 if (dump_file) 3543 { 3544 fprintf (dump_file, "Before removal of unreachable regions:\n"); 3545 dump_eh_tree (dump_file, cfun); 3546 fprintf (dump_file, "Reachable regions: "); 3547 dump_sbitmap_file (dump_file, r_reachable); 3548 fprintf (dump_file, "Reachable landing pads: "); 3549 dump_sbitmap_file (dump_file, lp_reachable); 3550 } 3551 3552 for (r_nr = 1; 3553 VEC_iterate (eh_region, cfun->eh->region_array, r_nr, region); ++r_nr) 3554 if (region && !TEST_BIT (r_reachable, r_nr)) 3555 { 3556 if (dump_file) 3557 fprintf (dump_file, "Removing unreachable region %d\n", r_nr); 3558 remove_eh_handler (region); 3559 } 3560 3561 for (lp_nr = 1; 3562 VEC_iterate (eh_landing_pad, cfun->eh->lp_array, lp_nr, lp); ++lp_nr) 3563 if (lp && !TEST_BIT (lp_reachable, lp_nr)) 3564 { 3565 if (dump_file) 3566 fprintf (dump_file, "Removing unreachable landing pad %d\n", lp_nr); 3567 remove_eh_landing_pad (lp); 3568 } 3569 3570 if (dump_file) 3571 { 3572 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n"); 3573 dump_eh_tree (dump_file, cfun); 3574 fprintf (dump_file, "\n\n"); 3575 } 3576 3577 sbitmap_free (r_reachable); 3578 sbitmap_free (lp_reachable); 3579 3580 #ifdef ENABLE_CHECKING 3581 verify_eh_tree (cfun); 3582 #endif 3583 } 3584 3585 /* Remove unreachable handlers if any landing pads have been removed after 3586 the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */ 3587 3588 void 3589 maybe_remove_unreachable_handlers (void) 3590 { 3591 eh_landing_pad lp; 3592 int i; 3593 3594 if (cfun->eh == NULL) 3595 return; 3596 3597 for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i) 3598 if (lp && lp->post_landing_pad) 3599 { 3600 if (label_to_block (lp->post_landing_pad) == NULL) 3601 { 3602 remove_unreachable_handlers (); 3603 return; 3604 } 3605 } 3606 } 3607 3608 /* Remove regions that do not have landing pads. This assumes 3609 that remove_unreachable_handlers has already been run, and 3610 that we've just manipulated the landing pads since then. */ 3611 3612 static void 3613 remove_unreachable_handlers_no_lp (void) 3614 { 3615 eh_region r; 3616 int i; 3617 sbitmap r_reachable; 3618 basic_block bb; 3619 3620 r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array)); 3621 sbitmap_zero (r_reachable); 3622 3623 FOR_EACH_BB (bb) 3624 { 3625 gimple stmt = last_stmt (bb); 3626 if (stmt) 3627 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */ 3628 switch (gimple_code (stmt)) 3629 { 3630 case GIMPLE_RESX: 3631 SET_BIT (r_reachable, gimple_resx_region (stmt)); 3632 break; 3633 case GIMPLE_EH_DISPATCH: 3634 SET_BIT (r_reachable, gimple_eh_dispatch_region (stmt)); 3635 break; 3636 default: 3637 break; 3638 } 3639 } 3640 3641 for (i = 1; VEC_iterate (eh_region, cfun->eh->region_array, i, r); ++i) 3642 if (r && r->landing_pads == NULL && r->type != ERT_MUST_NOT_THROW 3643 && !TEST_BIT (r_reachable, i)) 3644 { 3645 if (dump_file) 3646 fprintf (dump_file, "Removing unreachable region %d\n", i); 3647 remove_eh_handler (r); 3648 } 3649 3650 sbitmap_free (r_reachable); 3651 } 3652 3653 /* Undo critical edge splitting on an EH landing pad. Earlier, we 3654 optimistically split all sorts of edges, including EH edges.
The 3655 optimization passes in between may not have needed them; if not, 3656 we should undo the split. 3657 3658 Recognize this case by having one EH edge incoming to the BB and 3659 one normal edge outgoing; BB should be empty apart from the 3660 post_landing_pad label. 3661 3662 Note that this is slightly different from the empty handler case 3663 handled by cleanup_empty_eh, in that the actual handler may yet 3664 have actual code but the landing pad has been separated from the 3665 handler. As such, cleanup_empty_eh relies on this transformation 3666 having been done first. */ 3667 3668 static bool 3669 unsplit_eh (eh_landing_pad lp) 3670 { 3671 basic_block bb = label_to_block (lp->post_landing_pad); 3672 gimple_stmt_iterator gsi; 3673 edge e_in, e_out; 3674 3675 /* Quickly check the edge counts on BB for singularity. */ 3676 if (EDGE_COUNT (bb->preds) != 1 || EDGE_COUNT (bb->succs) != 1) 3677 return false; 3678 e_in = EDGE_PRED (bb, 0); 3679 e_out = EDGE_SUCC (bb, 0); 3680 3681 /* Input edge must be EH and output edge must be normal. */ 3682 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0) 3683 return false; 3684 3685 /* The block must be empty except for the labels and debug insns. */ 3686 gsi = gsi_after_labels (bb); 3687 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi))) 3688 gsi_next_nondebug (&gsi); 3689 if (!gsi_end_p (gsi)) 3690 return false; 3691 3692 /* The destination block must not already have a landing pad 3693 for a different region. */ 3694 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi)) 3695 { 3696 gimple stmt = gsi_stmt (gsi); 3697 tree lab; 3698 int lp_nr; 3699 3700 if (gimple_code (stmt) != GIMPLE_LABEL) 3701 break; 3702 lab = gimple_label_label (stmt); 3703 lp_nr = EH_LANDING_PAD_NR (lab); 3704 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region) 3705 return false; 3706 } 3707 3708 /* The new destination block must not already be a destination of 3709 the source block, lest we merge fallthru and eh edges and get 3710 all sorts of confused. */ 3711 if (find_edge (e_in->src, e_out->dest)) 3712 return false; 3713 3714 /* ??? We can get degenerate phis due to cfg cleanups. I would have 3715 thought this should have been cleaned up by a phicprop pass, but 3716 that doesn't appear to handle virtuals. Propagate by hand. */ 3717 if (!gimple_seq_empty_p (phi_nodes (bb))) 3718 { 3719 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); ) 3720 { 3721 gimple use_stmt, phi = gsi_stmt (gsi); 3722 tree lhs = gimple_phi_result (phi); 3723 tree rhs = gimple_phi_arg_def (phi, 0); 3724 use_operand_p use_p; 3725 imm_use_iterator iter; 3726 3727 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs) 3728 { 3729 FOR_EACH_IMM_USE_ON_STMT (use_p, iter) 3730 SET_USE (use_p, rhs); 3731 } 3732 3733 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)) 3734 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1; 3735 3736 remove_phi_node (&gsi, true); 3737 } 3738 } 3739 3740 if (dump_file && (dump_flags & TDF_DETAILS)) 3741 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n", 3742 lp->index, e_out->dest->index); 3743 3744 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving 3745 a successor edge, humor it. But do the real CFG change with the 3746 predecessor of E_OUT in order to preserve the ordering of arguments 3747 to the PHI nodes in E_OUT->DEST. 
*/ 3748 redirect_eh_edge_1 (e_in, e_out->dest, false); 3749 redirect_edge_pred (e_out, e_in->src); 3750 e_out->flags = e_in->flags; 3751 e_out->probability = e_in->probability; 3752 e_out->count = e_in->count; 3753 remove_edge (e_in); 3754 3755 return true; 3756 } 3757 3758 /* Examine each landing pad block and see if it matches unsplit_eh. */ 3759 3760 static bool 3761 unsplit_all_eh (void) 3762 { 3763 bool changed = false; 3764 eh_landing_pad lp; 3765 int i; 3766 3767 for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i) 3768 if (lp) 3769 changed |= unsplit_eh (lp); 3770 3771 return changed; 3772 } 3773 3774 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming 3775 to OLD_BB to NEW_BB; return true on success, false on failure. 3776 3777 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any 3778 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT. 3779 Virtual PHIs may be deleted and marked for renaming. */ 3780 3781 static bool 3782 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb, 3783 edge old_bb_out, bool change_region) 3784 { 3785 gimple_stmt_iterator ngsi, ogsi; 3786 edge_iterator ei; 3787 edge e; 3788 bitmap rename_virts; 3789 bitmap ophi_handled; 3790 3791 /* The destination block must not be a regular successor for any 3792 of the preds of the landing pad. Thus, avoid turning 3793 <..> 3794 | \ EH 3795 | <..> 3796 | / 3797 <..> 3798 into 3799 <..> 3800 | | EH 3801 <..> 3802 which CFG verification would choke on. See PR45172 and PR51089. */ 3803 FOR_EACH_EDGE (e, ei, old_bb->preds) 3804 if (find_edge (e->src, new_bb)) 3805 return false; 3806 3807 FOR_EACH_EDGE (e, ei, old_bb->preds) 3808 redirect_edge_var_map_clear (e); 3809 3810 ophi_handled = BITMAP_ALLOC (NULL); 3811 rename_virts = BITMAP_ALLOC (NULL); 3812 3813 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map 3814 for the edges we're going to move. */ 3815 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi)) 3816 { 3817 gimple ophi, nphi = gsi_stmt (ngsi); 3818 tree nresult, nop; 3819 3820 nresult = gimple_phi_result (nphi); 3821 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx); 3822 3823 /* Find the corresponding PHI in OLD_BB so we can forward-propagate 3824 the source ssa_name. */ 3825 ophi = NULL; 3826 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi)) 3827 { 3828 ophi = gsi_stmt (ogsi); 3829 if (gimple_phi_result (ophi) == nop) 3830 break; 3831 ophi = NULL; 3832 } 3833 3834 /* If we did find the corresponding PHI, copy those inputs. */ 3835 if (ophi) 3836 { 3837 /* If NOP is used somewhere else beyond phis in new_bb, give up. */ 3838 if (!has_single_use (nop)) 3839 { 3840 imm_use_iterator imm_iter; 3841 use_operand_p use_p; 3842 3843 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop) 3844 { 3845 if (!gimple_debug_bind_p (USE_STMT (use_p)) 3846 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI 3847 || gimple_bb (USE_STMT (use_p)) != new_bb)) 3848 goto fail; 3849 } 3850 } 3851 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop)); 3852 FOR_EACH_EDGE (e, ei, old_bb->preds) 3853 { 3854 location_t oloc; 3855 tree oop; 3856 3857 if ((e->flags & EDGE_EH) == 0) 3858 continue; 3859 oop = gimple_phi_arg_def (ophi, e->dest_idx); 3860 oloc = gimple_phi_arg_location (ophi, e->dest_idx); 3861 redirect_edge_var_map_add (e, nresult, oop, oloc); 3862 } 3863 } 3864 /* If we didn't find the PHI, but it's a VOP, remember to rename 3865 it later, assuming all other tests succeed. 
*/ 3866 else if (!is_gimple_reg (nresult)) 3867 bitmap_set_bit (rename_virts, SSA_NAME_VERSION (nresult)); 3868 /* If we didn't find the PHI, and it's a real variable, we know 3869 from the fact that OLD_BB is tree_empty_eh_handler_p that the 3870 variable is unchanged from input to the block and we can simply 3871 re-use the input to NEW_BB from the OLD_BB_OUT edge. */ 3872 else 3873 { 3874 location_t nloc 3875 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx); 3876 FOR_EACH_EDGE (e, ei, old_bb->preds) 3877 redirect_edge_var_map_add (e, nresult, nop, nloc); 3878 } 3879 } 3880 3881 /* Second, verify that all PHIs from OLD_BB have been handled. If not, 3882 we don't know what values from the other edges into NEW_BB to use. */ 3883 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi)) 3884 { 3885 gimple ophi = gsi_stmt (ogsi); 3886 tree oresult = gimple_phi_result (ophi); 3887 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult))) 3888 goto fail; 3889 } 3890 3891 /* At this point we know that the merge will succeed. Remove the PHI 3892 nodes for the virtuals that we want to rename. */ 3893 if (!bitmap_empty_p (rename_virts)) 3894 { 3895 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); ) 3896 { 3897 gimple nphi = gsi_stmt (ngsi); 3898 tree nresult = gimple_phi_result (nphi); 3899 if (bitmap_bit_p (rename_virts, SSA_NAME_VERSION (nresult))) 3900 { 3901 mark_virtual_phi_result_for_renaming (nphi); 3902 remove_phi_node (&ngsi, true); 3903 } 3904 else 3905 gsi_next (&ngsi); 3906 } 3907 } 3908 3909 /* Finally, move the edges and update the PHIs. */ 3910 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); ) 3911 if (e->flags & EDGE_EH) 3912 { 3913 redirect_eh_edge_1 (e, new_bb, change_region); 3914 redirect_edge_succ (e, new_bb); 3915 flush_pending_stmts (e); 3916 } 3917 else 3918 ei_next (&ei); 3919 3920 BITMAP_FREE (ophi_handled); 3921 BITMAP_FREE (rename_virts); 3922 return true; 3923 3924 fail: 3925 FOR_EACH_EDGE (e, ei, old_bb->preds) 3926 redirect_edge_var_map_clear (e); 3927 BITMAP_FREE (ophi_handled); 3928 BITMAP_FREE (rename_virts); 3929 return false; 3930 } 3931 3932 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its 3933 old region to NEW_REGION at BB. */ 3934 3935 static void 3936 cleanup_empty_eh_move_lp (basic_block bb, edge e_out, 3937 eh_landing_pad lp, eh_region new_region) 3938 { 3939 gimple_stmt_iterator gsi; 3940 eh_landing_pad *pp; 3941 3942 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp) 3943 continue; 3944 *pp = lp->next_lp; 3945 3946 lp->region = new_region; 3947 lp->next_lp = new_region->landing_pads; 3948 new_region->landing_pads = lp; 3949 3950 /* Delete the RESX that was matched within the empty handler block. */ 3951 gsi = gsi_last_bb (bb); 3952 mark_virtual_ops_for_renaming (gsi_stmt (gsi)); 3953 gsi_remove (&gsi, true); 3954 3955 /* Clean up E_OUT for the fallthru. */ 3956 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU; 3957 e_out->probability = REG_BR_PROB_BASE; 3958 } 3959 3960 /* A subroutine of cleanup_empty_eh. Handle more complex cases of 3961 unsplitting than unsplit_eh was prepared to handle, e.g. when 3962 multiple incoming edges and phis are involved. */ 3963 3964 static bool 3965 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp) 3966 { 3967 gimple_stmt_iterator gsi; 3968 tree lab; 3969 3970 /* We really ought not have totally lost everything following 3971 a landing pad label. Given that BB is empty, there had better 3972 be a successor. 
/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
   unsplitting than unsplit_eh was prepared to handle, e.g. when
   multiple incoming edges and phis are involved.  */

static bool
cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
{
  gimple_stmt_iterator gsi;
  tree lab;

  /* We really ought not to have totally lost everything following
     a landing pad label.  Given that BB is empty, there had better
     be a successor.  */
  gcc_assert (e_out != NULL);

  /* The destination block must not already have a landing pad
     for a different region.  */
  lab = NULL;
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      int lp_nr;

      if (gimple_code (stmt) != GIMPLE_LABEL)
	break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* Attempt to move the PHIs into the successor block.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unsplit EH landing pad %d to block %i "
		 "(via cleanup_empty_eh).\n",
		 lp->index, e_out->dest->index);
      return true;
    }

  return false;
}

/* Return true if edge E_FIRST is part of an empty infinite loop
   or leads to such a loop through a series of single-successor
   empty bbs.  */

static bool
infinite_empty_loop_p (edge e_first)
{
  bool inf_loop = false;
  edge e;

  if (e_first->dest == e_first->src)
    return true;

  e_first->src->aux = (void *) 1;
  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
    {
      gimple_stmt_iterator gsi;
      if (e->dest->aux)
	{
	  inf_loop = true;
	  break;
	}
      e->dest->aux = (void *) 1;
      gsi = gsi_after_labels (e->dest);
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
	gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	break;
    }
  e_first->src->aux = NULL;
  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
    e->dest->aux = NULL;

  return inf_loop;
}
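/* Illustrative sketch, not called anywhere: the "block is empty" test
   used by infinite_empty_loop_p above and by cleanup_empty_eh below.
   Labels never count as content, and debug statements carry no
   semantics, so both are skipped before testing for the end of the
   statement sequence.  */

static bool ATTRIBUTE_UNUSED
sketch_bb_is_empty_p (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_after_labels (bb);

  /* Step over any leading debug statements.  */
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);

  /* If nothing remains, the block contains no real statements.  */
  return gsi_end_p (gsi);
}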
/* Examine the block associated with LP to determine if it's an empty
   handler for its EH region.  If so, attempt to redirect EH edges to
   an outer region.  Return true if the CFG was updated in any way.
   This is similar to jump forwarding, just across EH edges.  */

static bool
cleanup_empty_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  gimple resx;
  eh_region new_region;
  edge_iterator ei;
  edge e, e_out;
  bool has_non_eh_pred;
  bool ret = false;
  int new_lp_nr;

  /* There can be zero or one edges out of BB.  This is the quickest test.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      e_out = NULL;
      break;
    case 1:
      e_out = EDGE_SUCC (bb, 0);
      break;
    default:
      return false;
    }

  resx = last_stmt (bb);
  if (resx && is_gimple_resx (resx))
    {
      if (stmt_can_throw_external (resx))
	optimize_clobbers (bb);
      else if (sink_clobbers (bb))
	ret = true;
    }

  gsi = gsi_after_labels (bb);

  /* Make sure to skip debug statements.  */
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);

  /* If the block is totally empty, look for more unsplitting cases.  */
  if (gsi_end_p (gsi))
    {
      /* Bail out for the degenerate case of an infinite loop.  */
      if (infinite_empty_loop_p (e_out))
	return ret;

      return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
    }

  /* The block should consist only of a single RESX statement, modulo a
     preceding call to __builtin_stack_restore if there is no outgoing
     edge, since the call can be eliminated in this case.  */
  resx = gsi_stmt (gsi);
  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
    {
      gsi_next (&gsi);
      resx = gsi_stmt (gsi);
    }
  if (!is_gimple_resx (resx))
    return ret;
  gcc_assert (gsi_one_before_end_p (gsi));

  /* Determine if there are non-EH edges, or resx edges into the handler.  */
  has_non_eh_pred = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH))
      has_non_eh_pred = true;

  /* Find the handler that's outer of the empty handler by looking at
     where the RESX instruction was vectored.  */
  new_lp_nr = lookup_stmt_eh_lp (resx);
  new_region = get_eh_region_from_lp_number (new_lp_nr);

  /* If there's no destination region within the current function,
     redirection is trivial via removing the throwing statements from
     the EH region, removing the EH edges, and allowing the block
     to go unreachable.  */
  if (new_region == NULL)
    {
      gcc_assert (e_out == NULL);
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* If the destination region is a MUST_NOT_THROW, allow the runtime
     to handle the abort and allow the blocks to go unreachable.  */
  if (new_region->type == ERT_MUST_NOT_THROW)
    {
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    add_stmt_to_eh_lp (stmt, new_lp_nr);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* Try to redirect the EH edges and merge the PHIs into the destination
     landing pad block.  If the merge succeeds, we'll already have redirected
     all the EH edges.  The handler itself will go unreachable if there were
     no normal edges.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
    goto succeed;

  /* Finally, if all input edges are EH edges, then we can (potentially)
     reduce the number of transfers from the runtime by moving the landing
     pad from the original region to the new region.  This is a win when
     we remove the last CLEANUP region along a particular exception
     propagation path.  Since nothing changes except for the region with
     which the landing pad is associated, the PHI nodes do not need to be
     adjusted at all.  */
  if (!has_non_eh_pred)
    {
      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
		 lp->index, new_region->index);

      /* ??? The CFG didn't change, but we may have rendered the
	 old EH region unreachable.  Trigger a cleanup there.  */
      return true;
    }

  return ret;

 succeed:
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
  remove_eh_landing_pad (lp);
  return true;
}
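/* Illustrative sketch, not called anywhere: the edge-iteration idiom
   used twice in cleanup_empty_eh above.  FOR_EACH_EDGE must not be used
   while edges are being removed, because remove_edge compacts the edge
   vector; instead, re-fetch the current edge with ei_safe_edge and only
   advance the iterator when the edge survives.  The EDGE_EH predicate
   here stands in for whatever condition the caller needs.  */

static void ATTRIBUTE_UNUSED
sketch_remove_eh_preds (basic_block bb)
{
  edge_iterator ei;
  edge e;

  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    if (e->flags & EDGE_EH)
      remove_edge (e);	/* Removal shifts the next edge into the
			   current slot; do not advance.  */
    else
      ei_next (&ei);
}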
/* Do a post-order traversal of the EH region tree.  Examine each
   post_landing_pad block and see if we can eliminate it as empty.  */

static bool
cleanup_all_empty_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
    if (lp)
      changed |= cleanup_empty_eh (lp);

  return changed;
}

/* Perform cleanups and lowering of exception handling:
   1) Cleanup regions whose handlers do nothing are optimized out.
   2) MUST_NOT_THROW regions that became dead because of 1) are
      optimized out.
   3) Information is collected about which regions contain instructions
      and which regions are reachable via local EH edges.
   4) The EH tree is pruned of regions that are no longer necessary.

   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
   Unify those that have the same failure decl and locus.  */

static unsigned int
execute_cleanup_eh_1 (void)
{
  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
     looking up unreachable landing pads.  */
  remove_unreachable_handlers ();

  /* Watch out for the region tree vanishing due to all regions
     being unreachable.  */
  if (cfun->eh->region_tree && optimize)
    {
      bool changed = false;

      changed |= unsplit_all_eh ();
      changed |= cleanup_all_empty_eh ();

      if (changed)
	{
	  free_dominance_info (CDI_DOMINATORS);
	  free_dominance_info (CDI_POST_DOMINATORS);

	  /* We delayed all basic block deletion, as we may have performed
	     cleanups on EH edges while non-EH edges were still present.  */
	  delete_unreachable_blocks ();

	  /* We manipulated the landing pads.  Remove any region that no
	     longer has a landing pad.  */
	  remove_unreachable_handlers_no_lp ();

	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	}
    }

  return 0;
}

static unsigned int
execute_cleanup_eh (void)
{
  int ret = execute_cleanup_eh_1 ();

  /* If the function no longer needs an EH personality routine,
     clear it.  This exposes cross-language inlining opportunities
     and avoids references to a never-defined personality routine.  */
  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
      && function_needs_eh_personality (cfun) != eh_personality_lang)
    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;

  return ret;
}

static bool
gate_cleanup_eh (void)
{
  return cfun->eh != NULL && cfun->eh->region_tree != NULL;
}

struct gimple_opt_pass pass_cleanup_eh = {
 {
  GIMPLE_PASS,
  "ehcleanup",			/* name */
  gate_cleanup_eh,		/* gate */
  execute_cleanup_eh,		/* execute */
  NULL,				/* sub */
  NULL,				/* next */
  0,				/* static_pass_number */
  TV_TREE_EH,			/* tv_id */
  PROP_gimple_lcf,		/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0				/* todo_flags_finish */
 }
};
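/* For reference: a pass descriptor of this shape only runs once it is
   chained into the pass pipeline in passes.c; the pass manager calls
   the gate hook and, if it returns true, the execute hook.  As a
   comment-level illustration (the registration itself lives in
   passes.c, where the pipeline position is decided):

     NEXT_PASS (pass_cleanup_eh);
*/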
/* Verify that BB, which contains STMT as its last statement, has
   precisely the edges that make_eh_edges would create.  */

DEBUG_FUNCTION bool
verify_eh_edges (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  eh_landing_pad lp = NULL;
  int lp_nr;
  edge_iterator ei;
  edge e, eh_edge;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    lp = get_eh_landing_pad_from_number (lp_nr);

  eh_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->flags & EDGE_EH)
	{
	  if (eh_edge)
	    {
	      error ("BB %i has multiple EH edges", bb->index);
	      return true;
	    }
	  else
	    eh_edge = e;
	}
    }

  if (lp == NULL)
    {
      if (eh_edge)
	{
	  error ("BB %i cannot throw but has an EH edge", bb->index);
	  return true;
	}
      return false;
    }

  if (!stmt_could_throw_p (stmt))
    {
      error ("BB %i last statement has incorrectly set lp", bb->index);
      return true;
    }

  if (eh_edge == NULL)
    {
      error ("BB %i is missing an EH edge", bb->index);
      return true;
    }

  if (eh_edge->dest != label_to_block (lp->post_landing_pad))
    {
      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
      return true;
    }

  return false;
}

/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */

DEBUG_FUNCTION bool
verify_eh_dispatch_edge (gimple stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;
  bool want_fallthru = true;
  edge_iterator ei;
  edge e, fall_edge;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  FOR_EACH_EDGE (e, ei, src->succs)
    gcc_assert (e->aux == NULL);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  e = find_edge (src, dst);
	  if (e == NULL)
	    {
	      error ("BB %i is missing an edge", src->index);
	      return true;
	    }
	  e->aux = (void *)e;

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    {
	      want_fallthru = false;
	      break;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      e = find_edge (src, dst);
      if (e == NULL)
	{
	  error ("BB %i is missing an edge", src->index);
	  return true;
	}
      e->aux = (void *)e;
      break;

    default:
      gcc_unreachable ();
    }

  fall_edge = NULL;
  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->flags & EDGE_FALLTHRU)
	{
	  if (fall_edge != NULL)
	    {
	      error ("BB %i has too many fallthru edges", src->index);
	      return true;
	    }
	  fall_edge = e;
	}
      else if (e->aux)
	e->aux = NULL;
      else
	{
	  error ("BB %i has an incorrect edge", src->index);
	  return true;
	}
    }
  if ((fall_edge != NULL) ^ want_fallthru)
    {
      error ("BB %i has an incorrect fallthru edge", src->index);
      return true;
    }

  return false;
}
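/* Illustrative sketch, not called anywhere: the mark-and-sweep idiom
   verify_eh_dispatch_edge uses above.  Each expected edge is first
   marked through its scratch aux field; the sweep then clears the marks
   and flags any edge that was never expected.  Like the verifier, it
   leaves aux NULL on success, which other code depends on.  */

static bool ATTRIBUTE_UNUSED
sketch_sweep_unexpected_edges (basic_block src)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->aux)
	e->aux = NULL;		/* An expected, marked edge: unmark it.  */
      else
	{
	  error ("BB %i has an unexpected edge", src->index);
	  return true;
	}
    }
  return false;
}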