/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "pointer-set.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "tree-inline.h"
#include "tree-iterator.h"
#include "tree-pass.h"
#include "timevar.h"
#include "langhooks.h"
#include "ggc.h"
#include "diagnostic-core.h"
#include "gimple.h"
#include "target.h"

/* In some instances a tree and a gimple need to be stored in the same
   table, i.e. in hash tables.  This is a structure to do this.  */
typedef union {tree *tp; tree t; gimple g;} treemple;

/* Nonzero if we are using EH to handle cleanups.  */
static int using_eh_for_cleanups_p = 0;

void
using_eh_for_cleanups (void)
{
  using_eh_for_cleanups_p = 1;
}

/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */

/* Add statement T in function IFUN to landing pad NUM.  */

void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  struct throw_stmt_node *n;
  void **slot;

  gcc_assert (num != 0);

  n = ggc_alloc_throw_stmt_node ();
  n->stmt = t;
  n->lp_nr = num;

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
                                                    struct_ptr_eq,
                                                    ggc_free));

  slot = htab_find_slot (get_eh_throw_stmt_table (ifun), n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}
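/* For illustration, a sketch of the sign convention used throughout
   this file: after

	add_stmt_to_eh_lp (stmt, 3);

   lookup_stmt_eh_lp (stmt) returns 3, naming landing pad 3.  Statements
   within a MUST_NOT_THROW region are instead recorded with the negated
   region index (see record_stmt_eh_region below), so -2 names
   MUST_NOT_THROW region 2, and 0 means the statement is not in the
   table at all.  */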
/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
	lp = gen_eh_landing_pad (region);
      else
	gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}


/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  struct throw_stmt_node dummy;
  void **slot;

  if (!get_eh_throw_stmt_table (ifun))
    return false;

  dummy.stmt = t;
  slot = htab_find_slot (get_eh_throw_stmt_table (ifun), &dummy,
                         NO_INSERT);
  if (slot)
    {
      htab_clear_slot (get_eh_throw_stmt_table (ifun), slot);
      return true;
    }
  else
    return false;
}


/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  struct throw_stmt_node *p, n;

  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  n.stmt = t;
  p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
  return p ? p->lp_nr : 0;
}

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gimple parent;
};

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static htab_t finally_tree;

static void
record_in_finally_tree (treemple child, gimple parent)
{
  struct finally_tree_node *n;
  void **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = htab_find_slot (finally_tree, n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple stmt, gimple region);
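/* For illustration, a sketch of the finally tree built by this first
   pass (T1, T2, L1, L2 are hypothetical):

	try {			// T1
	  L1:
	  try {			// T2
	    L2:
	  } finally { ... }
	} finally { ... }

   records L1 -> T1, T2 -> T1 and L2 -> T2, so outside_finally_tree
   can walk parent links from any label up through the enclosing
   GIMPLE_TRY_FINALLY nodes.  */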
/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gimple region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple stmt, gimple region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (stmt);
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (stmt), region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
      collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }
}


/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = (struct finally_tree_node *) htab_find (finally_tree, &n);
      if (!p)
	return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  int is_label;
};
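/* For illustration, a sketch of what gets queued (OUT hypothetical):

	try {
	  if (c) goto out;
	  return;
	} finally { F; }
	out:

   Both edges leave the try, so the queue gets one entry for OUT
   (index 0, OUT becoming dest_array[0]) and one for the GIMPLE_RETURN
   (index -1).  REPL_STMT will later become the branch into the lowered
   finally block, and CONT_STMT the jump that resumes at the original
   destination once F has run.  */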
/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gimple try_finally_expr;
  gimple top_p;

  /* While lowering a top_p, it is usually expanded into multiple statements,
     thus we need the following field to store them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  struct pointer_map_t *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  VEC(tree,heap) *dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;
  void **slot;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
	if (tf->goto_queue[i].stmt.g == stmt.g)
	  return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = pointer_map_create ();
      for (i = 0; i < tf->goto_queue_active; i++)
	{
	  slot = pointer_map_insert (tf->goto_queue_map,
				     tf->goto_queue[i].stmt.g);
	  gcc_assert (*slot == NULL);
	  *slot = &tf->goto_queue[i];
	}
    }

  slot = pointer_map_contains (tf->goto_queue_map, stmt.g);
  if (slot != NULL)
    return (((struct goto_queue_node *) *slot)->repl_stmt);

  return NULL;
}

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */
static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
				gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}

/* The real work of replace_goto_queue.  Returns with GSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
		      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
	{
	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
	  gsi_remove (gsi, false);
	  return;
	}
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler (stmt), tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      replace_goto_queue_stmt_list (gimple_eh_else_n_body (stmt), tf);
      replace_goto_queue_stmt_list (gimple_eh_else_e_body (stmt), tf);
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (eh_seq, tf);
}
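/* For illustration, a sketch of the cond-clause case above (OUT leaves
   the try block; LNEW is the artificial label):

	if (c) goto out; else goto D;

   becomes

	if (c) goto LNEW; else goto D;
	LNEW:
	  finally_tmp = 0;
	  goto finally_label;

   whereas a replacement consisting of a single goto is spliced
   directly into the GIMPLE_COND operand with no extra label.  */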
/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
         = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->is_label = is_label;
}

/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array)
    {
      tf->dest_array = VEC_alloc (tree, heap, 10);
      VEC_quick_push (tree, tf->dest_array, label);
      index = 0;
    }
  else
    {
      int n = VEC_length (tree, tf->dest_array);
      for (index = 0; index < n; ++index)
	if (VEC_index (tree, tf->dest_array, index) == label)
	  break;
      if (index == n)
	VEC_safe_push (tree, heap, tf->dest_array, label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have an easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      new_stmt.tp = gimple_op_ptr (stmt, 2);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt));
      new_stmt.tp = gimple_op_ptr (stmt, 3);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt));
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false);
      break;

    default:
      gcc_unreachable ();
    }
}
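/* For illustration: a GIMPLE_COND is queued once per escaping arm,
   keyed by the address of its label operand (treemple.tp), while a
   GIMPLE_GOTO or GIMPLE_RETURN is keyed by the statement itself
   (treemple.g).  Both keys land in the same union, which is why
   find_goto_replacement can compare entries uniformly through
   stmt.g.  */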
#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

	int x;
	int foo (void)
	{
	  x = 0;
	  try {
	    return x;
	  } finally {
	    x++;
	  }
	}

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (!q->repl_stmt)
    q->repl_stmt = gimple_seq_alloc ();

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
		     struct leh_tf_state *tf)
{
  gimple x;

  gcc_assert (q->is_label);
  if (!q->repl_stmt)
    q->repl_stmt = gimple_seq_alloc ();

  q->cont_stmt = gimple_build_goto (VEC_index (tree, tf->dest_array, q->index));

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  gimple x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}
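/* For illustration, a sketch of the propagation done below: noting a
   throw in region 4 of the outer chain 1 <- 2 (MUST_NOT_THROW) <- 3 <- 4
   sets bits 4, 3 and 2 in eh_region_may_contain_throw_map and stops
   there; the personality routine calls terminate inside a
   MUST_NOT_THROW region, so nothing propagates past it to region 1.
   The walk also stops as soon as bitmap_set_bit finds a bit already
   set, since everything outward is then already marked.  */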
/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
	break;
      region = region->outer;
      if (region == NULL)
	break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
	try { body; } catch { stuff; }
   to
	normal_sequence:
	  body;
	  over:
	eh_sequence:
	  landing_pad:
	  stuff;
	  goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gimple tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
	over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state)
{
  gimple region = NULL;
  gimple_seq new_seq;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}

/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline gimple
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return x;
    }
  return NULL;
}
/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

	try {
	  finally_block;
	} catch {
	  if (fintmp == eh_edge)
	    protect_cleanup_actions;
	}

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
			       struct leh_state *this_state,
			       struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x, eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state);
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
			GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, x);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}
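/* For illustration, a sketch of what the first option's output looks
   like in eh_seq once honor_protect_cleanup_actions has run:

	post_landing_pad:
	  try { finally_copy; } catch { MUST_NOT_THROW; }
	  resx;

   After this, tf->may_throw is false and the remaining lowering only
   has the normal (fallthru/goto/return) edges to deal with.  */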
/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
			      struct leh_tf_state *tf)
{
  tree lab;
  gimple x, eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	}
    }
  else
    {
      lower_eh_constructs_1 (state, finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  emit_post_landing_pad (&eh_seq, tf->region);

	  x = gimple_build_goto (lab);
	  gimple_seq_add_stmt (&eh_seq, x);
	}
    }
}

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  gimple x;
  gimple_seq finally;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  x = get_eh_else (finally);
  if (x)
    {
      if (tf->may_throw)
	finally = gimple_eh_else_e_body (x);
      else
	finally = gimple_eh_else_n_body (x);
    }

  lower_eh_constructs_1 (state, finally);

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
	 the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (VEC_index (tree, tf->dest_array, 0) == tf->fallthru_label)
	{
	  /* Reachable by goto to fallthru label only.  Redirect it
	     to the new label (already created, sadly), and do not
	     emit the final branch out, or the fallthru label.  */
	  tf->fallthru_label = NULL;
	  return;
	}
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x, eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state);
      lower_eh_constructs_1 (state, seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
	 since it is only emitted once.  */
      if (eh_else)
	seq = gimple_eh_else_e_body (eh_else);
      else
	seq = lower_try_finally_dup_block (finally, state);
      lower_eh_constructs_1 (state, seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
	struct goto_queue_node *q;
	tree label;
      } *labels;

      return_index = VEC_length (tree, tf->dest_array);
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
	{
	  index = q->index < 0 ? return_index : q->index;

	  if (!labels[index].q)
	    labels[index].q = q;
	}
      for (index = 0; index < return_index + 1; index++)
	{
	  tree lab;

	  q = labels[index].q;
	  if (! q)
	    continue;

	  lab = labels[index].label
	    = create_artificial_label (tf_loc);

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);

	  x = gimple_build_label (lab);
	  gimple_seq_add_stmt (&new_stmt, x);

	  seq = lower_try_finally_dup_block (finally, state);
	  lower_eh_constructs_1 (state, seq);
	  gimple_seq_add_seq (&new_stmt, seq);

	  gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
	  maybe_record_in_goto_queue (state, q->cont_stmt);
	}

      for (q = tf->goto_queue; q < qe; q++)
	{
	  tree lab;

	  index = q->index < 0 ? return_index : q->index;

	  if (labels[index].q == q)
	    continue;

	  lab = labels[index].label;

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);
	}

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */
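/* For illustration, a sketch of the switch machinery (indices
   hypothetical; see the index computation below): with a fallthru
   edge and one escaping goto, the finally block F is emitted once:

	<fallthru>	finally_tmp = 0; goto finally_label;
	<goto out>	finally_tmp = 1; goto finally_label;
	finally_label:
	  F;
	  switch (finally_tmp)
	    {
	    case 0: goto fallthru_label;
	    case 1: goto out;
	    }

   This trades the per-destination copies of F made by
   lower_try_finally_copy for a single dispatch.  */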
static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  VEC (tree,heap) *case_label_vec;
  gimple_seq switch_body;
  gimple x, eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  struct pointer_map_t *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  switch_body = gimple_seq_alloc ();
  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Lower the finally block itself.  */
  lower_eh_constructs_1 (state, finally);

  /* Prepare for switch statement generation.  */
  nlabels = VEC_length (tree, tf->dest_array);
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use VEC_quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec = VEC_alloc (tree, heap, ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node,
					      fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      VEC_quick_push (tree, case_label_vec, last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	  emit_resx (&eh_seq, tf->region);
	}

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      VEC_quick_push (tree, case_label_vec, last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod;
      int switch_id;
      unsigned int case_index;

      mod = gimple_seq_alloc ();

      if (q->index < 0)
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node,
						  return_index));
	  gimple_seq_add_stmt (&mod, x);
	  do_return_redirection (q, finally_label, mod);
	  switch_id = return_index;
	}
      else
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node, q->index));
	  gimple_seq_add_stmt (&mod, x);
	  do_goto_redirection (q, finally_label, mod, tf);
	  switch_id = q->index;
	}

      case_index = j + q->index;
      if (VEC_length (tree, case_label_vec) <= case_index
	  || !VEC_index (tree, case_label_vec, case_index))
	{
	  tree case_lab;
	  void **slot;
	  tmp = build_int_cst (integer_type_node, switch_id);
	  case_lab = build_case_label (tmp, NULL,
				       create_artificial_label (tf_loc));
	  /* We store the cont_stmt in the pointer map, so that we can recover
	     it in the loop below.  */
	  if (!cont_map)
	    cont_map = pointer_map_create ();
	  slot = pointer_map_insert (cont_map, case_lab);
	  *slot = q->cont_stmt;
	  VEC_quick_push (tree, case_label_vec, case_lab);
	}
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;
      void **slot;

      last_case = VEC_index (tree, case_label_vec, j);

      gcc_assert (last_case);
      gcc_assert (cont_map);

      slot = pointer_map_contains (cont_map, last_case);
      gcc_assert (slot);
      cont_stmt = *(gimple *) slot;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    pointer_map_destroy (cont_map);

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch_vec (finally_tmp, last_case,
					 case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}

/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */
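/* For illustration, a worked example of the heuristic below: a finally
   block of an estimated 5 insns with 3 destinations gives

	f_estimate  = (5 + 1) * 3 = 18
	sw_estimate = 10 + 2 * 3  = 16

   so when optimizing for size we emit the switch (18 < 16 fails),
   while at -O2 the "f_estimate < 100" test still permits duplicating
   such a small block.  */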
*/ 1523 1524 static bool 1525 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally) 1526 { 1527 int f_estimate, sw_estimate; 1528 gimple eh_else; 1529 1530 /* If there's an EH_ELSE involved, the exception path is separate 1531 and really doesn't come into play for this computation. */ 1532 eh_else = get_eh_else (finally); 1533 if (eh_else) 1534 { 1535 ndests -= may_throw; 1536 finally = gimple_eh_else_n_body (eh_else); 1537 } 1538 1539 if (!optimize) 1540 { 1541 gimple_stmt_iterator gsi; 1542 1543 if (ndests == 1) 1544 return true; 1545 1546 for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi)) 1547 { 1548 gimple stmt = gsi_stmt (gsi); 1549 if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt)) 1550 return false; 1551 } 1552 return true; 1553 } 1554 1555 /* Finally estimate N times, plus N gotos. */ 1556 f_estimate = count_insns_seq (finally, &eni_size_weights); 1557 f_estimate = (f_estimate + 1) * ndests; 1558 1559 /* Switch statement (cost 10), N variable assignments, N gotos. */ 1560 sw_estimate = 10 + 2 * ndests; 1561 1562 /* Optimize for size clearly wants our best guess. */ 1563 if (optimize_function_for_size_p (cfun)) 1564 return f_estimate < sw_estimate; 1565 1566 /* ??? These numbers are completely made up so far. */ 1567 if (optimize > 1) 1568 return f_estimate < 100 || f_estimate < sw_estimate * 2; 1569 else 1570 return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3; 1571 } 1572 1573 /* REG is the enclosing region for a possible cleanup region, or the region 1574 itself. Returns TRUE if such a region would be unreachable. 1575 1576 Cleanup regions within a must-not-throw region aren't actually reachable 1577 even if there are throwing stmts within them, because the personality 1578 routine will call terminate before unwinding. */ 1579 1580 static bool 1581 cleanup_is_dead_in (eh_region reg) 1582 { 1583 while (reg && reg->type == ERT_CLEANUP) 1584 reg = reg->outer; 1585 return (reg && reg->type == ERT_MUST_NOT_THROW); 1586 } 1587 1588 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_FINALLY nodes 1589 to a sequence of labels and blocks, plus the exception region trees 1590 that record all the magic. This is complicated by the need to 1591 arrange for the FINALLY block to be executed on all exits. */ 1592 1593 static gimple_seq 1594 lower_try_finally (struct leh_state *state, gimple tp) 1595 { 1596 struct leh_tf_state this_tf; 1597 struct leh_state this_state; 1598 int ndests; 1599 gimple_seq old_eh_seq; 1600 1601 /* Process the try block. */ 1602 1603 memset (&this_tf, 0, sizeof (this_tf)); 1604 this_tf.try_finally_expr = tp; 1605 this_tf.top_p = tp; 1606 this_tf.outer = state; 1607 if (using_eh_for_cleanups_p && !cleanup_is_dead_in (state->cur_region)) 1608 { 1609 this_tf.region = gen_eh_region_cleanup (state->cur_region); 1610 this_state.cur_region = this_tf.region; 1611 } 1612 else 1613 { 1614 this_tf.region = NULL; 1615 this_state.cur_region = state->cur_region; 1616 } 1617 1618 this_state.ehp_region = state->ehp_region; 1619 this_state.tf = &this_tf; 1620 1621 old_eh_seq = eh_seq; 1622 eh_seq = NULL; 1623 1624 lower_eh_constructs_1 (&this_state, gimple_try_eval(tp)); 1625 1626 /* Determine if the try block is escaped through the bottom. */ 1627 this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp)); 1628 1629 /* Determine if any exceptions are possible within the try block. 
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = VEC_length (tree, this_tf.dest_array);
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
				    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     block, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  VEC_free (tree, heap, this_tf.dest_array);
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    pointer_map_destroy (this_tf.goto_queue_map);

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
	eh_seq = old_eh_seq;
      else
	{
	  gimple_seq new_eh_seq = eh_seq;
	  eh_seq = old_eh_seq;
	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
	}
    }

  return this_tf.top_p_seq;
}

/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */
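/* For illustration, a sketch of lower_catch's output for a try with
   two handlers (labels hypothetical):

	body; goto over;

   and, in eh_seq:

	post_landing_pad:
	  eh_dispatch <region>;
	  resx;
	L1: handler1; goto over;
	L2: handler2; goto over;

   with OVER emitted after the body.  The edges from the eh_dispatch
   to L1/L2 are created once the CFG exists; see make_eh_dispatch_edges
   at the end of this file.  */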
*/ 1699 1700 static gimple_seq 1701 lower_catch (struct leh_state *state, gimple tp) 1702 { 1703 eh_region try_region = NULL; 1704 struct leh_state this_state = *state; 1705 gimple_stmt_iterator gsi; 1706 tree out_label; 1707 gimple_seq new_seq; 1708 gimple x; 1709 location_t try_catch_loc = gimple_location (tp); 1710 1711 if (flag_exceptions) 1712 { 1713 try_region = gen_eh_region_try (state->cur_region); 1714 this_state.cur_region = try_region; 1715 } 1716 1717 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp)); 1718 1719 if (!eh_region_may_contain_throw (try_region)) 1720 return gimple_try_eval (tp); 1721 1722 new_seq = NULL; 1723 emit_eh_dispatch (&new_seq, try_region); 1724 emit_resx (&new_seq, try_region); 1725 1726 this_state.cur_region = state->cur_region; 1727 this_state.ehp_region = try_region; 1728 1729 out_label = NULL; 1730 for (gsi = gsi_start (gimple_try_cleanup (tp)); 1731 !gsi_end_p (gsi); 1732 gsi_next (&gsi)) 1733 { 1734 eh_catch c; 1735 gimple gcatch; 1736 gimple_seq handler; 1737 1738 gcatch = gsi_stmt (gsi); 1739 c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch)); 1740 1741 handler = gimple_catch_handler (gcatch); 1742 lower_eh_constructs_1 (&this_state, handler); 1743 1744 c->label = create_artificial_label (UNKNOWN_LOCATION); 1745 x = gimple_build_label (c->label); 1746 gimple_seq_add_stmt (&new_seq, x); 1747 1748 gimple_seq_add_seq (&new_seq, handler); 1749 1750 if (gimple_seq_may_fallthru (new_seq)) 1751 { 1752 if (!out_label) 1753 out_label = create_artificial_label (try_catch_loc); 1754 1755 x = gimple_build_goto (out_label); 1756 gimple_seq_add_stmt (&new_seq, x); 1757 } 1758 if (!c->type_list) 1759 break; 1760 } 1761 1762 gimple_try_set_cleanup (tp, new_seq); 1763 1764 return frob_into_branch_around (tp, try_region, out_label); 1765 } 1766 1767 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a 1768 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception 1769 region trees that record all the magic. */ 1770 1771 static gimple_seq 1772 lower_eh_filter (struct leh_state *state, gimple tp) 1773 { 1774 struct leh_state this_state = *state; 1775 eh_region this_region = NULL; 1776 gimple inner, x; 1777 gimple_seq new_seq; 1778 1779 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp)); 1780 1781 if (flag_exceptions) 1782 { 1783 this_region = gen_eh_region_allowed (state->cur_region, 1784 gimple_eh_filter_types (inner)); 1785 this_state.cur_region = this_region; 1786 } 1787 1788 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp)); 1789 1790 if (!eh_region_may_contain_throw (this_region)) 1791 return gimple_try_eval (tp); 1792 1793 new_seq = NULL; 1794 this_state.cur_region = state->cur_region; 1795 this_state.ehp_region = this_region; 1796 1797 emit_eh_dispatch (&new_seq, this_region); 1798 emit_resx (&new_seq, this_region); 1799 1800 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION); 1801 x = gimple_build_label (this_region->u.allowed.label); 1802 gimple_seq_add_stmt (&new_seq, x); 1803 1804 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure (inner)); 1805 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner)); 1806 1807 gimple_try_set_cleanup (tp, new_seq); 1808 1809 return frob_into_branch_around (tp, this_region, NULL); 1810 } 1811 1812 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with 1813 an GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks, 1814 plus the exception region trees that record all the magic. 
*/ 1815 1816 static gimple_seq 1817 lower_eh_must_not_throw (struct leh_state *state, gimple tp) 1818 { 1819 struct leh_state this_state = *state; 1820 1821 if (flag_exceptions) 1822 { 1823 gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp)); 1824 eh_region this_region; 1825 1826 this_region = gen_eh_region_must_not_throw (state->cur_region); 1827 this_region->u.must_not_throw.failure_decl 1828 = gimple_eh_must_not_throw_fndecl (inner); 1829 this_region->u.must_not_throw.failure_loc = gimple_location (tp); 1830 1831 /* In order to get mangling applied to this decl, we must mark it 1832 used now. Otherwise, pass_ipa_free_lang_data won't think it 1833 needs to happen. */ 1834 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1; 1835 1836 this_state.cur_region = this_region; 1837 } 1838 1839 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp)); 1840 1841 return gimple_try_eval (tp); 1842 } 1843 1844 /* Implement a cleanup expression. This is similar to try-finally, 1845 except that we only execute the cleanup block for exception edges. */ 1846 1847 static gimple_seq 1848 lower_cleanup (struct leh_state *state, gimple tp) 1849 { 1850 struct leh_state this_state = *state; 1851 eh_region this_region = NULL; 1852 struct leh_tf_state fake_tf; 1853 gimple_seq result; 1854 bool cleanup_dead = cleanup_is_dead_in (state->cur_region); 1855 1856 if (flag_exceptions && !cleanup_dead) 1857 { 1858 this_region = gen_eh_region_cleanup (state->cur_region); 1859 this_state.cur_region = this_region; 1860 } 1861 1862 lower_eh_constructs_1 (&this_state, gimple_try_eval (tp)); 1863 1864 if (cleanup_dead || !eh_region_may_contain_throw (this_region)) 1865 return gimple_try_eval (tp); 1866 1867 /* Build enough of a try-finally state so that we can reuse 1868 honor_protect_cleanup_actions. */ 1869 memset (&fake_tf, 0, sizeof (fake_tf)); 1870 fake_tf.top_p = fake_tf.try_finally_expr = tp; 1871 fake_tf.outer = state; 1872 fake_tf.region = this_region; 1873 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp)); 1874 fake_tf.may_throw = true; 1875 1876 honor_protect_cleanup_actions (state, NULL, &fake_tf); 1877 1878 if (fake_tf.may_throw) 1879 { 1880 /* In this case honor_protect_cleanup_actions had nothing to do, 1881 and we should process this normally. */ 1882 lower_eh_constructs_1 (state, gimple_try_cleanup (tp)); 1883 result = frob_into_branch_around (tp, this_region, 1884 fake_tf.fallthru_label); 1885 } 1886 else 1887 { 1888 /* In this case honor_protect_cleanup_actions did nearly all of 1889 the work. All we have left is to append the fallthru_label. */ 1890 1891 result = gimple_try_eval (tp); 1892 if (fake_tf.fallthru_label) 1893 { 1894 gimple x = gimple_build_label (fake_tf.fallthru_label); 1895 gimple_seq_add_stmt (&result, x); 1896 } 1897 } 1898 return result; 1899 } 1900 1901 /* Main loop for lowering eh constructs. Also moves gsi to the next 1902 statement. */ 1903 1904 static void 1905 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi) 1906 { 1907 gimple_seq replace; 1908 gimple x; 1909 gimple stmt = gsi_stmt (*gsi); 1910 1911 switch (gimple_code (stmt)) 1912 { 1913 case GIMPLE_CALL: 1914 { 1915 tree fndecl = gimple_call_fndecl (stmt); 1916 tree rhs, lhs; 1917 1918 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL) 1919 switch (DECL_FUNCTION_CODE (fndecl)) 1920 { 1921 case BUILT_IN_EH_POINTER: 1922 /* The front end may have generated a call to 1923 __builtin_eh_pointer (0) within a catch region. 
Replace
1924            this zero argument with the current catch region number.  */
1925         if (state->ehp_region)
1926           {
1927             tree nr = build_int_cst (integer_type_node,
1928                                      state->ehp_region->index);
1929             gimple_call_set_arg (stmt, 0, nr);
1930           }
1931         else
1932           {
1933             /* The user has done something silly.  Remove it.  */
1934             rhs = null_pointer_node;
1935             goto do_replace;
1936           }
1937         break;
1938
1939       case BUILT_IN_EH_FILTER:
1940         /* ??? This should never appear, but since it's a builtin it
1941            is accessible to abuse by users.  Just remove it and
1942            replace the use with the arbitrary value zero.  */
1943         rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
1944       do_replace:
1945         lhs = gimple_call_lhs (stmt);
1946         x = gimple_build_assign (lhs, rhs);
1947         gsi_insert_before (gsi, x, GSI_SAME_STMT);
1948         /* FALLTHRU */
1949
1950       case BUILT_IN_EH_COPY_VALUES:
1951         /* Likewise this should not appear.  Remove it.  */
1952         gsi_remove (gsi, true);
1953         return;
1954
1955       default:
1956         break;
1957       }
1958       }
1959       /* FALLTHRU */
1960
1961     case GIMPLE_ASSIGN:
1962       /* If the stmt can throw, use a new temporary for the assignment
1963          to a LHS.  This makes sure the old value of the LHS is
1964          available on the EH edge.  Only do so for statements that
1965          potentially fall through (e.g. no noreturn calls), otherwise
1966          this new assignment might create fake fallthru regions.  */
1967       if (stmt_could_throw_p (stmt)
1968           && gimple_has_lhs (stmt)
1969           && gimple_stmt_may_fallthru (stmt)
1970           && !tree_could_throw_p (gimple_get_lhs (stmt))
1971           && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
1972         {
1973           tree lhs = gimple_get_lhs (stmt);
1974           tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
1975           gimple s = gimple_build_assign (lhs, tmp);
1976           gimple_set_location (s, gimple_location (stmt));
1977           gimple_set_block (s, gimple_block (stmt));
1978           gimple_set_lhs (stmt, tmp);
1979           if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
1980               || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
1981             DECL_GIMPLE_REG_P (tmp) = 1;
1982           gsi_insert_after (gsi, s, GSI_SAME_STMT);
1983         }
1984       /* Look for things that can throw exceptions, and record them.  */
1985       if (state->cur_region && stmt_could_throw_p (stmt))
1986         {
1987           record_stmt_eh_region (state->cur_region, stmt);
1988           note_eh_region_may_contain_throw (state->cur_region);
1989         }
1990       break;
1991
1992     case GIMPLE_COND:
1993     case GIMPLE_GOTO:
1994     case GIMPLE_RETURN:
1995       maybe_record_in_goto_queue (state, stmt);
1996       break;
1997
1998     case GIMPLE_SWITCH:
1999       verify_norecord_switch_expr (state, stmt);
2000       break;
2001
2002     case GIMPLE_TRY:
2003       if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
2004         replace = lower_try_finally (state, stmt);
2005       else
2006         {
2007           x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
2008           if (!x)
2009             {
2010               replace = gimple_try_eval (stmt);
2011               lower_eh_constructs_1 (state, replace);
2012             }
2013           else
2014             switch (gimple_code (x))
2015               {
2016               case GIMPLE_CATCH:
2017                 replace = lower_catch (state, stmt);
2018                 break;
2019               case GIMPLE_EH_FILTER:
2020                 replace = lower_eh_filter (state, stmt);
2021                 break;
2022               case GIMPLE_EH_MUST_NOT_THROW:
2023                 replace = lower_eh_must_not_throw (state, stmt);
2024                 break;
2025               case GIMPLE_EH_ELSE:
2026                 /* This code is only valid with GIMPLE_TRY_FINALLY.  */
2027                 gcc_unreachable ();
2028               default:
2029                 replace = lower_cleanup (state, stmt);
2030                 break;
2031               }
2032         }
2033
2034       /* Remove the old stmt and insert the transformed sequence
2035          instead.  */
2036       gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2037       gsi_remove (gsi, true);
2038
2039       /* Return since we don't want gsi_next ().  */
2040       return;
2041
2042     case GIMPLE_EH_ELSE:
2043       /* We should be eliminating this in lower_try_finally et al.  */
2044       gcc_unreachable ();
2045
2046     default:
2047       /* A type, a decl, or some kind of statement that we're not
2048          interested in.  Don't walk them.  */
2049       break;
2050     }
2051
2052   gsi_next (gsi);
2053 }
2054
2055 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2.  */
2056
2057 static void
2058 lower_eh_constructs_1 (struct leh_state *state, gimple_seq seq)
2059 {
2060   gimple_stmt_iterator gsi;
2061   for (gsi = gsi_start (seq); !gsi_end_p (gsi);)
2062     lower_eh_constructs_2 (state, &gsi);
2063 }
2064
2065 static unsigned int
2066 lower_eh_constructs (void)
2067 {
2068   struct leh_state null_state;
2069   gimple_seq bodyp;
2070
2071   bodyp = gimple_body (current_function_decl);
2072   if (bodyp == NULL)
2073     return 0;
2074
2075   finally_tree = htab_create (31, struct_ptr_hash, struct_ptr_eq, free);
2076   eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2077   memset (&null_state, 0, sizeof (null_state));
2078
2079   collect_finally_tree_1 (bodyp, NULL);
2080   lower_eh_constructs_1 (&null_state, bodyp);
2081
2082   /* We assume there's a return statement, or something, at the end of
2083      the function, and thus plopping the EH sequence afterward won't
2084      change anything.  */
2085   gcc_assert (!gimple_seq_may_fallthru (bodyp));
2086   gimple_seq_add_seq (&bodyp, eh_seq);
2087
2088   /* We assume that since BODYP already existed, adding EH_SEQ to it
2089      didn't change its value, and we don't have to re-set the function.  */
2090   gcc_assert (bodyp == gimple_body (current_function_decl));
2091
2092   htab_delete (finally_tree);
2093   BITMAP_FREE (eh_region_may_contain_throw_map);
2094   eh_seq = NULL;
2095
2096   /* If this function needs a language-specific EH personality routine
2097      and the frontend didn't already set one, do so now.  */
2098   if (function_needs_eh_personality (cfun) == eh_personality_lang
2099       && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2100     DECL_FUNCTION_PERSONALITY (current_function_decl)
2101       = lang_hooks.eh_personality ();
2102
2103   return 0;
2104 }
2105
2106 struct gimple_opt_pass pass_lower_eh =
2107 {
2108  {
2109   GIMPLE_PASS,
2110   "eh",                         /* name */
2111   NULL,                         /* gate */
2112   lower_eh_constructs,          /* execute */
2113   NULL,                         /* sub */
2114   NULL,                         /* next */
2115   0,                            /* static_pass_number */
2116   TV_TREE_EH,                   /* tv_id */
2117   PROP_gimple_lcf,              /* properties_required */
2118   PROP_gimple_leh,              /* properties_provided */
2119   0,                            /* properties_destroyed */
2120   0,                            /* todo_flags_start */
2121   0                             /* todo_flags_finish */
2122  }
2123 };
2124
2125 /* Create the multiple edges from an EH_DISPATCH statement to all of
2126    the possible handlers for its EH region.  Return true if there's
2127    no fallthru edge; false if there is.  */
2128
2129 bool
2130 make_eh_dispatch_edges (gimple stmt)
2131 {
2132   eh_region r;
2133   eh_catch c;
2134   basic_block src, dst;
2135
2136   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2137   src = gimple_bb (stmt);
2138
2139   switch (r->type)
2140     {
2141     case ERT_TRY:
2142       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2143         {
2144           dst = label_to_block (c->label);
2145           make_edge (src, dst, 0);
2146
2147           /* A catch-all handler doesn't have a fallthru.
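              (In C++, for instance, a "catch (...)" clause is recorded
              with a NULL type_list, so once it is seen no later handler
              in this region is reachable.)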
*/ 2148 if (c->type_list == NULL) 2149 return false; 2150 } 2151 break; 2152 2153 case ERT_ALLOWED_EXCEPTIONS: 2154 dst = label_to_block (r->u.allowed.label); 2155 make_edge (src, dst, 0); 2156 break; 2157 2158 default: 2159 gcc_unreachable (); 2160 } 2161 2162 return true; 2163 } 2164 2165 /* Create the single EH edge from STMT to its nearest landing pad, 2166 if there is such a landing pad within the current function. */ 2167 2168 void 2169 make_eh_edges (gimple stmt) 2170 { 2171 basic_block src, dst; 2172 eh_landing_pad lp; 2173 int lp_nr; 2174 2175 lp_nr = lookup_stmt_eh_lp (stmt); 2176 if (lp_nr <= 0) 2177 return; 2178 2179 lp = get_eh_landing_pad_from_number (lp_nr); 2180 gcc_assert (lp != NULL); 2181 2182 src = gimple_bb (stmt); 2183 dst = label_to_block (lp->post_landing_pad); 2184 make_edge (src, dst, EDGE_EH); 2185 } 2186 2187 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree; 2188 do not actually perform the final edge redirection. 2189 2190 CHANGE_REGION is true when we're being called from cleanup_empty_eh and 2191 we intend to change the destination EH region as well; this means 2192 EH_LANDING_PAD_NR must already be set on the destination block label. 2193 If false, we're being called from generic cfg manipulation code and we 2194 should preserve our place within the region tree. */ 2195 2196 static void 2197 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region) 2198 { 2199 eh_landing_pad old_lp, new_lp; 2200 basic_block old_bb; 2201 gimple throw_stmt; 2202 int old_lp_nr, new_lp_nr; 2203 tree old_label, new_label; 2204 edge_iterator ei; 2205 edge e; 2206 2207 old_bb = edge_in->dest; 2208 old_label = gimple_block_label (old_bb); 2209 old_lp_nr = EH_LANDING_PAD_NR (old_label); 2210 gcc_assert (old_lp_nr > 0); 2211 old_lp = get_eh_landing_pad_from_number (old_lp_nr); 2212 2213 throw_stmt = last_stmt (edge_in->src); 2214 gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr); 2215 2216 new_label = gimple_block_label (new_bb); 2217 2218 /* Look for an existing region that might be using NEW_BB already. */ 2219 new_lp_nr = EH_LANDING_PAD_NR (new_label); 2220 if (new_lp_nr) 2221 { 2222 new_lp = get_eh_landing_pad_from_number (new_lp_nr); 2223 gcc_assert (new_lp); 2224 2225 /* Unless CHANGE_REGION is true, the new and old landing pad 2226 had better be associated with the same EH region. */ 2227 gcc_assert (change_region || new_lp->region == old_lp->region); 2228 } 2229 else 2230 { 2231 new_lp = NULL; 2232 gcc_assert (!change_region); 2233 } 2234 2235 /* Notice when we redirect the last EH edge away from OLD_BB. */ 2236 FOR_EACH_EDGE (e, ei, old_bb->preds) 2237 if (e != edge_in && (e->flags & EDGE_EH)) 2238 break; 2239 2240 if (new_lp) 2241 { 2242 /* NEW_LP already exists. If there are still edges into OLD_LP, 2243 there's nothing to do with the EH tree. If there are no more 2244 edges into OLD_LP, then we want to remove OLD_LP as it is unused. 2245 If CHANGE_REGION is true, then our caller is expecting to remove 2246 the landing pad. */ 2247 if (e == NULL && !change_region) 2248 remove_eh_landing_pad (old_lp); 2249 } 2250 else 2251 { 2252 /* No correct landing pad exists. If there are no more edges 2253 into OLD_LP, then we can simply re-use the existing landing pad. 2254 Otherwise, we have to create a new landing pad. 
*/ 2255 if (e == NULL) 2256 { 2257 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0; 2258 new_lp = old_lp; 2259 } 2260 else 2261 new_lp = gen_eh_landing_pad (old_lp->region); 2262 new_lp->post_landing_pad = new_label; 2263 EH_LANDING_PAD_NR (new_label) = new_lp->index; 2264 } 2265 2266 /* Maybe move the throwing statement to the new region. */ 2267 if (old_lp != new_lp) 2268 { 2269 remove_stmt_from_eh_lp (throw_stmt); 2270 add_stmt_to_eh_lp (throw_stmt, new_lp->index); 2271 } 2272 } 2273 2274 /* Redirect EH edge E to NEW_BB. */ 2275 2276 edge 2277 redirect_eh_edge (edge edge_in, basic_block new_bb) 2278 { 2279 redirect_eh_edge_1 (edge_in, new_bb, false); 2280 return ssa_redirect_edge (edge_in, new_bb); 2281 } 2282 2283 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the 2284 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB. 2285 The actual edge update will happen in the caller. */ 2286 2287 void 2288 redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb) 2289 { 2290 tree new_lab = gimple_block_label (new_bb); 2291 bool any_changed = false; 2292 basic_block old_bb; 2293 eh_region r; 2294 eh_catch c; 2295 2296 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt)); 2297 switch (r->type) 2298 { 2299 case ERT_TRY: 2300 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) 2301 { 2302 old_bb = label_to_block (c->label); 2303 if (old_bb == e->dest) 2304 { 2305 c->label = new_lab; 2306 any_changed = true; 2307 } 2308 } 2309 break; 2310 2311 case ERT_ALLOWED_EXCEPTIONS: 2312 old_bb = label_to_block (r->u.allowed.label); 2313 gcc_assert (old_bb == e->dest); 2314 r->u.allowed.label = new_lab; 2315 any_changed = true; 2316 break; 2317 2318 default: 2319 gcc_unreachable (); 2320 } 2321 2322 gcc_assert (any_changed); 2323 } 2324 2325 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */ 2326 2327 bool 2328 operation_could_trap_helper_p (enum tree_code op, 2329 bool fp_operation, 2330 bool honor_trapv, 2331 bool honor_nans, 2332 bool honor_snans, 2333 tree divisor, 2334 bool *handled) 2335 { 2336 *handled = true; 2337 switch (op) 2338 { 2339 case TRUNC_DIV_EXPR: 2340 case CEIL_DIV_EXPR: 2341 case FLOOR_DIV_EXPR: 2342 case ROUND_DIV_EXPR: 2343 case EXACT_DIV_EXPR: 2344 case CEIL_MOD_EXPR: 2345 case FLOOR_MOD_EXPR: 2346 case ROUND_MOD_EXPR: 2347 case TRUNC_MOD_EXPR: 2348 case RDIV_EXPR: 2349 if (honor_snans || honor_trapv) 2350 return true; 2351 if (fp_operation) 2352 return flag_trapping_math; 2353 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor)) 2354 return true; 2355 return false; 2356 2357 case LT_EXPR: 2358 case LE_EXPR: 2359 case GT_EXPR: 2360 case GE_EXPR: 2361 case LTGT_EXPR: 2362 /* Some floating point comparisons may trap. */ 2363 return honor_nans; 2364 2365 case EQ_EXPR: 2366 case NE_EXPR: 2367 case UNORDERED_EXPR: 2368 case ORDERED_EXPR: 2369 case UNLT_EXPR: 2370 case UNLE_EXPR: 2371 case UNGT_EXPR: 2372 case UNGE_EXPR: 2373 case UNEQ_EXPR: 2374 return honor_snans; 2375 2376 case CONVERT_EXPR: 2377 case FIX_TRUNC_EXPR: 2378 /* Conversion of floating point might trap. */ 2379 return honor_nans; 2380 2381 case NEGATE_EXPR: 2382 case ABS_EXPR: 2383 case CONJ_EXPR: 2384 /* These operations don't trap with floating point. */ 2385 if (honor_trapv) 2386 return true; 2387 return false; 2388 2389 case PLUS_EXPR: 2390 case MINUS_EXPR: 2391 case MULT_EXPR: 2392 /* Any floating arithmetic may trap. 
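         For example, with -ftrapping-math a double addition that
         overflows, such as DBL_MAX + DBL_MAX, may raise the IEEE
         overflow exception, so PLUS_EXPR must be treated as
         potentially trapping.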
*/ 2393 if (fp_operation && flag_trapping_math) 2394 return true; 2395 if (honor_trapv) 2396 return true; 2397 return false; 2398 2399 case COMPLEX_EXPR: 2400 case CONSTRUCTOR: 2401 /* Constructing an object cannot trap. */ 2402 return false; 2403 2404 default: 2405 /* Any floating arithmetic may trap. */ 2406 if (fp_operation && flag_trapping_math) 2407 return true; 2408 2409 *handled = false; 2410 return false; 2411 } 2412 } 2413 2414 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied 2415 on floating-point values. HONOR_TRAPV is true if OP is applied on integer 2416 type operands that may trap. If OP is a division operator, DIVISOR contains 2417 the value of the divisor. */ 2418 2419 bool 2420 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv, 2421 tree divisor) 2422 { 2423 bool honor_nans = (fp_operation && flag_trapping_math 2424 && !flag_finite_math_only); 2425 bool honor_snans = fp_operation && flag_signaling_nans != 0; 2426 bool handled; 2427 2428 if (TREE_CODE_CLASS (op) != tcc_comparison 2429 && TREE_CODE_CLASS (op) != tcc_unary 2430 && TREE_CODE_CLASS (op) != tcc_binary) 2431 return false; 2432 2433 return operation_could_trap_helper_p (op, fp_operation, honor_trapv, 2434 honor_nans, honor_snans, divisor, 2435 &handled); 2436 } 2437 2438 /* Return true if EXPR can trap, as in dereferencing an invalid pointer 2439 location or floating point arithmetic. C.f. the rtl version, may_trap_p. 2440 This routine expects only GIMPLE lhs or rhs input. */ 2441 2442 bool 2443 tree_could_trap_p (tree expr) 2444 { 2445 enum tree_code code; 2446 bool fp_operation = false; 2447 bool honor_trapv = false; 2448 tree t, base, div = NULL_TREE; 2449 2450 if (!expr) 2451 return false; 2452 2453 code = TREE_CODE (expr); 2454 t = TREE_TYPE (expr); 2455 2456 if (t) 2457 { 2458 if (COMPARISON_CLASS_P (expr)) 2459 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0))); 2460 else 2461 fp_operation = FLOAT_TYPE_P (t); 2462 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t); 2463 } 2464 2465 if (TREE_CODE_CLASS (code) == tcc_binary) 2466 div = TREE_OPERAND (expr, 1); 2467 if (operation_could_trap_p (code, fp_operation, honor_trapv, div)) 2468 return true; 2469 2470 restart: 2471 switch (code) 2472 { 2473 case TARGET_MEM_REF: 2474 if (TREE_CODE (TMR_BASE (expr)) == ADDR_EXPR 2475 && !TMR_INDEX (expr) && !TMR_INDEX2 (expr)) 2476 return false; 2477 return !TREE_THIS_NOTRAP (expr); 2478 2479 case COMPONENT_REF: 2480 case REALPART_EXPR: 2481 case IMAGPART_EXPR: 2482 case BIT_FIELD_REF: 2483 case VIEW_CONVERT_EXPR: 2484 case WITH_SIZE_EXPR: 2485 expr = TREE_OPERAND (expr, 0); 2486 code = TREE_CODE (expr); 2487 goto restart; 2488 2489 case ARRAY_RANGE_REF: 2490 base = TREE_OPERAND (expr, 0); 2491 if (tree_could_trap_p (base)) 2492 return true; 2493 if (TREE_THIS_NOTRAP (expr)) 2494 return false; 2495 return !range_in_array_bounds_p (expr); 2496 2497 case ARRAY_REF: 2498 base = TREE_OPERAND (expr, 0); 2499 if (tree_could_trap_p (base)) 2500 return true; 2501 if (TREE_THIS_NOTRAP (expr)) 2502 return false; 2503 return !in_array_bounds_p (expr); 2504 2505 case MEM_REF: 2506 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR) 2507 return false; 2508 /* Fallthru. */ 2509 case INDIRECT_REF: 2510 return !TREE_THIS_NOTRAP (expr); 2511 2512 case ASM_EXPR: 2513 return TREE_THIS_VOLATILE (expr); 2514 2515 case CALL_EXPR: 2516 t = get_callee_fndecl (expr); 2517 /* Assume that calls to weak functions may trap. 
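         (An undefined weak symbol resolves to address zero, so a call
         through it is effectively a NULL function pointer dereference;
         e.g. "extern void hook (void) __attribute__ ((weak));".)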
*/
2518       if (!t || !DECL_P (t))
2519         return true;
2520       if (DECL_WEAK (t))
2521         return tree_could_trap_p (t);
2522       return false;
2523
2524     case FUNCTION_DECL:
2525       /* Assume that accesses to weak functions may trap, unless we know
2526          they are certainly defined in current TU or in some other
2527          LTO partition.  */
2528       if (DECL_WEAK (expr))
2529         {
2530           struct cgraph_node *node;
2531           if (!DECL_EXTERNAL (expr))
2532             return false;
2533           node = cgraph_function_node (cgraph_get_node (expr), NULL);
2534           if (node && node->in_other_partition)
2535             return false;
2536           return true;
2537         }
2538       return false;
2539
2540     case VAR_DECL:
2541       /* Assume that accesses to weak vars may trap, unless we know
2542          they are certainly defined in current TU or in some other
2543          LTO partition.  */
2544       if (DECL_WEAK (expr))
2545         {
2546           struct varpool_node *node;
2547           if (!DECL_EXTERNAL (expr))
2548             return false;
2549           node = varpool_variable_node (varpool_get_node (expr), NULL);
2550           if (node && node->in_other_partition)
2551             return false;
2552           return true;
2553         }
2554       return false;
2555
2556     default:
2557       return false;
2558     }
2559 }
2560
2561
2562 /* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
2563    an assignment or a conditional) may throw.  */
2564
2565 static bool
2566 stmt_could_throw_1_p (gimple stmt)
2567 {
2568   enum tree_code code = gimple_expr_code (stmt);
2569   bool honor_nans = false;
2570   bool honor_snans = false;
2571   bool fp_operation = false;
2572   bool honor_trapv = false;
2573   tree t;
2574   size_t i;
2575   bool handled, ret;
2576
2577   if (TREE_CODE_CLASS (code) == tcc_comparison
2578       || TREE_CODE_CLASS (code) == tcc_unary
2579       || TREE_CODE_CLASS (code) == tcc_binary)
2580     {
2581       if (is_gimple_assign (stmt)
2582           && TREE_CODE_CLASS (code) == tcc_comparison)
2583         t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2584       else if (gimple_code (stmt) == GIMPLE_COND)
2585         t = TREE_TYPE (gimple_cond_lhs (stmt));
2586       else
2587         t = gimple_expr_type (stmt);
2588       fp_operation = FLOAT_TYPE_P (t);
2589       if (fp_operation)
2590         {
2591           honor_nans = flag_trapping_math && !flag_finite_math_only;
2592           honor_snans = flag_signaling_nans != 0;
2593         }
2594       else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2595         honor_trapv = true;
2596     }
2597
2598   /* Check if the main expression may trap.  */
2599   t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2600   ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2601                                        honor_nans, honor_snans, t,
2602                                        &handled);
2603   if (handled)
2604     return ret;
2605
2606   /* If the expression does not trap, see if any of the individual operands may
2607      trap.  */
2608   for (i = 0; i < gimple_num_ops (stmt); i++)
2609     if (tree_could_trap_p (gimple_op (stmt, i)))
2610       return true;
2611
2612   return false;
2613 }
2614
2615
2616 /* Return true if statement STMT could throw an exception.  */
2617
2618 bool
2619 stmt_could_throw_p (gimple stmt)
2620 {
2621   if (!flag_exceptions)
2622     return false;
2623
2624   /* The only statements that can throw an exception are assignments,
2625      conditionals, calls, resx, and asms.
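     For example, under -fnon-call-exceptions a memory load such as

       tmp_1 = *p_2;

     may fault and hence may throw; without that flag only the call,
     resx, and volatile-asm cases below remain.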
*/
2626   switch (gimple_code (stmt))
2627     {
2628     case GIMPLE_RESX:
2629       return true;
2630
2631     case GIMPLE_CALL:
2632       return !gimple_call_nothrow_p (stmt);
2633
2634     case GIMPLE_ASSIGN:
2635     case GIMPLE_COND:
2636       if (!cfun->can_throw_non_call_exceptions)
2637         return false;
2638       return stmt_could_throw_1_p (stmt);
2639
2640     case GIMPLE_ASM:
2641       if (!cfun->can_throw_non_call_exceptions)
2642         return false;
2643       return gimple_asm_volatile_p (stmt);
2644
2645     default:
2646       return false;
2647     }
2648 }
2649
2650
2651 /* Return true if expression T could throw an exception.  */
2652
2653 bool
2654 tree_could_throw_p (tree t)
2655 {
2656   if (!flag_exceptions)
2657     return false;
2658   if (TREE_CODE (t) == MODIFY_EXPR)
2659     {
2660       if (cfun->can_throw_non_call_exceptions
2661           && tree_could_trap_p (TREE_OPERAND (t, 0)))
2662         return true;
2663       t = TREE_OPERAND (t, 1);
2664     }
2665
2666   if (TREE_CODE (t) == WITH_SIZE_EXPR)
2667     t = TREE_OPERAND (t, 0);
2668   if (TREE_CODE (t) == CALL_EXPR)
2669     return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2670   if (cfun->can_throw_non_call_exceptions)
2671     return tree_could_trap_p (t);
2672   return false;
2673 }
2674
2675 /* Return true if STMT can throw an exception that is not caught within
2676    the current function (CFUN).  */
2677
2678 bool
2679 stmt_can_throw_external (gimple stmt)
2680 {
2681   int lp_nr;
2682
2683   if (!stmt_could_throw_p (stmt))
2684     return false;
2685
2686   lp_nr = lookup_stmt_eh_lp (stmt);
2687   return lp_nr == 0;
2688 }
2689
2690 /* Return true if STMT can throw an exception that is caught within
2691    the current function (CFUN).  */
2692
2693 bool
2694 stmt_can_throw_internal (gimple stmt)
2695 {
2696   int lp_nr;
2697
2698   if (!stmt_could_throw_p (stmt))
2699     return false;
2700
2701   lp_nr = lookup_stmt_eh_lp (stmt);
2702   return lp_nr > 0;
2703 }
2704
2705 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2706    remove any entry it might have from the EH table.  Return true if
2707    any change was made.  */
2708
2709 bool
2710 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2711 {
2712   if (stmt_could_throw_p (stmt))
2713     return false;
2714   return remove_stmt_from_eh_lp_fn (ifun, stmt);
2715 }
2716
2717 /* Likewise, but always use the current function.  */
2718
2719 bool
2720 maybe_clean_eh_stmt (gimple stmt)
2721 {
2722   return maybe_clean_eh_stmt_fn (cfun, stmt);
2723 }
2724
2725 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2726    OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2727    in the table if it should be in there.  Return TRUE if a replacement was
2728    done that may require an EH edge purge.  */
2729
2730 bool
2731 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2732 {
2733   int lp_nr = lookup_stmt_eh_lp (old_stmt);
2734
2735   if (lp_nr != 0)
2736     {
2737       bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2738
2739       if (new_stmt == old_stmt && new_stmt_could_throw)
2740         return false;
2741
2742       remove_stmt_from_eh_lp (old_stmt);
2743       if (new_stmt_could_throw)
2744         {
2745           add_stmt_to_eh_lp (new_stmt, lp_nr);
2746           return false;
2747         }
2748       else
2749         return true;
2750     }
2751
2752   return false;
2753 }
2754
2755 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2756    in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
2757    operand is the return value of duplicate_eh_regions.
*/ 2758 2759 bool 2760 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt, 2761 struct function *old_fun, gimple old_stmt, 2762 struct pointer_map_t *map, int default_lp_nr) 2763 { 2764 int old_lp_nr, new_lp_nr; 2765 void **slot; 2766 2767 if (!stmt_could_throw_p (new_stmt)) 2768 return false; 2769 2770 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt); 2771 if (old_lp_nr == 0) 2772 { 2773 if (default_lp_nr == 0) 2774 return false; 2775 new_lp_nr = default_lp_nr; 2776 } 2777 else if (old_lp_nr > 0) 2778 { 2779 eh_landing_pad old_lp, new_lp; 2780 2781 old_lp = VEC_index (eh_landing_pad, old_fun->eh->lp_array, old_lp_nr); 2782 slot = pointer_map_contains (map, old_lp); 2783 new_lp = (eh_landing_pad) *slot; 2784 new_lp_nr = new_lp->index; 2785 } 2786 else 2787 { 2788 eh_region old_r, new_r; 2789 2790 old_r = VEC_index (eh_region, old_fun->eh->region_array, -old_lp_nr); 2791 slot = pointer_map_contains (map, old_r); 2792 new_r = (eh_region) *slot; 2793 new_lp_nr = -new_r->index; 2794 } 2795 2796 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr); 2797 return true; 2798 } 2799 2800 /* Similar, but both OLD_STMT and NEW_STMT are within the current function, 2801 and thus no remapping is required. */ 2802 2803 bool 2804 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt) 2805 { 2806 int lp_nr; 2807 2808 if (!stmt_could_throw_p (new_stmt)) 2809 return false; 2810 2811 lp_nr = lookup_stmt_eh_lp (old_stmt); 2812 if (lp_nr == 0) 2813 return false; 2814 2815 add_stmt_to_eh_lp (new_stmt, lp_nr); 2816 return true; 2817 } 2818 2819 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of 2820 GIMPLE_TRY) that are similar enough to be considered the same. Currently 2821 this only handles handlers consisting of a single call, as that's the 2822 important case for C++: a destructor call for a particular object showing 2823 up in multiple handlers. */ 2824 2825 static bool 2826 same_handler_p (gimple_seq oneh, gimple_seq twoh) 2827 { 2828 gimple_stmt_iterator gsi; 2829 gimple ones, twos; 2830 unsigned int ai; 2831 2832 gsi = gsi_start (oneh); 2833 if (!gsi_one_before_end_p (gsi)) 2834 return false; 2835 ones = gsi_stmt (gsi); 2836 2837 gsi = gsi_start (twoh); 2838 if (!gsi_one_before_end_p (gsi)) 2839 return false; 2840 twos = gsi_stmt (gsi); 2841 2842 if (!is_gimple_call (ones) 2843 || !is_gimple_call (twos) 2844 || gimple_call_lhs (ones) 2845 || gimple_call_lhs (twos) 2846 || gimple_call_chain (ones) 2847 || gimple_call_chain (twos) 2848 || !gimple_call_same_target_p (ones, twos) 2849 || gimple_call_num_args (ones) != gimple_call_num_args (twos)) 2850 return false; 2851 2852 for (ai = 0; ai < gimple_call_num_args (ones); ++ai) 2853 if (!operand_equal_p (gimple_call_arg (ones, ai), 2854 gimple_call_arg (twos, ai), 0)) 2855 return false; 2856 2857 return true; 2858 } 2859 2860 /* Optimize 2861 try { A() } finally { try { ~B() } catch { ~A() } } 2862 try { ... } finally { ~A() } 2863 into 2864 try { A() } catch { ~B() } 2865 try { ~B() ... } finally { ~A() } 2866 2867 This occurs frequently in C++, where A is a local variable and B is a 2868 temporary used in the initializer for A. 
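   A sketch of C++ source with this shape (the class names are purely
   illustrative):

     struct B { ~B (); };
     struct A { A (const B &); ~A (); };
     void f () { A a = B (); /* ... */ }

   The temporary B dies once A's initialization is complete, and if its
   destructor throws, ~A() must still run; the remainder of the scope is
   then guarded by the usual cleanup running ~A().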
*/ 2869 2870 static void 2871 optimize_double_finally (gimple one, gimple two) 2872 { 2873 gimple oneh; 2874 gimple_stmt_iterator gsi; 2875 2876 gsi = gsi_start (gimple_try_cleanup (one)); 2877 if (!gsi_one_before_end_p (gsi)) 2878 return; 2879 2880 oneh = gsi_stmt (gsi); 2881 if (gimple_code (oneh) != GIMPLE_TRY 2882 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH) 2883 return; 2884 2885 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two))) 2886 { 2887 gimple_seq seq = gimple_try_eval (oneh); 2888 2889 gimple_try_set_cleanup (one, seq); 2890 gimple_try_set_kind (one, GIMPLE_TRY_CATCH); 2891 seq = copy_gimple_seq_and_replace_locals (seq); 2892 gimple_seq_add_seq (&seq, gimple_try_eval (two)); 2893 gimple_try_set_eval (two, seq); 2894 } 2895 } 2896 2897 /* Perform EH refactoring optimizations that are simpler to do when code 2898 flow has been lowered but EH structures haven't. */ 2899 2900 static void 2901 refactor_eh_r (gimple_seq seq) 2902 { 2903 gimple_stmt_iterator gsi; 2904 gimple one, two; 2905 2906 one = NULL; 2907 two = NULL; 2908 gsi = gsi_start (seq); 2909 while (1) 2910 { 2911 one = two; 2912 if (gsi_end_p (gsi)) 2913 two = NULL; 2914 else 2915 two = gsi_stmt (gsi); 2916 if (one 2917 && two 2918 && gimple_code (one) == GIMPLE_TRY 2919 && gimple_code (two) == GIMPLE_TRY 2920 && gimple_try_kind (one) == GIMPLE_TRY_FINALLY 2921 && gimple_try_kind (two) == GIMPLE_TRY_FINALLY) 2922 optimize_double_finally (one, two); 2923 if (one) 2924 switch (gimple_code (one)) 2925 { 2926 case GIMPLE_TRY: 2927 refactor_eh_r (gimple_try_eval (one)); 2928 refactor_eh_r (gimple_try_cleanup (one)); 2929 break; 2930 case GIMPLE_CATCH: 2931 refactor_eh_r (gimple_catch_handler (one)); 2932 break; 2933 case GIMPLE_EH_FILTER: 2934 refactor_eh_r (gimple_eh_filter_failure (one)); 2935 break; 2936 case GIMPLE_EH_ELSE: 2937 refactor_eh_r (gimple_eh_else_n_body (one)); 2938 refactor_eh_r (gimple_eh_else_e_body (one)); 2939 break; 2940 default: 2941 break; 2942 } 2943 if (two) 2944 gsi_next (&gsi); 2945 else 2946 break; 2947 } 2948 } 2949 2950 static unsigned 2951 refactor_eh (void) 2952 { 2953 refactor_eh_r (gimple_body (current_function_decl)); 2954 return 0; 2955 } 2956 2957 static bool 2958 gate_refactor_eh (void) 2959 { 2960 return flag_exceptions != 0; 2961 } 2962 2963 struct gimple_opt_pass pass_refactor_eh = 2964 { 2965 { 2966 GIMPLE_PASS, 2967 "ehopt", /* name */ 2968 gate_refactor_eh, /* gate */ 2969 refactor_eh, /* execute */ 2970 NULL, /* sub */ 2971 NULL, /* next */ 2972 0, /* static_pass_number */ 2973 TV_TREE_EH, /* tv_id */ 2974 PROP_gimple_lcf, /* properties_required */ 2975 0, /* properties_provided */ 2976 0, /* properties_destroyed */ 2977 0, /* todo_flags_start */ 2978 0 /* todo_flags_finish */ 2979 } 2980 }; 2981 2982 /* At the end of gimple optimization, we can lower RESX. 
*/
2983
2984 static bool
2985 lower_resx (basic_block bb, gimple stmt, struct pointer_map_t *mnt_map)
2986 {
2987   int lp_nr;
2988   eh_region src_r, dst_r;
2989   gimple_stmt_iterator gsi;
2990   gimple x;
2991   tree fn, src_nr;
2992   bool ret = false;
2993
2994   lp_nr = lookup_stmt_eh_lp (stmt);
2995   if (lp_nr != 0)
2996     dst_r = get_eh_region_from_lp_number (lp_nr);
2997   else
2998     dst_r = NULL;
2999
3000   src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3001   gsi = gsi_last_bb (bb);
3002
3003   if (src_r == NULL)
3004     {
3005       /* We can wind up with no source region when pass_cleanup_eh shows
3006          that there are no entries into an eh region and deletes it, but
3007          then the block that contains the resx isn't removed.  This can
3008          happen without optimization when the switch statement created by
3009          lower_try_finally_switch isn't simplified to remove the eh case.
3010
3011          Resolve this by expanding the resx node to an abort.  */
3012
3013       fn = builtin_decl_implicit (BUILT_IN_TRAP);
3014       x = gimple_build_call (fn, 0);
3015       gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3016
3017       while (EDGE_COUNT (bb->succs) > 0)
3018         remove_edge (EDGE_SUCC (bb, 0));
3019     }
3020   else if (dst_r)
3021     {
3022       /* When we have a destination region, we resolve this by copying
3023          the exception pointer and filter values into place, and changing
3024          the edge to immediately after the landing pad.  */
3025       edge e;
3026
3027       if (lp_nr < 0)
3028         {
3029           basic_block new_bb;
3030           void **slot;
3031           tree lab;
3032
3033           /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3034              the failure decl into a new block, if needed.  */
3035           gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3036
3037           slot = pointer_map_contains (mnt_map, dst_r);
3038           if (slot == NULL)
3039             {
3040               gimple_stmt_iterator gsi2;
3041
3042               new_bb = create_empty_bb (bb);
3043               lab = gimple_block_label (new_bb);
3044               gsi2 = gsi_start_bb (new_bb);
3045
3046               fn = dst_r->u.must_not_throw.failure_decl;
3047               x = gimple_build_call (fn, 0);
3048               gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3049               gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3050
3051               slot = pointer_map_insert (mnt_map, dst_r);
3052               *slot = lab;
3053             }
3054           else
3055             {
3056               lab = (tree) *slot;
3057               new_bb = label_to_block (lab);
3058             }
3059
3060           gcc_assert (EDGE_COUNT (bb->succs) == 0);
3061           e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3062           e->count = bb->count;
3063           e->probability = REG_BR_PROB_BASE;
3064         }
3065       else
3066         {
3067           edge_iterator ei;
3068           tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3069
3070           fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3071           src_nr = build_int_cst (integer_type_node, src_r->index);
3072           x = gimple_build_call (fn, 2, dst_nr, src_nr);
3073           gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3074
3075           /* Update the flags for the outgoing edge.  */
3076           e = single_succ_edge (bb);
3077           gcc_assert (e->flags & EDGE_EH);
3078           e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3079
3080           /* If there are no more EH users of the landing pad, delete it.  */
3081           FOR_EACH_EDGE (e, ei, e->dest->preds)
3082             if (e->flags & EDGE_EH)
3083               break;
3084           if (e == NULL)
3085             {
3086               eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3087               remove_eh_landing_pad (lp);
3088             }
3089         }
3090
3091       ret = true;
3092     }
3093   else
3094     {
3095       tree var;
3096
3097       /* When we don't have a destination region, this exception escapes
3098          up the call chain.  We resolve this by generating a call to the
3099          _Unwind_Resume library function.
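         The emitted sequence is, in sketch form (the SSA name is
         illustrative):

           ptr_1 = __builtin_eh_pointer (SRC_REGION_NR);
           _Unwind_Resume (ptr_1);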
*/ 3100 3101 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup 3102 with no arguments for C++ and Java. Check for that. */ 3103 if (src_r->use_cxa_end_cleanup) 3104 { 3105 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP); 3106 x = gimple_build_call (fn, 0); 3107 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3108 } 3109 else 3110 { 3111 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER); 3112 src_nr = build_int_cst (integer_type_node, src_r->index); 3113 x = gimple_build_call (fn, 1, src_nr); 3114 var = create_tmp_var (ptr_type_node, NULL); 3115 var = make_ssa_name (var, x); 3116 gimple_call_set_lhs (x, var); 3117 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3118 3119 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME); 3120 x = gimple_build_call (fn, 1, var); 3121 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3122 } 3123 3124 gcc_assert (EDGE_COUNT (bb->succs) == 0); 3125 } 3126 3127 gsi_remove (&gsi, true); 3128 3129 return ret; 3130 } 3131 3132 static unsigned 3133 execute_lower_resx (void) 3134 { 3135 basic_block bb; 3136 struct pointer_map_t *mnt_map; 3137 bool dominance_invalidated = false; 3138 bool any_rewritten = false; 3139 3140 mnt_map = pointer_map_create (); 3141 3142 FOR_EACH_BB (bb) 3143 { 3144 gimple last = last_stmt (bb); 3145 if (last && is_gimple_resx (last)) 3146 { 3147 dominance_invalidated |= lower_resx (bb, last, mnt_map); 3148 any_rewritten = true; 3149 } 3150 } 3151 3152 pointer_map_destroy (mnt_map); 3153 3154 if (dominance_invalidated) 3155 { 3156 free_dominance_info (CDI_DOMINATORS); 3157 free_dominance_info (CDI_POST_DOMINATORS); 3158 } 3159 3160 return any_rewritten ? TODO_update_ssa_only_virtuals : 0; 3161 } 3162 3163 static bool 3164 gate_lower_resx (void) 3165 { 3166 return flag_exceptions != 0; 3167 } 3168 3169 struct gimple_opt_pass pass_lower_resx = 3170 { 3171 { 3172 GIMPLE_PASS, 3173 "resx", /* name */ 3174 gate_lower_resx, /* gate */ 3175 execute_lower_resx, /* execute */ 3176 NULL, /* sub */ 3177 NULL, /* next */ 3178 0, /* static_pass_number */ 3179 TV_TREE_EH, /* tv_id */ 3180 PROP_gimple_lcf, /* properties_required */ 3181 0, /* properties_provided */ 3182 0, /* properties_destroyed */ 3183 0, /* todo_flags_start */ 3184 TODO_verify_flow /* todo_flags_finish */ 3185 } 3186 }; 3187 3188 /* Try to optimize var = {v} {CLOBBER} stmts followed just by 3189 external throw. */ 3190 3191 static void 3192 optimize_clobbers (basic_block bb) 3193 { 3194 gimple_stmt_iterator gsi = gsi_last_bb (bb); 3195 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi)) 3196 { 3197 gimple stmt = gsi_stmt (gsi); 3198 if (is_gimple_debug (stmt)) 3199 continue; 3200 if (!gimple_clobber_p (stmt) 3201 || TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME) 3202 return; 3203 unlink_stmt_vdef (stmt); 3204 gsi_remove (&gsi, true); 3205 release_defs (stmt); 3206 } 3207 } 3208 3209 /* Try to sink var = {v} {CLOBBER} stmts followed just by 3210 internal throw to successor BB. */ 3211 3212 static int 3213 sink_clobbers (basic_block bb) 3214 { 3215 edge e; 3216 edge_iterator ei; 3217 gimple_stmt_iterator gsi, dgsi; 3218 basic_block succbb; 3219 bool any_clobbers = false; 3220 3221 /* Only optimize if BB has a single EH successor and 3222 all predecessor edges are EH too. */ 3223 if (!single_succ_p (bb) 3224 || (single_succ_edge (bb)->flags & EDGE_EH) == 0) 3225 return 0; 3226 3227 FOR_EACH_EDGE (e, ei, bb->preds) 3228 { 3229 if ((e->flags & EDGE_EH) == 0) 3230 return 0; 3231 } 3232 3233 /* And BB contains only CLOBBER stmts before the final 3234 RESX. 
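     That is, the block has the shape (a sketch)

       <bb N>:
         var_1 = {v} {CLOBBER};
         var_2 = {v} {CLOBBER};
         resx R;

     and the clobbers can be moved into the single EH successor.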
*/
3235   gsi = gsi_last_bb (bb);
3236   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3237     {
3238       gimple stmt = gsi_stmt (gsi);
3239       if (is_gimple_debug (stmt))
3240         continue;
3241       if (gimple_code (stmt) == GIMPLE_LABEL)
3242         break;
3243       if (!gimple_clobber_p (stmt)
3244           || TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
3245         return 0;
3246       any_clobbers = true;
3247     }
3248   if (!any_clobbers)
3249     return 0;
3250
3251   succbb = single_succ (bb);
3252   dgsi = gsi_after_labels (succbb);
3253   gsi = gsi_last_bb (bb);
3254   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3255     {
3256       gimple stmt = gsi_stmt (gsi);
3257       tree vdef;
3258       if (is_gimple_debug (stmt))
3259         continue;
3260       if (gimple_code (stmt) == GIMPLE_LABEL)
3261         break;
3262       unlink_stmt_vdef (stmt);
3263       gsi_remove (&gsi, false);
3264       vdef = gimple_vdef (stmt);
3265       if (vdef && TREE_CODE (vdef) == SSA_NAME)
3266         {
3267           vdef = SSA_NAME_VAR (vdef);
3268           mark_sym_for_renaming (vdef);
3269           gimple_set_vdef (stmt, vdef);
3270           gimple_set_vuse (stmt, vdef);
3271         }
3272       release_defs (stmt);
3273       gsi_insert_before (&dgsi, stmt, GSI_SAME_STMT);
3274     }
3275
3276   return TODO_update_ssa_only_virtuals;
3277 }
3278
3279 /* At the end of inlining, we can lower EH_DISPATCH.  Return true when
3280    we have found some duplicate labels and removed some edges.  */
3281
3282 static bool
3283 lower_eh_dispatch (basic_block src, gimple stmt)
3284 {
3285   gimple_stmt_iterator gsi;
3286   int region_nr;
3287   eh_region r;
3288   tree filter, fn;
3289   gimple x;
3290   bool redirected = false;
3291
3292   region_nr = gimple_eh_dispatch_region (stmt);
3293   r = get_eh_region_from_number (region_nr);
3294
3295   gsi = gsi_last_bb (src);
3296
3297   switch (r->type)
3298     {
3299     case ERT_TRY:
3300       {
3301         VEC (tree, heap) *labels = NULL;
3302         tree default_label = NULL;
3303         eh_catch c;
3304         edge_iterator ei;
3305         edge e;
3306         struct pointer_set_t *seen_values = pointer_set_create ();
3307
3308         /* Collect the labels for a switch.  Zero the post_landing_pad
3309            field because we'll no longer have anything keeping these labels
3310            in existence and the optimizer will be free to merge these
3311            blocks at will.  */
3312         for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3313           {
3314             tree tp_node, flt_node, lab = c->label;
3315             bool have_label = false;
3316
3317             c->label = NULL;
3318             tp_node = c->type_list;
3319             flt_node = c->filter_list;
3320
3321             if (tp_node == NULL)
3322               {
3323                 default_label = lab;
3324                 break;
3325               }
3326             do
3327               {
3328                 /* Filter out duplicate labels that arise when this handler
3329                    is shadowed by an earlier one.  When no labels are
3330                    attached to the handler anymore, we remove
3331                    the corresponding edge and then we delete unreachable
3332                    blocks at the end of this pass.  */
3333                 if (! pointer_set_contains (seen_values, TREE_VALUE (flt_node)))
3334                   {
3335                     tree t = build_case_label (TREE_VALUE (flt_node),
3336                                                NULL, lab);
3337                     VEC_safe_push (tree, heap, labels, t);
3338                     pointer_set_insert (seen_values, TREE_VALUE (flt_node));
3339                     have_label = true;
3340                   }
3341
3342                 tp_node = TREE_CHAIN (tp_node);
3343                 flt_node = TREE_CHAIN (flt_node);
3344               }
3345             while (tp_node);
3346             if (! have_label)
3347               {
3348                 remove_edge (find_edge (src, label_to_block (lab)));
3349                 redirected = true;
3350               }
3351           }
3352
3353         /* Clean up the edge flags.  */
3354         FOR_EACH_EDGE (e, ei, src->succs)
3355           {
3356             if (e->flags & EDGE_FALLTHRU)
3357               {
3358                 /* If there was no catch-all, use the fallthru edge.
*/ 3359 if (default_label == NULL) 3360 default_label = gimple_block_label (e->dest); 3361 e->flags &= ~EDGE_FALLTHRU; 3362 } 3363 } 3364 gcc_assert (default_label != NULL); 3365 3366 /* Don't generate a switch if there's only a default case. 3367 This is common in the form of try { A; } catch (...) { B; }. */ 3368 if (labels == NULL) 3369 { 3370 e = single_succ_edge (src); 3371 e->flags |= EDGE_FALLTHRU; 3372 } 3373 else 3374 { 3375 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER); 3376 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node, 3377 region_nr)); 3378 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL); 3379 filter = make_ssa_name (filter, x); 3380 gimple_call_set_lhs (x, filter); 3381 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3382 3383 /* Turn the default label into a default case. */ 3384 default_label = build_case_label (NULL, NULL, default_label); 3385 sort_case_labels (labels); 3386 3387 x = gimple_build_switch_vec (filter, default_label, labels); 3388 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3389 3390 VEC_free (tree, heap, labels); 3391 } 3392 pointer_set_destroy (seen_values); 3393 } 3394 break; 3395 3396 case ERT_ALLOWED_EXCEPTIONS: 3397 { 3398 edge b_e = BRANCH_EDGE (src); 3399 edge f_e = FALLTHRU_EDGE (src); 3400 3401 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER); 3402 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node, 3403 region_nr)); 3404 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL); 3405 filter = make_ssa_name (filter, x); 3406 gimple_call_set_lhs (x, filter); 3407 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3408 3409 r->u.allowed.label = NULL; 3410 x = gimple_build_cond (EQ_EXPR, filter, 3411 build_int_cst (TREE_TYPE (filter), 3412 r->u.allowed.filter), 3413 NULL_TREE, NULL_TREE); 3414 gsi_insert_before (&gsi, x, GSI_SAME_STMT); 3415 3416 b_e->flags = b_e->flags | EDGE_TRUE_VALUE; 3417 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE; 3418 } 3419 break; 3420 3421 default: 3422 gcc_unreachable (); 3423 } 3424 3425 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. 
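   For an ERT_TRY region the replacement has the shape (a sketch)

     filter_1 = __builtin_eh_filter (REGION_NR);
     switch (filter_1) { case 1: goto <catch_1>; ... default: goto <catch_all>; }

   while for ERT_ALLOWED_EXCEPTIONS it is a conditional comparing the
   filter value against the region's allowed filter.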
*/
3426   gsi_remove (&gsi, true);
3427   return redirected;
3428 }
3429
3430 static unsigned
3431 execute_lower_eh_dispatch (void)
3432 {
3433   basic_block bb;
3434   int flags = 0;
3435   bool redirected = false;
3436
3437   assign_filter_values ();
3438
3439   FOR_EACH_BB (bb)
3440     {
3441       gimple last = last_stmt (bb);
3442       if (last == NULL)
3443         continue;
3444       if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3445         {
3446           redirected |= lower_eh_dispatch (bb, last);
3447           flags |= TODO_update_ssa_only_virtuals;
3448         }
3449       else if (gimple_code (last) == GIMPLE_RESX)
3450         {
3451           if (stmt_can_throw_external (last))
3452             optimize_clobbers (bb);
3453           else
3454             flags |= sink_clobbers (bb);
3455         }
3456     }
3457
3458   if (redirected)
3459     delete_unreachable_blocks ();
3460   return flags;
3461 }
3462
3463 static bool
3464 gate_lower_eh_dispatch (void)
3465 {
3466   return cfun->eh->region_tree != NULL;
3467 }
3468
3469 struct gimple_opt_pass pass_lower_eh_dispatch =
3470 {
3471  {
3472   GIMPLE_PASS,
3473   "ehdisp",                     /* name */
3474   gate_lower_eh_dispatch,       /* gate */
3475   execute_lower_eh_dispatch,    /* execute */
3476   NULL,                         /* sub */
3477   NULL,                         /* next */
3478   0,                            /* static_pass_number */
3479   TV_TREE_EH,                   /* tv_id */
3480   PROP_gimple_lcf,              /* properties_required */
3481   0,                            /* properties_provided */
3482   0,                            /* properties_destroyed */
3483   0,                            /* todo_flags_start */
3484   TODO_verify_flow              /* todo_flags_finish */
3485  }
3486 };
3487
3488 /* Walk statements, see what regions are really referenced and remove
3489    those that are unused.  */
3490
3491 static void
3492 remove_unreachable_handlers (void)
3493 {
3494   sbitmap r_reachable, lp_reachable;
3495   eh_region region;
3496   eh_landing_pad lp;
3497   basic_block bb;
3498   int lp_nr, r_nr;
3499
3500   r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array));
3501   lp_reachable
3502     = sbitmap_alloc (VEC_length (eh_landing_pad, cfun->eh->lp_array));
3503   sbitmap_zero (r_reachable);
3504   sbitmap_zero (lp_reachable);
3505
3506   FOR_EACH_BB (bb)
3507     {
3508       gimple_stmt_iterator gsi;
3509
3510       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3511         {
3512           gimple stmt = gsi_stmt (gsi);
3513           lp_nr = lookup_stmt_eh_lp (stmt);
3514
3515           /* Negative LP numbers are MUST_NOT_THROW regions which
3516              are not considered BB enders.  */
3517           if (lp_nr < 0)
3518             SET_BIT (r_reachable, -lp_nr);
3519
3520           /* Positive LP numbers are real landing pads, and are BB enders.  */
3521           else if (lp_nr > 0)
3522             {
3523               gcc_assert (gsi_one_before_end_p (gsi));
3524               region = get_eh_region_from_lp_number (lp_nr);
3525               SET_BIT (r_reachable, region->index);
3526               SET_BIT (lp_reachable, lp_nr);
3527             }
3528
3529           /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
3530           switch (gimple_code (stmt))
3531             {
3532             case GIMPLE_RESX:
3533               SET_BIT (r_reachable, gimple_resx_region (stmt));
3534               break;
3535             case GIMPLE_EH_DISPATCH:
3536               SET_BIT (r_reachable, gimple_eh_dispatch_region (stmt));
3537               break;
3538             default:
3539               break;
3540             }
3541         }
3542     }
3543
3544   if (dump_file)
3545     {
3546       fprintf (dump_file, "Before removal of unreachable regions:\n");
3547       dump_eh_tree (dump_file, cfun);
3548       fprintf (dump_file, "Reachable regions: ");
3549       dump_sbitmap_file (dump_file, r_reachable);
3550       fprintf (dump_file, "Reachable landing pads: ");
3551       dump_sbitmap_file (dump_file, lp_reachable);
3552     }
3553
3554   for (r_nr = 1;
3555        VEC_iterate (eh_region, cfun->eh->region_array, r_nr, region); ++r_nr)
3556     if (region && !TEST_BIT (r_reachable, r_nr))
3557       {
3558         if (dump_file)
3559           fprintf (dump_file, "Removing unreachable region %d\n", r_nr);
3560         remove_eh_handler (region);
3561       }
3562
3563   for (lp_nr = 1;
3564        VEC_iterate (eh_landing_pad, cfun->eh->lp_array, lp_nr, lp); ++lp_nr)
3565     if (lp && !TEST_BIT (lp_reachable, lp_nr))
3566       {
3567         if (dump_file)
3568           fprintf (dump_file, "Removing unreachable landing pad %d\n", lp_nr);
3569         remove_eh_landing_pad (lp);
3570       }
3571
3572   if (dump_file)
3573     {
3574       fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3575       dump_eh_tree (dump_file, cfun);
3576       fprintf (dump_file, "\n\n");
3577     }
3578
3579   sbitmap_free (r_reachable);
3580   sbitmap_free (lp_reachable);
3581
3582 #ifdef ENABLE_CHECKING
3583   verify_eh_tree (cfun);
3584 #endif
3585 }
3586
3587 /* Remove unreachable handlers if any landing pads have been removed after
3588    the last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
3589
3590 void
3591 maybe_remove_unreachable_handlers (void)
3592 {
3593   eh_landing_pad lp;
3594   int i;
3595
3596   if (cfun->eh == NULL)
3597     return;
3598
3599   for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
3600     if (lp && lp->post_landing_pad)
3601       {
3602         if (label_to_block (lp->post_landing_pad) == NULL)
3603           {
3604             remove_unreachable_handlers ();
3605             return;
3606           }
3607       }
3608 }
3609
3610 /* Remove regions that do not have landing pads.  This assumes
3611    that remove_unreachable_handlers has already been run, and
3612    that we've just manipulated the landing pads since then.  */
3613
3614 static void
3615 remove_unreachable_handlers_no_lp (void)
3616 {
3617   eh_region r;
3618   int i;
3619   sbitmap r_reachable;
3620   basic_block bb;
3621
3622   r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array));
3623   sbitmap_zero (r_reachable);
3624
3625   FOR_EACH_BB (bb)
3626     {
3627       gimple stmt = last_stmt (bb);
3628       if (stmt)
3629         /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
3630         switch (gimple_code (stmt))
3631           {
3632           case GIMPLE_RESX:
3633             SET_BIT (r_reachable, gimple_resx_region (stmt));
3634             break;
3635           case GIMPLE_EH_DISPATCH:
3636             SET_BIT (r_reachable, gimple_eh_dispatch_region (stmt));
3637             break;
3638           default:
3639             break;
3640           }
3641     }
3642
3643   for (i = 1; VEC_iterate (eh_region, cfun->eh->region_array, i, r); ++i)
3644     if (r && r->landing_pads == NULL && r->type != ERT_MUST_NOT_THROW
3645         && !TEST_BIT (r_reachable, i))
3646       {
3647         if (dump_file)
3648           fprintf (dump_file, "Removing unreachable region %d\n", i);
3649         remove_eh_handler (r);
3650       }
3651
3652   sbitmap_free (r_reachable);
3653 }
3654
3655 /* Undo critical edge splitting on an EH landing pad.  Earlier, we
3656    optimistically split all sorts of edges, including EH edges.
The 3657 optimization passes in between may not have needed them; if not, 3658 we should undo the split. 3659 3660 Recognize this case by having one EH edge incoming to the BB and 3661 one normal edge outgoing; BB should be empty apart from the 3662 post_landing_pad label. 3663 3664 Note that this is slightly different from the empty handler case 3665 handled by cleanup_empty_eh, in that the actual handler may yet 3666 have actual code but the landing pad has been separated from the 3667 handler. As such, cleanup_empty_eh relies on this transformation 3668 having been done first. */ 3669 3670 static bool 3671 unsplit_eh (eh_landing_pad lp) 3672 { 3673 basic_block bb = label_to_block (lp->post_landing_pad); 3674 gimple_stmt_iterator gsi; 3675 edge e_in, e_out; 3676 3677 /* Quickly check the edge counts on BB for singularity. */ 3678 if (EDGE_COUNT (bb->preds) != 1 || EDGE_COUNT (bb->succs) != 1) 3679 return false; 3680 e_in = EDGE_PRED (bb, 0); 3681 e_out = EDGE_SUCC (bb, 0); 3682 3683 /* Input edge must be EH and output edge must be normal. */ 3684 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0) 3685 return false; 3686 3687 /* The block must be empty except for the labels and debug insns. */ 3688 gsi = gsi_after_labels (bb); 3689 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi))) 3690 gsi_next_nondebug (&gsi); 3691 if (!gsi_end_p (gsi)) 3692 return false; 3693 3694 /* The destination block must not already have a landing pad 3695 for a different region. */ 3696 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi)) 3697 { 3698 gimple stmt = gsi_stmt (gsi); 3699 tree lab; 3700 int lp_nr; 3701 3702 if (gimple_code (stmt) != GIMPLE_LABEL) 3703 break; 3704 lab = gimple_label_label (stmt); 3705 lp_nr = EH_LANDING_PAD_NR (lab); 3706 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region) 3707 return false; 3708 } 3709 3710 /* The new destination block must not already be a destination of 3711 the source block, lest we merge fallthru and eh edges and get 3712 all sorts of confused. */ 3713 if (find_edge (e_in->src, e_out->dest)) 3714 return false; 3715 3716 /* ??? We can get degenerate phis due to cfg cleanups. I would have 3717 thought this should have been cleaned up by a phicprop pass, but 3718 that doesn't appear to handle virtuals. Propagate by hand. */ 3719 if (!gimple_seq_empty_p (phi_nodes (bb))) 3720 { 3721 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); ) 3722 { 3723 gimple use_stmt, phi = gsi_stmt (gsi); 3724 tree lhs = gimple_phi_result (phi); 3725 tree rhs = gimple_phi_arg_def (phi, 0); 3726 use_operand_p use_p; 3727 imm_use_iterator iter; 3728 3729 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs) 3730 { 3731 FOR_EACH_IMM_USE_ON_STMT (use_p, iter) 3732 SET_USE (use_p, rhs); 3733 } 3734 3735 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)) 3736 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1; 3737 3738 remove_phi_node (&gsi, true); 3739 } 3740 } 3741 3742 if (dump_file && (dump_flags & TDF_DETAILS)) 3743 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n", 3744 lp->index, e_out->dest->index); 3745 3746 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving 3747 a successor edge, humor it. But do the real CFG change with the 3748 predecessor of E_OUT in order to preserve the ordering of arguments 3749 to the PHI nodes in E_OUT->DEST. 
*/ 3750 redirect_eh_edge_1 (e_in, e_out->dest, false); 3751 redirect_edge_pred (e_out, e_in->src); 3752 e_out->flags = e_in->flags; 3753 e_out->probability = e_in->probability; 3754 e_out->count = e_in->count; 3755 remove_edge (e_in); 3756 3757 return true; 3758 } 3759 3760 /* Examine each landing pad block and see if it matches unsplit_eh. */ 3761 3762 static bool 3763 unsplit_all_eh (void) 3764 { 3765 bool changed = false; 3766 eh_landing_pad lp; 3767 int i; 3768 3769 for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i) 3770 if (lp) 3771 changed |= unsplit_eh (lp); 3772 3773 return changed; 3774 } 3775 3776 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming 3777 to OLD_BB to NEW_BB; return true on success, false on failure. 3778 3779 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any 3780 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT. 3781 Virtual PHIs may be deleted and marked for renaming. */ 3782 3783 static bool 3784 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb, 3785 edge old_bb_out, bool change_region) 3786 { 3787 gimple_stmt_iterator ngsi, ogsi; 3788 edge_iterator ei; 3789 edge e; 3790 bitmap rename_virts; 3791 bitmap ophi_handled; 3792 3793 /* The destination block must not be a regular successor for any 3794 of the preds of the landing pad. Thus, avoid turning 3795 <..> 3796 | \ EH 3797 | <..> 3798 | / 3799 <..> 3800 into 3801 <..> 3802 | | EH 3803 <..> 3804 which CFG verification would choke on. See PR45172 and PR51089. */ 3805 FOR_EACH_EDGE (e, ei, old_bb->preds) 3806 if (find_edge (e->src, new_bb)) 3807 return false; 3808 3809 FOR_EACH_EDGE (e, ei, old_bb->preds) 3810 redirect_edge_var_map_clear (e); 3811 3812 ophi_handled = BITMAP_ALLOC (NULL); 3813 rename_virts = BITMAP_ALLOC (NULL); 3814 3815 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map 3816 for the edges we're going to move. */ 3817 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi)) 3818 { 3819 gimple ophi, nphi = gsi_stmt (ngsi); 3820 tree nresult, nop; 3821 3822 nresult = gimple_phi_result (nphi); 3823 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx); 3824 3825 /* Find the corresponding PHI in OLD_BB so we can forward-propagate 3826 the source ssa_name. */ 3827 ophi = NULL; 3828 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi)) 3829 { 3830 ophi = gsi_stmt (ogsi); 3831 if (gimple_phi_result (ophi) == nop) 3832 break; 3833 ophi = NULL; 3834 } 3835 3836 /* If we did find the corresponding PHI, copy those inputs. */ 3837 if (ophi) 3838 { 3839 /* If NOP is used somewhere else beyond phis in new_bb, give up. */ 3840 if (!has_single_use (nop)) 3841 { 3842 imm_use_iterator imm_iter; 3843 use_operand_p use_p; 3844 3845 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop) 3846 { 3847 if (!gimple_debug_bind_p (USE_STMT (use_p)) 3848 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI 3849 || gimple_bb (USE_STMT (use_p)) != new_bb)) 3850 goto fail; 3851 } 3852 } 3853 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop)); 3854 FOR_EACH_EDGE (e, ei, old_bb->preds) 3855 { 3856 location_t oloc; 3857 tree oop; 3858 3859 if ((e->flags & EDGE_EH) == 0) 3860 continue; 3861 oop = gimple_phi_arg_def (ophi, e->dest_idx); 3862 oloc = gimple_phi_arg_location (ophi, e->dest_idx); 3863 redirect_edge_var_map_add (e, nresult, oop, oloc); 3864 } 3865 } 3866 /* If we didn't find the PHI, but it's a VOP, remember to rename 3867 it later, assuming all other tests succeed. 
*/ 3868 else if (!is_gimple_reg (nresult)) 3869 bitmap_set_bit (rename_virts, SSA_NAME_VERSION (nresult)); 3870 /* If we didn't find the PHI, and it's a real variable, we know 3871 from the fact that OLD_BB is tree_empty_eh_handler_p that the 3872 variable is unchanged from input to the block and we can simply 3873 re-use the input to NEW_BB from the OLD_BB_OUT edge. */ 3874 else 3875 { 3876 location_t nloc 3877 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx); 3878 FOR_EACH_EDGE (e, ei, old_bb->preds) 3879 redirect_edge_var_map_add (e, nresult, nop, nloc); 3880 } 3881 } 3882 3883 /* Second, verify that all PHIs from OLD_BB have been handled. If not, 3884 we don't know what values from the other edges into NEW_BB to use. */ 3885 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi)) 3886 { 3887 gimple ophi = gsi_stmt (ogsi); 3888 tree oresult = gimple_phi_result (ophi); 3889 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult))) 3890 goto fail; 3891 } 3892 3893 /* At this point we know that the merge will succeed. Remove the PHI 3894 nodes for the virtuals that we want to rename. */ 3895 if (!bitmap_empty_p (rename_virts)) 3896 { 3897 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); ) 3898 { 3899 gimple nphi = gsi_stmt (ngsi); 3900 tree nresult = gimple_phi_result (nphi); 3901 if (bitmap_bit_p (rename_virts, SSA_NAME_VERSION (nresult))) 3902 { 3903 mark_virtual_phi_result_for_renaming (nphi); 3904 remove_phi_node (&ngsi, true); 3905 } 3906 else 3907 gsi_next (&ngsi); 3908 } 3909 } 3910 3911 /* Finally, move the edges and update the PHIs. */ 3912 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); ) 3913 if (e->flags & EDGE_EH) 3914 { 3915 redirect_eh_edge_1 (e, new_bb, change_region); 3916 redirect_edge_succ (e, new_bb); 3917 flush_pending_stmts (e); 3918 } 3919 else 3920 ei_next (&ei); 3921 3922 BITMAP_FREE (ophi_handled); 3923 BITMAP_FREE (rename_virts); 3924 return true; 3925 3926 fail: 3927 FOR_EACH_EDGE (e, ei, old_bb->preds) 3928 redirect_edge_var_map_clear (e); 3929 BITMAP_FREE (ophi_handled); 3930 BITMAP_FREE (rename_virts); 3931 return false; 3932 } 3933 3934 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its 3935 old region to NEW_REGION at BB. */ 3936 3937 static void 3938 cleanup_empty_eh_move_lp (basic_block bb, edge e_out, 3939 eh_landing_pad lp, eh_region new_region) 3940 { 3941 gimple_stmt_iterator gsi; 3942 eh_landing_pad *pp; 3943 3944 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp) 3945 continue; 3946 *pp = lp->next_lp; 3947 3948 lp->region = new_region; 3949 lp->next_lp = new_region->landing_pads; 3950 new_region->landing_pads = lp; 3951 3952 /* Delete the RESX that was matched within the empty handler block. */ 3953 gsi = gsi_last_bb (bb); 3954 mark_virtual_ops_for_renaming (gsi_stmt (gsi)); 3955 gsi_remove (&gsi, true); 3956 3957 /* Clean up E_OUT for the fallthru. */ 3958 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU; 3959 e_out->probability = REG_BR_PROB_BASE; 3960 } 3961 3962 /* A subroutine of cleanup_empty_eh. Handle more complex cases of 3963 unsplitting than unsplit_eh was prepared to handle, e.g. when 3964 multiple incoming edges and phis are involved. */ 3965 3966 static bool 3967 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp) 3968 { 3969 gimple_stmt_iterator gsi; 3970 tree lab; 3971 3972 /* We really ought not have totally lost everything following 3973 a landing pad label. Given that BB is empty, there had better 3974 be a successor. 
/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
   unsplitting than unsplit_eh was prepared to handle, e.g. when
   multiple incoming edges and PHIs are involved.  */

static bool
cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
{
  gimple_stmt_iterator gsi;
  tree lab;

  /* We really ought not have totally lost everything following
     a landing pad label.  Given that BB is empty, there had better
     be a successor.  */
  gcc_assert (e_out != NULL);

  /* The destination block must not already have a landing pad
     for a different region.  */
  lab = NULL;
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      int lp_nr;

      if (gimple_code (stmt) != GIMPLE_LABEL)
        break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
        return false;
    }

  /* Attempt to move the PHIs into the successor block.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "Unsplit EH landing pad %d to block %i "
                 "(via cleanup_empty_eh).\n",
                 lp->index, e_out->dest->index);
      return true;
    }

  return false;
}

/* Return true if edge E_FIRST is part of an empty infinite loop
   or leads to such a loop through a series of single-successor
   empty basic blocks.  */

static bool
infinite_empty_loop_p (edge e_first)
{
  bool inf_loop = false;
  edge e;

  if (e_first->dest == e_first->src)
    return true;

  e_first->src->aux = (void *) 1;
  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
    {
      gimple_stmt_iterator gsi;
      if (e->dest->aux)
        {
          inf_loop = true;
          break;
        }
      e->dest->aux = (void *) 1;
      gsi = gsi_after_labels (e->dest);
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
        gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
        break;
    }
  e_first->src->aux = NULL;
  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
    e->dest->aux = NULL;

  return inf_loop;
}
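/* For illustration, both of the following (hypothetical) shapes are
   recognized by infinite_empty_loop_p: a direct self-loop

       <bb 5>:
         goto <bb 5>;

   and a cycle of empty single-successor blocks

       <bb 5>: goto <bb 6>;   <bb 6>: goto <bb 7>;   <bb 7>: goto <bb 5>;

   The AUX marks used during the walk are cleared again before
   returning.  */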
/* Examine the block associated with LP to determine if it's an empty
   handler for its EH region.  If so, attempt to redirect EH edges to
   an outer region.  Return true if the CFG was updated in any way.
   This is similar to jump forwarding, just across EH edges.  */

static bool
cleanup_empty_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  gimple resx;
  eh_region new_region;
  edge_iterator ei;
  edge e, e_out;
  bool has_non_eh_pred;
  bool ret = false;
  int new_lp_nr;

  /* There can be zero or one edges out of BB.  This is the quickest test.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      e_out = NULL;
      break;
    case 1:
      e_out = EDGE_SUCC (bb, 0);
      break;
    default:
      return false;
    }

  resx = last_stmt (bb);
  if (resx && is_gimple_resx (resx))
    {
      if (stmt_can_throw_external (resx))
        optimize_clobbers (bb);
      else if (sink_clobbers (bb))
        ret = true;
    }

  gsi = gsi_after_labels (bb);

  /* Make sure to skip debug statements.  */
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);

  /* If the block is totally empty, look for more unsplitting cases.  */
  if (gsi_end_p (gsi))
    {
      /* Bail out for the degenerate case of an infinite loop.  */
      if (infinite_empty_loop_p (e_out))
        return ret;

      return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
    }

  /* The block should consist only of a single RESX statement, modulo a
     preceding call to __builtin_stack_restore if there is no outgoing
     edge, since the call can be eliminated in this case.  */
  resx = gsi_stmt (gsi);
  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
    {
      gsi_next (&gsi);
      resx = gsi_stmt (gsi);
    }
  if (!is_gimple_resx (resx))
    return ret;
  gcc_assert (gsi_one_before_end_p (gsi));

  /* Determine if there are non-EH edges, or resx edges into the handler.  */
  has_non_eh_pred = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH))
      has_non_eh_pred = true;

  /* Find the handler that's outer of the empty handler by looking at
     where the RESX instruction was vectored.  */
  new_lp_nr = lookup_stmt_eh_lp (resx);
  new_region = get_eh_region_from_lp_number (new_lp_nr);

  /* If there's no destination region within the current function,
     redirection is trivial via removing the throwing statements from
     the EH region, removing the EH edges, and allowing the block
     to go unreachable.  */
  if (new_region == NULL)
    {
      gcc_assert (e_out == NULL);
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
        if (e->flags & EDGE_EH)
          {
            gimple stmt = last_stmt (e->src);
            remove_stmt_from_eh_lp (stmt);
            remove_edge (e);
          }
        else
          ei_next (&ei);
      goto succeed;
    }

  /* If the destination region is a MUST_NOT_THROW, allow the runtime
     to handle the abort and allow the blocks to go unreachable.  */
  if (new_region->type == ERT_MUST_NOT_THROW)
    {
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
        if (e->flags & EDGE_EH)
          {
            gimple stmt = last_stmt (e->src);
            remove_stmt_from_eh_lp (stmt);
            add_stmt_to_eh_lp (stmt, new_lp_nr);
            remove_edge (e);
          }
        else
          ei_next (&ei);
      goto succeed;
    }

  /* Try to redirect the EH edges and merge the PHIs into the destination
     landing pad block.  If the merge succeeds, we'll already have redirected
     all the EH edges.  The handler itself will go unreachable if there were
     no normal edges.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
    goto succeed;

  /* Finally, if all input edges are EH edges, then we can (potentially)
     reduce the number of transfers from the runtime by moving the landing
     pad from the original region to the new region.  This is a win when
     we remove the last CLEANUP region along a particular exception
     propagation path.  Since nothing changes except for the region with
     which the landing pad is associated, the PHI nodes do not need to be
     adjusted at all.  */
  if (!has_non_eh_pred)
    {
      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
                 lp->index, new_region->index);

      /* ??? The CFG didn't change, but we may have rendered the
         old EH region unreachable.  Trigger a cleanup there.  */
      return true;
    }

  return ret;

 succeed:
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
  remove_eh_landing_pad (lp);
  return true;
}
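/* An illustrative example: an empty cleanup handler such as

       <lp 2>:
         resx 2;

   does no work before re-raising, so cleanup_empty_eh retargets the
   statements and EH edges that entered landing pad 2 at whatever outer
   landing pad the RESX would have reached.  The empty handler block then
   goes unreachable and is removed later by delete_unreachable_blocks.
   The landing pad numbers here are hypothetical.  */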
/* Do a post-order traversal of the EH region tree.  Examine each
   post_landing_pad block and see if we can eliminate it as empty.  */

static bool
cleanup_all_empty_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
    if (lp)
      changed |= cleanup_empty_eh (lp);

  return changed;
}

/* Perform cleanups and lowering of exception handling:
     1) Cleanup regions with handlers doing nothing are optimized out.
     2) MUST_NOT_THROW regions that became dead because of 1) are
        optimized out.
     3) Info about regions containing instructions, and regions
        reachable via local EH edges, is collected.
     4) The EH tree is pruned for regions that are no longer necessary.

   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
         Unify those that have the same failure decl and locus.  */

static unsigned int
execute_cleanup_eh_1 (void)
{
  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
     looking up unreachable landing pads.  */
  remove_unreachable_handlers ();

  /* Watch out for the region tree vanishing due to all regions
     being unreachable.  */
  if (cfun->eh->region_tree && optimize)
    {
      bool changed = false;

      changed |= unsplit_all_eh ();
      changed |= cleanup_all_empty_eh ();

      if (changed)
        {
          free_dominance_info (CDI_DOMINATORS);
          free_dominance_info (CDI_POST_DOMINATORS);

          /* We delayed all basic block deletion, as we may have performed
             cleanups on EH edges while non-EH edges were still present.  */
          delete_unreachable_blocks ();

          /* We manipulated the landing pads.  Remove any region that no
             longer has a landing pad.  */
          remove_unreachable_handlers_no_lp ();

          return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
        }
    }

  return 0;
}

static unsigned int
execute_cleanup_eh (void)
{
  int ret = execute_cleanup_eh_1 ();

  /* If the function no longer needs an EH personality routine,
     clear it.  This exposes cross-language inlining opportunities
     and avoids references to a never-defined personality routine.  */
  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
      && function_needs_eh_personality (cfun) != eh_personality_lang)
    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;

  return ret;
}

static bool
gate_cleanup_eh (void)
{
  return cfun->eh != NULL && cfun->eh->region_tree != NULL;
}

struct gimple_opt_pass pass_cleanup_eh =
{
 {
  GIMPLE_PASS,
  "ehcleanup",			/* name */
  gate_cleanup_eh,		/* gate */
  execute_cleanup_eh,		/* execute */
  NULL,				/* sub */
  NULL,				/* next */
  0,				/* static_pass_number */
  TV_TREE_EH,			/* tv_id */
  PROP_gimple_lcf,		/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0				/* todo_flags_finish */
 }
};
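/* An illustrative note on the personality clearing above: once every EH
   construct in a function has been optimized away, the function no longer
   needs to reference its language's personality routine (for C++, e.g.,
   the runtime's __gxx_personality_v0), which in turn allows inlining
   across functions that would otherwise use conflicting personality
   routines.  */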
/* Verify that BB, which contains STMT as its last statement, has
   precisely the edges that make_eh_edges would create.  */

DEBUG_FUNCTION bool
verify_eh_edges (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  eh_landing_pad lp = NULL;
  int lp_nr;
  edge_iterator ei;
  edge e, eh_edge;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    lp = get_eh_landing_pad_from_number (lp_nr);

  eh_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->flags & EDGE_EH)
        {
          if (eh_edge)
            {
              error ("BB %i has multiple EH edges", bb->index);
              return true;
            }
          else
            eh_edge = e;
        }
    }

  if (lp == NULL)
    {
      if (eh_edge)
        {
          error ("BB %i cannot throw but has an EH edge", bb->index);
          return true;
        }
      return false;
    }

  if (!stmt_could_throw_p (stmt))
    {
      error ("BB %i last statement has incorrectly set lp", bb->index);
      return true;
    }

  if (eh_edge == NULL)
    {
      error ("BB %i is missing an EH edge", bb->index);
      return true;
    }

  if (eh_edge->dest != label_to_block (lp->post_landing_pad))
    {
      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
      return true;
    }

  return false;
}

/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */

DEBUG_FUNCTION bool
verify_eh_dispatch_edge (gimple stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;
  bool want_fallthru = true;
  edge_iterator ei;
  edge e, fall_edge;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  FOR_EACH_EDGE (e, ei, src->succs)
    gcc_assert (e->aux == NULL);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
        {
          dst = label_to_block (c->label);
          e = find_edge (src, dst);
          if (e == NULL)
            {
              error ("BB %i is missing an edge", src->index);
              return true;
            }
          e->aux = (void *)e;

          /* A catch-all handler doesn't have a fallthru.  */
          if (c->type_list == NULL)
            {
              want_fallthru = false;
              break;
            }
        }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      e = find_edge (src, dst);
      if (e == NULL)
        {
          error ("BB %i is missing an edge", src->index);
          return true;
        }
      e->aux = (void *)e;
      break;

    default:
      gcc_unreachable ();
    }

  fall_edge = NULL;
  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->flags & EDGE_FALLTHRU)
        {
          if (fall_edge != NULL)
            {
              error ("BB %i has too many fallthru edges", src->index);
              return true;
            }
          fall_edge = e;
        }
      else if (e->aux)
        e->aux = NULL;
      else
        {
          error ("BB %i has incorrect edge", src->index);
          return true;
        }
    }
  if ((fall_edge != NULL) ^ want_fallthru)
    {
      error ("BB %i has incorrect fallthru edge", src->index);
      return true;
    }

  return false;
}
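/* As an illustration of the edge shape verify_eh_dispatch_edge checks
   for an ERT_TRY region: given a (hypothetical) dispatch statement

       eh_dispatch 1;

   produced for  try { ... } catch (int) { ... } catch (...) { ... },
   there must be exactly one edge from the dispatch block to each catch
   label, and no fallthru edge, because the trailing catch-all guarantees
   that some handler always matches.  */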