/* Implements exception handling.
   Copyright (C) 1989-2018 Free Software Foundation, Inc.
   Contributed by Mike Stump <mrs@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* An exception is an event that can be "thrown" from within a
   function.  This event can then be "caught" by the callers of
   the function.

   The representation of exceptions changes several times during
   the compilation process:

   In the beginning, in the front end, we have the GENERIC trees
   TRY_CATCH_EXPR, TRY_FINALLY_EXPR, WITH_CLEANUP_EXPR,
   CLEANUP_POINT_EXPR, CATCH_EXPR, and EH_FILTER_EXPR.

   During initial gimplification (gimplify.c) these are lowered
   to the GIMPLE_TRY, GIMPLE_CATCH, and GIMPLE_EH_FILTER nodes.
   The WITH_CLEANUP_EXPR and CLEANUP_POINT_EXPR nodes are converted
   into GIMPLE_TRY_FINALLY nodes; the others are a more direct 1-1
   conversion.

   During pass_lower_eh (tree-eh.c) we record the nested structure
   of the TRY nodes in EH_REGION nodes in CFUN->EH->REGION_TREE.
   We expand the eh_protect_cleanup_actions langhook into MUST_NOT_THROW
   regions at this time.  We can then flatten the statements within
   the TRY nodes to straight-line code.  Statements that had been within
   TRY nodes that can throw are recorded within CFUN->EH->THROW_STMT_TABLE,
   so that we may remember what action is supposed to be taken if
   a given statement does throw.  During this lowering process,
   we create an EH_LANDING_PAD node for each EH_REGION that has
   some code within the function that needs to be executed if a
   throw does happen.  We also create RESX statements that are
   used to transfer control from an inner EH_REGION to an outer
   EH_REGION.  We also create EH_DISPATCH statements as placeholders
   for a runtime type comparison that should be made in order to
   select the action to perform among different CATCH and EH_FILTER
   regions.

   During pass_lower_eh_dispatch (tree-eh.c), which is run after
   all inlining is complete, we are able to run assign_filter_values,
   which allows us to map the set of types manipulated by all of the
   CATCH and EH_FILTER regions to a set of integers.  This set of integers
   will be how the exception runtime communicates with the code generated
   within the function.  We then expand the GIMPLE_EH_DISPATCH statements
   to a switch or conditional branches that use the argument provided by
   the runtime (__builtin_eh_filter) and the set of integers we computed
   in assign_filter_values.

   During pass_lower_resx (tree-eh.c), which is run near the end
   of optimization, we expand RESX statements.  If the eh region
   that is outer to the RESX statement is a MUST_NOT_THROW, then
   the RESX expands to some form of abort statement.  If the eh
   region that is outer to the RESX statement is within the current
   function, then the RESX expands to a bookkeeping call
   (__builtin_eh_copy_values) and a goto.
   Otherwise, the next handler for the exception must be within a
   function somewhere up the call chain, so we call back into the
   exception runtime (__builtin_unwind_resume).

   During pass_expand (cfgexpand.c), we generate REG_EH_REGION notes
   that create an rtl to eh_region mapping that corresponds to the
   gimple to eh_region mapping that had been recorded in the
   THROW_STMT_TABLE.

   Then, via finish_eh_generation, we generate the real landing pads
   to which the runtime will actually transfer control.  These new
   landing pads perform whatever bookkeeping is needed by the target
   backend in order to resume execution within the current function.
   Each of these new landing pads falls through into the post_landing_pad
   label which had been used within the CFG up to this point.  All
   exception edges within the CFG are redirected to the new landing pads.
   If the target uses setjmp to implement exceptions, the various extra
   calls into the runtime to register and unregister the current stack
   frame are emitted at this time.

   During pass_convert_to_eh_region_ranges (except.c), we transform
   the REG_EH_REGION notes attached to individual insns into
   non-overlapping ranges of insns bounded by NOTE_INSN_EH_REGION_BEG
   and NOTE_INSN_EH_REGION_END.  Each insn within such ranges has the
   same associated action within the exception region tree, meaning
   that (1) the exception is caught by the same landing pad within the
   current function, (2) the exception is blocked by the runtime with
   a MUST_NOT_THROW region, or (3) the exception is not handled at all
   within the current function.

   Finally, during assembly generation, we call
   output_function_exception_table (except.c) to emit the tables with
   which the exception runtime can determine if a given stack frame
   handles a given exception, and if so what filter value to provide
   to the function when the non-local control transfer is effected.
   If the target uses dwarf2 unwinding to implement exceptions, then
   output_call_frame_info (dwarf2out.c) emits the required unwind data.  */
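
/* As a rough illustration of the pipeline above (all names here are
   for exposition only, not taken from any particular test case): for a
   C++ fragment such as

       try { foo (); } catch (const E &) { handle (); }

   the front end emits a TRY_CATCH_EXPR whose handler is a CATCH_EXPR
   for E; gimplification lowers that to GIMPLE_TRY/GIMPLE_CATCH;
   pass_lower_eh records an ERT_TRY region with one catch for E plus a
   landing pad, and associates the call to foo with that region in
   THROW_STMT_TABLE; pass_lower_eh_dispatch assigns E a small integer
   filter value; and the RTL-level machinery in this file turns that
   association into REG_EH_REGION notes, real landing pads, and the
   call-site and action tables consumed by the unwinder.  */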

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "explow.h"
#include "stmt.h"
#include "expr.h"
#include "calls.h"
#include "libfuncs.h"
#include "except.h"
#include "output.h"
#include "dwarf2asm.h"
#include "dwarf2out.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "tree-pretty-print.h"
#include "cfgloop.h"
#include "builtins.h"
#include "tree-hash-traits.h"

static GTY(()) int call_site_base;

static GTY(()) hash_map<tree_hash, tree> *type_to_runtime_map;

static GTY(()) tree setjmp_fn;

/* Describe the SjLj_Function_Context structure.  */
static GTY(()) tree sjlj_fc_type_node;
static int sjlj_fc_call_site_ofs;
static int sjlj_fc_data_ofs;
static int sjlj_fc_personality_ofs;
static int sjlj_fc_lsda_ofs;
static int sjlj_fc_jbuf_ofs;


struct GTY(()) call_site_record_d
{
  rtx landing_pad;
  int action;
};

/* In the following structure and associated functions,
   we represent entries in the action table as 1-based indices.
   Special cases are:

	 0:	null action record, non-null landing pad; implies cleanups
	-1:	null action record, null landing pad; implies no action
	-2:	no call-site entry; implies must_not_throw
	-3:	we have yet to process outer regions

   Further, no special cases apply to the "next" field of the record.
   For next, 0 means end of list.  */

struct action_record
{
  int offset;
  int filter;
  int next;
};

/* Hashtable helpers.  */

struct action_record_hasher : free_ptr_hash <action_record>
{
  static inline hashval_t hash (const action_record *);
  static inline bool equal (const action_record *, const action_record *);
};

inline hashval_t
action_record_hasher::hash (const action_record *entry)
{
  return entry->next * 1009 + entry->filter;
}

inline bool
action_record_hasher::equal (const action_record *entry,
			     const action_record *data)
{
  return entry->filter == data->filter && entry->next == data->next;
}

typedef hash_table<action_record_hasher> action_hash_type;

static bool get_eh_region_and_lp_from_rtx (const_rtx, eh_region *,
					   eh_landing_pad *);

static void dw2_build_landing_pads (void);

static int collect_one_action_chain (action_hash_type *, eh_region);
static int add_call_site (rtx, int, int);

static void push_uleb128 (vec<uchar, va_gc> **, unsigned int);
static void push_sleb128 (vec<uchar, va_gc> **, int);
static int dw2_size_of_call_site_table (int);
static int sjlj_size_of_call_site_table (void);
static void dw2_output_call_site_table (int, int);
static void sjlj_output_call_site_table (void);

/* One-time initialization of the global exception handling state:
   create the runtime type map and, when using setjmp/longjmp based
   unwinding, the function context type.  */

void
init_eh (void)
{
  if (! flag_exceptions)
    return;

  type_to_runtime_map = hash_map<tree_hash, tree>::create_ggc (31);

  /* Create the SjLj_Function_Context structure.  This should match
     the definition in unwind-sjlj.c.  */
  if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
    {
      tree f_jbuf, f_per, f_lsda, f_prev, f_cs, f_data, tmp;

      sjlj_fc_type_node = lang_hooks.types.make_type (RECORD_TYPE);

      f_prev = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__prev"),
			   build_pointer_type (sjlj_fc_type_node));
      DECL_FIELD_CONTEXT (f_prev) = sjlj_fc_type_node;

      f_cs = build_decl (BUILTINS_LOCATION,
			 FIELD_DECL, get_identifier ("__call_site"),
			 integer_type_node);
      DECL_FIELD_CONTEXT (f_cs) = sjlj_fc_type_node;

      tmp = build_index_type (size_int (4 - 1));
      tmp = build_array_type (lang_hooks.types.type_for_mode
				(targetm.unwind_word_mode (), 1),
			      tmp);
      f_data = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__data"), tmp);
      DECL_FIELD_CONTEXT (f_data) = sjlj_fc_type_node;

      f_per = build_decl (BUILTINS_LOCATION,
			  FIELD_DECL, get_identifier ("__personality"),
			  ptr_type_node);
      DECL_FIELD_CONTEXT (f_per) = sjlj_fc_type_node;

      f_lsda = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__lsda"),
			   ptr_type_node);
      DECL_FIELD_CONTEXT (f_lsda) = sjlj_fc_type_node;

#ifdef DONT_USE_BUILTIN_SETJMP
#ifdef JMP_BUF_SIZE
      tmp = size_int (JMP_BUF_SIZE - 1);
#else
      /* Should be large enough for most systems; if it is not,
	 JMP_BUF_SIZE should be defined with the proper value.  It will
	 also tend to be larger than necessary for most systems; a more
	 optimal port will define JMP_BUF_SIZE.  */
      tmp = size_int (FIRST_PSEUDO_REGISTER + 2 - 1);
#endif
#else
      /* Compute a minimally sized jump buffer.  We need room to store at
	 least 3 pointers - stack pointer, frame pointer and return address.
	 Plus for some targets we need room for an extra pointer - in the
	 case of MIPS this is the global pointer.  This makes a total of four
	 pointers, but to be safe we actually allocate room for 5.

	 If pointers are smaller than words then we allocate enough room for
	 5 words, just in case the backend needs this much room.  For more
	 discussion on this issue see:
	 http://gcc.gnu.org/ml/gcc-patches/2014-05/msg00313.html.  */
      if (POINTER_SIZE > BITS_PER_WORD)
	tmp = size_int (5 - 1);
      else
	tmp = size_int ((5 * BITS_PER_WORD / POINTER_SIZE) - 1);
#endif

      tmp = build_index_type (tmp);
      tmp = build_array_type (ptr_type_node, tmp);
      f_jbuf = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__jbuf"), tmp);
#ifdef DONT_USE_BUILTIN_SETJMP
      /* We don't know what alignment requirements the runtime's
	 jmp_buf has.  Overestimate.  */
      SET_DECL_ALIGN (f_jbuf, BIGGEST_ALIGNMENT);
      DECL_USER_ALIGN (f_jbuf) = 1;
#endif
      DECL_FIELD_CONTEXT (f_jbuf) = sjlj_fc_type_node;

      TYPE_FIELDS (sjlj_fc_type_node) = f_prev;
      TREE_CHAIN (f_prev) = f_cs;
      TREE_CHAIN (f_cs) = f_data;
      TREE_CHAIN (f_data) = f_per;
      TREE_CHAIN (f_per) = f_lsda;
      TREE_CHAIN (f_lsda) = f_jbuf;

      layout_type (sjlj_fc_type_node);

      /* Cache the interesting field offsets so that we have
	 easy access from rtl.  */
      sjlj_fc_call_site_ofs
	= (tree_to_uhwi (DECL_FIELD_OFFSET (f_cs))
	   + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_cs)) / BITS_PER_UNIT);
      sjlj_fc_data_ofs
	= (tree_to_uhwi (DECL_FIELD_OFFSET (f_data))
	   + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_data)) / BITS_PER_UNIT);
      sjlj_fc_personality_ofs
	= (tree_to_uhwi (DECL_FIELD_OFFSET (f_per))
	   + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_per)) / BITS_PER_UNIT);
      sjlj_fc_lsda_ofs
	= (tree_to_uhwi (DECL_FIELD_OFFSET (f_lsda))
	   + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_lsda)) / BITS_PER_UNIT);
      sjlj_fc_jbuf_ofs
	= (tree_to_uhwi (DECL_FIELD_OFFSET (f_jbuf))
	   + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_jbuf)) / BITS_PER_UNIT);

#ifdef DONT_USE_BUILTIN_SETJMP
      tmp = build_function_type_list (integer_type_node, TREE_TYPE (f_jbuf),
				      NULL);
      setjmp_fn = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			      get_identifier ("setjmp"), tmp);
      TREE_PUBLIC (setjmp_fn) = 1;
      DECL_EXTERNAL (setjmp_fn) = 1;
      DECL_ASSEMBLER_NAME (setjmp_fn);
#endif
    }
}
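
/* For reference, the record type built above corresponds roughly to the
   following C declaration (a sketch only; the authoritative definition
   lives in unwind-sjlj.c, and the jump buffer field varies with the
   configuration, as computed above):

       struct SjLj_Function_Context
       {
	 struct SjLj_Function_Context *prev;
	 int call_site;
	 _Unwind_Word data[4];
	 _Unwind_Personality_Fn personality;
	 void *lsda;
	 void *jbuf[];     (sized as computed above)
       };
*/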

/* Set up the per-function exception handling state.  */

void
init_eh_for_function (void)
{
  cfun->eh = ggc_cleared_alloc<eh_status> ();

  /* Make sure zeroth entries are used.  */
  vec_safe_push (cfun->eh->region_array, (eh_region)0);
  vec_safe_push (cfun->eh->lp_array, (eh_landing_pad)0);
}

/* Routines to generate the exception tree somewhat directly.
   These are used from tree-eh.c when processing exception related
   nodes during tree optimization.  */

static eh_region
gen_eh_region (enum eh_region_type type, eh_region outer)
{
  eh_region new_eh;

  /* Insert a new blank region as a leaf in the tree.  */
  new_eh = ggc_cleared_alloc<eh_region_d> ();
  new_eh->type = type;
  new_eh->outer = outer;
  if (outer)
    {
      new_eh->next_peer = outer->inner;
      outer->inner = new_eh;
    }
  else
    {
      new_eh->next_peer = cfun->eh->region_tree;
      cfun->eh->region_tree = new_eh;
    }

  new_eh->index = vec_safe_length (cfun->eh->region_array);
  vec_safe_push (cfun->eh->region_array, new_eh);

  /* Copy the language's notion of whether to use __cxa_end_cleanup.  */
  if (targetm.arm_eabi_unwinder && lang_hooks.eh_use_cxa_end_cleanup)
    new_eh->use_cxa_end_cleanup = true;

  return new_eh;
}

eh_region
gen_eh_region_cleanup (eh_region outer)
{
  return gen_eh_region (ERT_CLEANUP, outer);
}

eh_region
gen_eh_region_try (eh_region outer)
{
  return gen_eh_region (ERT_TRY, outer);
}

eh_catch
gen_eh_region_catch (eh_region t, tree type_or_list)
{
  eh_catch c, l;
  tree type_list, type_node;

  gcc_assert (t->type == ERT_TRY);

  /* Make sure we always end up with a type list to normalize further
     processing, then register each type against the runtime types map.  */
  type_list = type_or_list;
  if (type_or_list)
    {
      if (TREE_CODE (type_or_list) != TREE_LIST)
	type_list = tree_cons (NULL_TREE, type_or_list, NULL_TREE);

      type_node = type_list;
      for (; type_node; type_node = TREE_CHAIN (type_node))
	add_type_for_runtime (TREE_VALUE (type_node));
    }

  c = ggc_cleared_alloc<eh_catch_d> ();
  c->type_list = type_list;
  l = t->u.eh_try.last_catch;
  c->prev_catch = l;
  if (l)
    l->next_catch = c;
  else
    t->u.eh_try.first_catch = c;
  t->u.eh_try.last_catch = c;

  return c;
}

eh_region
gen_eh_region_allowed (eh_region outer, tree allowed)
{
  eh_region region = gen_eh_region (ERT_ALLOWED_EXCEPTIONS, outer);
  region->u.allowed.type_list = allowed;

  for (; allowed ; allowed = TREE_CHAIN (allowed))
    add_type_for_runtime (TREE_VALUE (allowed));

  return region;
}

eh_region
gen_eh_region_must_not_throw (eh_region outer)
{
  return gen_eh_region (ERT_MUST_NOT_THROW, outer);
}

/* Create a new landing pad for REGION and assign it the next
   available index in lp_array.  */

eh_landing_pad
gen_eh_landing_pad (eh_region region)
{
  eh_landing_pad lp = ggc_cleared_alloc<eh_landing_pad_d> ();

  lp->next_lp = region->landing_pads;
  lp->region = region;
  lp->index = vec_safe_length (cfun->eh->lp_array);
  region->landing_pads = lp;

  vec_safe_push (cfun->eh->lp_array, lp);

  return lp;
}
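
/* An illustrative use of the generators above (a sketch of what the
   tree-eh.c lowering amounts to, with invented variable names): for

       try { body (); } catch (A) { ... }

   the lowering performs approximately

       eh_region r = gen_eh_region_try (outer);
       eh_catch c = gen_eh_region_catch (r, A_type);
       eh_landing_pad lp = gen_eh_landing_pad (r);

   after which statements in the try body are associated with R and
   the handler code with LP's post-landing-pad label.  */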

/* Simple accessors into the region and landing pad arrays.  */

eh_region
get_eh_region_from_number_fn (struct function *ifun, int i)
{
  return (*ifun->eh->region_array)[i];
}

eh_region
get_eh_region_from_number (int i)
{
  return get_eh_region_from_number_fn (cfun, i);
}

eh_landing_pad
get_eh_landing_pad_from_number_fn (struct function *ifun, int i)
{
  return (*ifun->eh->lp_array)[i];
}

eh_landing_pad
get_eh_landing_pad_from_number (int i)
{
  return get_eh_landing_pad_from_number_fn (cfun, i);
}

eh_region
get_eh_region_from_lp_number_fn (struct function *ifun, int i)
{
  if (i < 0)
    return (*ifun->eh->region_array)[-i];
  else if (i == 0)
    return NULL;
  else
    {
      eh_landing_pad lp;
      lp = (*ifun->eh->lp_array)[i];
      return lp->region;
    }
}

eh_region
get_eh_region_from_lp_number (int i)
{
  return get_eh_region_from_lp_number_fn (cfun, i);
}

/* Returns true if the current function has exception handling regions.  */

bool
current_function_has_exception_handlers (void)
{
  return cfun->eh->region_tree != NULL;
}

/* A subroutine of duplicate_eh_regions.  Copy the eh_region tree at
   OLD_R, rooting the copy at OUTER and remapping labels via
   DATA->LABEL_MAP.  */

struct duplicate_eh_regions_data
{
  duplicate_eh_regions_map label_map;
  void *label_map_data;
  hash_map<void *, void *> *eh_map;
};

static void
duplicate_eh_regions_1 (struct duplicate_eh_regions_data *data,
			eh_region old_r, eh_region outer)
{
  eh_landing_pad old_lp, new_lp;
  eh_region new_r;

  new_r = gen_eh_region (old_r->type, outer);
  gcc_assert (!data->eh_map->put (old_r, new_r));

  switch (old_r->type)
    {
    case ERT_CLEANUP:
      break;

    case ERT_TRY:
      {
	eh_catch oc, nc;
	for (oc = old_r->u.eh_try.first_catch; oc ; oc = oc->next_catch)
	  {
	    /* We should be doing all our region duplication before and
	       during inlining, which is before filter lists are created.  */
	    gcc_assert (oc->filter_list == NULL);
	    nc = gen_eh_region_catch (new_r, oc->type_list);
	    nc->label = data->label_map (oc->label, data->label_map_data);
	  }
      }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      new_r->u.allowed.type_list = old_r->u.allowed.type_list;
      if (old_r->u.allowed.label)
	new_r->u.allowed.label
	  = data->label_map (old_r->u.allowed.label, data->label_map_data);
      else
	new_r->u.allowed.label = NULL_TREE;
      break;

    case ERT_MUST_NOT_THROW:
      new_r->u.must_not_throw.failure_loc =
	LOCATION_LOCUS (old_r->u.must_not_throw.failure_loc);
      new_r->u.must_not_throw.failure_decl =
	old_r->u.must_not_throw.failure_decl;
      break;
    }

  for (old_lp = old_r->landing_pads; old_lp ; old_lp = old_lp->next_lp)
    {
      /* Don't bother copying unused landing pads.  */
      if (old_lp->post_landing_pad == NULL)
	continue;

      new_lp = gen_eh_landing_pad (new_r);
      gcc_assert (!data->eh_map->put (old_lp, new_lp));

      new_lp->post_landing_pad
	= data->label_map (old_lp->post_landing_pad, data->label_map_data);
      EH_LANDING_PAD_NR (new_lp->post_landing_pad) = new_lp->index;
    }

  /* Make sure to preserve the original use of __cxa_end_cleanup.  */
  new_r->use_cxa_end_cleanup = old_r->use_cxa_end_cleanup;

  for (old_r = old_r->inner; old_r ; old_r = old_r->next_peer)
    duplicate_eh_regions_1 (data, old_r, new_r);
}

/* Duplicate the EH regions from IFUN rooted at COPY_REGION into
   the current function and root the tree below OUTER_REGION.
   A NULL COPY_REGION means to copy all regions.  Remap labels
   using the MAP/MAP_DATA callback.  Return a pointer map that
   allows the caller to remap uses of both EH regions and EH
   landing pads.  */

hash_map<void *, void *> *
duplicate_eh_regions (struct function *ifun,
		      eh_region copy_region, int outer_lp,
		      duplicate_eh_regions_map map, void *map_data)
{
  struct duplicate_eh_regions_data data;
  eh_region outer_region;

  if (flag_checking)
    verify_eh_tree (ifun);

  data.label_map = map;
  data.label_map_data = map_data;
  data.eh_map = new hash_map<void *, void *>;

  outer_region = get_eh_region_from_lp_number_fn (cfun, outer_lp);

  /* Copy all the regions in the subtree.  */
  if (copy_region)
    duplicate_eh_regions_1 (&data, copy_region, outer_region);
  else
    {
      eh_region r;
      for (r = ifun->eh->region_tree; r ; r = r->next_peer)
	duplicate_eh_regions_1 (&data, r, outer_region);
    }

  if (flag_checking)
    verify_eh_tree (cfun);

  return data.eh_map;
}

/* Return the region that is outer to both REGION_A and REGION_B in IFUN.  */

eh_region
eh_region_outermost (struct function *ifun, eh_region region_a,
		     eh_region region_b)
{
  gcc_assert (ifun->eh->region_array);
  gcc_assert (ifun->eh->region_tree);

  auto_sbitmap b_outer (ifun->eh->region_array->length ());
  bitmap_clear (b_outer);

  do
    {
      bitmap_set_bit (b_outer, region_b->index);
      region_b = region_b->outer;
    }
  while (region_b);

  do
    {
      if (bitmap_bit_p (b_outer, region_a->index))
	break;
      region_a = region_a->outer;
    }
  while (region_a);

  return region_a;
}
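
/* For example: if region R1 contains R2 and R2 contains R3, then for
   REGION_A == R3 and REGION_B == R2 the bitmap marks R2 and R1, the
   upward walk from R3 stops at R2, and R2 is returned.  For regions
   in unrelated subtrees the result is their nearest common enclosing
   region, or NULL if there is none.  */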

void
add_type_for_runtime (tree type)
{
  /* If TYPE is NOP_EXPR, it means that it already is a runtime type.  */
  if (TREE_CODE (type) == NOP_EXPR)
    return;

  bool existed = false;
  tree *slot = &type_to_runtime_map->get_or_insert (type, &existed);
  if (!existed)
    *slot = lang_hooks.eh_runtime_type (type);
}

tree
lookup_type_for_runtime (tree type)
{
  /* If TYPE is NOP_EXPR, it means that it already is a runtime type.  */
  if (TREE_CODE (type) == NOP_EXPR)
    return type;

  /* We should have always inserted the data earlier.  */
  return *type_to_runtime_map->get (type);
}


/* Represent an entry in @TTypes for either catch actions
   or exception filter actions.  */
struct ttypes_filter {
  tree t;
  int filter;
};

/* Helper for ttypes_filter hashing.  */

struct ttypes_filter_hasher : free_ptr_hash <ttypes_filter>
{
  typedef tree_node *compare_type;
  static inline hashval_t hash (const ttypes_filter *);
  static inline bool equal (const ttypes_filter *, const tree_node *);
};

/* Compare ENTRY (a ttypes_filter entry in the hash table) with DATA
   (a tree) for a @TTypes type node we are thinking about adding.  */

inline bool
ttypes_filter_hasher::equal (const ttypes_filter *entry, const tree_node *data)
{
  return entry->t == data;
}

inline hashval_t
ttypes_filter_hasher::hash (const ttypes_filter *entry)
{
  return TREE_HASH (entry->t);
}

typedef hash_table<ttypes_filter_hasher> ttypes_hash_type;


/* Helper for ehspec hashing.  */

struct ehspec_hasher : free_ptr_hash <ttypes_filter>
{
  static inline hashval_t hash (const ttypes_filter *);
  static inline bool equal (const ttypes_filter *, const ttypes_filter *);
};

/* Compare ENTRY with DATA (both struct ttypes_filter) for a @TTypes
   exception specification list we are thinking about adding.  */
/* ??? Currently we use the type lists in the order given.  Someone
   should put these in some canonical order.  */

inline bool
ehspec_hasher::equal (const ttypes_filter *entry, const ttypes_filter *data)
{
  return type_list_equal (entry->t, data->t);
}

/* Hash function for exception specification lists.  */

inline hashval_t
ehspec_hasher::hash (const ttypes_filter *entry)
{
  hashval_t h = 0;
  tree list;

  for (list = entry->t; list ; list = TREE_CHAIN (list))
    h = (h << 5) + (h >> 27) + TREE_HASH (TREE_VALUE (list));
  return h;
}

typedef hash_table<ehspec_hasher> ehspec_hash_type;


/* Add TYPE (which may be NULL) to cfun->eh->ttype_data, using TYPES_HASH
   to speed up the search.  Return the filter value to be used.  */

static int
add_ttypes_entry (ttypes_hash_type *ttypes_hash, tree type)
{
  struct ttypes_filter **slot, *n;

  slot = ttypes_hash->find_slot_with_hash (type, (hashval_t) TREE_HASH (type),
					   INSERT);

  if ((n = *slot) == NULL)
    {
      /* Filter value is a 1 based table index.  */

      n = XNEW (struct ttypes_filter);
      n->t = type;
      n->filter = vec_safe_length (cfun->eh->ttype_data) + 1;
      *slot = n;

      vec_safe_push (cfun->eh->ttype_data, type);
    }

  return n->filter;
}

/* Add LIST to cfun->eh->ehspec_data, using EHSPEC_HASH and TYPES_HASH
   to speed up the search.  Return the filter value to be used.  */

static int
add_ehspec_entry (ehspec_hash_type *ehspec_hash, ttypes_hash_type *ttypes_hash,
		  tree list)
{
  struct ttypes_filter **slot, *n;
  struct ttypes_filter dummy;

  dummy.t = list;
  slot = ehspec_hash->find_slot (&dummy, INSERT);

  if ((n = *slot) == NULL)
    {
      int len;

      if (targetm.arm_eabi_unwinder)
	len = vec_safe_length (cfun->eh->ehspec_data.arm_eabi);
      else
	len = vec_safe_length (cfun->eh->ehspec_data.other);

      /* Filter value is a -1 based byte index into a uleb128 buffer.  */

      n = XNEW (struct ttypes_filter);
      n->t = list;
      n->filter = -(len + 1);
      *slot = n;

      /* Generate a 0 terminated list of filter values.  */
      for (; list ; list = TREE_CHAIN (list))
	{
	  if (targetm.arm_eabi_unwinder)
	    vec_safe_push (cfun->eh->ehspec_data.arm_eabi, TREE_VALUE (list));
	  else
	    {
	      /* Look up each type in the list and encode its filter
		 value as a uleb128.  */
	      push_uleb128 (&cfun->eh->ehspec_data.other,
			    add_ttypes_entry (ttypes_hash, TREE_VALUE (list)));
	    }
	}
      if (targetm.arm_eabi_unwinder)
	vec_safe_push (cfun->eh->ehspec_data.arm_eabi, NULL_TREE);
      else
	vec_safe_push (cfun->eh->ehspec_data.other, (uchar)0);
    }

  return n->filter;
}

/* Generate the action filter values to be used for CATCH and
   ALLOWED_EXCEPTIONS regions.  When using dwarf2 exception regions,
   we use lots of landing pads, and so every type or list can share
   the same filter value, which saves table space.  */

void
assign_filter_values (void)
{
  int i;
  eh_region r;
  eh_catch c;

  vec_alloc (cfun->eh->ttype_data, 16);
  if (targetm.arm_eabi_unwinder)
    vec_alloc (cfun->eh->ehspec_data.arm_eabi, 64);
  else
    vec_alloc (cfun->eh->ehspec_data.other, 64);

  ehspec_hash_type ehspec (31);
  ttypes_hash_type ttypes (31);

  for (i = 1; vec_safe_iterate (cfun->eh->region_array, i, &r); ++i)
    {
      if (r == NULL)
	continue;

      switch (r->type)
	{
	case ERT_TRY:
	  for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	    {
	      /* Whatever type_list is (NULL or true list), we build a list
		 of filters for the region.  */
	      c->filter_list = NULL_TREE;

	      if (c->type_list != NULL)
		{
		  /* Get a filter value for each of the types caught and store
		     them in the region's dedicated list.  */
		  tree tp_node = c->type_list;

		  for ( ; tp_node; tp_node = TREE_CHAIN (tp_node))
		    {
		      int flt
			= add_ttypes_entry (&ttypes, TREE_VALUE (tp_node));
		      tree flt_node = build_int_cst (integer_type_node, flt);

		      c->filter_list
			= tree_cons (NULL_TREE, flt_node, c->filter_list);
		    }
		}
	      else
		{
		  /* Get a filter value for the NULL list also since it
		     will need an action record anyway.  */
		  int flt = add_ttypes_entry (&ttypes, NULL);
		  tree flt_node = build_int_cst (integer_type_node, flt);

		  c->filter_list
		    = tree_cons (NULL_TREE, flt_node, NULL);
		}
	    }
	  break;

	case ERT_ALLOWED_EXCEPTIONS:
	  r->u.allowed.filter
	    = add_ehspec_entry (&ehspec, &ttypes, r->u.allowed.type_list);
	  break;

	default:
	  break;
	}
    }
}
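
/* Illustration of the resulting encoding (a sketch, non-EABI case): for

       try { ... } catch (A) { ... } catch (B) { ... }

   add_ttypes_entry hands A filter value 1 and B filter value 2
   (1-based indices into cfun->eh->ttype_data), so the runtime reports
   a match on A or B as 1 or 2 respectively.  An ERT_ALLOWED_EXCEPTIONS
   region permitting exactly A and B instead receives a negative filter
   that is the -1-based byte offset of the uleb128-encoded sequence
   1, 2, 0 within cfun->eh->ehspec_data.other.  */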

/* Emit SEQ into the basic block just before INSN (which is assumed to
   be the first instruction of some existing BB) and return the newly
   produced block.  */

static basic_block
emit_to_new_bb_before (rtx_insn *seq, rtx_insn *insn)
{
  rtx_insn *last;
  basic_block bb;
  edge e;
  edge_iterator ei;

  /* If there happens to be a fallthru edge (possibly created by cleanup_cfg
     call), we don't want it to go into newly created landing pad or other EH
     construct.  */
  for (ei = ei_start (BLOCK_FOR_INSN (insn)->preds); (e = ei_safe_edge (ei)); )
    if (e->flags & EDGE_FALLTHRU)
      force_nonfallthru (e);
    else
      ei_next (&ei);
  last = emit_insn_before (seq, insn);
  if (BARRIER_P (last))
    last = PREV_INSN (last);
  bb = create_basic_block (seq, last, BLOCK_FOR_INSN (insn)->prev_bb);
  update_bb_for_insn (bb);
  bb->flags |= BB_SUPERBLOCK;
  return bb;
}

/* A subroutine of dw2_build_landing_pads, also used for edge splitting
   at the rtl level.  Emit the code required by the target at a landing
   pad for the given region.  */

static void
expand_dw2_landing_pad_for_region (eh_region region)
{
  if (targetm.have_exception_receiver ())
    emit_insn (targetm.gen_exception_receiver ());
  else if (targetm.have_nonlocal_goto_receiver ())
    emit_insn (targetm.gen_nonlocal_goto_receiver ());
  else
    { /* Nothing */ }

  if (region->exc_ptr_reg)
    emit_move_insn (region->exc_ptr_reg,
		    gen_rtx_REG (ptr_mode, EH_RETURN_DATA_REGNO (0)));
  if (region->filter_reg)
    emit_move_insn (region->filter_reg,
		    gen_rtx_REG (targetm.eh_return_filter_mode (),
				 EH_RETURN_DATA_REGNO (1)));
}

/* Expand the extra code needed at landing pads for dwarf2 unwinding.  */

static void
dw2_build_landing_pads (void)
{
  int i;
  eh_landing_pad lp;
  int e_flags = EDGE_FALLTHRU;

  /* If we're going to partition blocks, we need to be able to add
     new landing pads later, which means that we need to hold on to
     the post-landing-pad block.  Prevent it from being merged away.
     We'll remove this bit after partitioning.  */
  if (flag_reorder_blocks_and_partition)
    e_flags |= EDGE_PRESERVE;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    {
      basic_block bb;
      rtx_insn *seq;

      if (lp == NULL || lp->post_landing_pad == NULL)
	continue;

      start_sequence ();

      lp->landing_pad = gen_label_rtx ();
      emit_label (lp->landing_pad);
      LABEL_PRESERVE_P (lp->landing_pad) = 1;

      expand_dw2_landing_pad_for_region (lp->region);

      seq = get_insns ();
      end_sequence ();

      bb = emit_to_new_bb_before (seq, label_rtx (lp->post_landing_pad));
      bb->count = bb->next_bb->count;
      make_single_succ_edge (bb, bb->next_bb, e_flags);
      if (current_loops)
	{
	  struct loop *loop = bb->next_bb->loop_father;
	  /* If we created a pre-header block, add the new block to the
	     outer loop, otherwise to the loop itself.  */
	  if (bb->next_bb == loop->header)
	    add_bb_to_loop (bb, loop_outer (loop));
	  else
	    add_bb_to_loop (bb, loop);
	}
    }
}
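
/* The code emitted above for each landing pad amounts to the following
   schematic sequence (a sketch in pseudo-RTL):

       Lpad:
	 (exception_receiver or nonlocal_goto_receiver,
	  if the target defines one)
	 exc_ptr_reg  <- reg:ptr_mode EH_RETURN_DATA_REGNO (0)
	 filter_reg   <- reg:filter_mode EH_RETURN_DATA_REGNO (1)
	 (fall through to the region's post_landing_pad label)
*/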

static vec<int> sjlj_lp_call_site_index;

/* Process all active landing pads.  Assign each one a compact dispatch
   index, and a call-site index.  */

static int
sjlj_assign_call_site_values (void)
{
  action_hash_type ar_hash (31);
  int i, disp_index;
  eh_landing_pad lp;

  vec_alloc (crtl->eh.action_record_data, 64);

  disp_index = 0;
  call_site_base = 1;
  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp && lp->post_landing_pad)
      {
	int action, call_site;

	/* First: build the action table.  */
	action = collect_one_action_chain (&ar_hash, lp->region);

	/* Next: assign call-site values.  In dwarf2 terms, this would be
	   the region number assigned by convert_to_eh_region_ranges, but
	   handles no-action and must-not-throw differently.  */
	/* Map must-not-throw to otherwise unused call-site index 0.  */
	if (action == -2)
	  call_site = 0;
	/* Map no-action to otherwise unused call-site index -1.  */
	else if (action == -1)
	  call_site = -1;
	/* Otherwise, look it up in the table.  */
	else
	  call_site = add_call_site (GEN_INT (disp_index), action, 0);
	sjlj_lp_call_site_index[i] = call_site;

	disp_index++;
      }

  return disp_index;
}

/* Emit code to record the current call-site index before every
   insn that can throw.  */

static void
sjlj_mark_call_sites (void)
{
  int last_call_site = -2;
  rtx_insn *insn;
  rtx mem;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    {
      eh_landing_pad lp;
      eh_region r;
      bool nothrow;
      int this_call_site;
      rtx_insn *before, *p;

      /* Reset value tracking at extended basic block boundaries.  */
      if (LABEL_P (insn))
	last_call_site = -2;

      /* If the function allocates dynamic stack space, the context must
	 be updated after every allocation/deallocation accordingly.  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_UPDATE_SJLJ_CONTEXT)
	{
	  rtx buf_addr;

	  start_sequence ();
	  buf_addr = plus_constant (Pmode, XEXP (crtl->eh.sjlj_fc, 0),
				    sjlj_fc_jbuf_ofs);
	  expand_builtin_update_setjmp_buf (buf_addr);
	  p = get_insns ();
	  end_sequence ();
	  emit_insn_before (p, insn);
	}

      if (! INSN_P (insn))
	continue;

      nothrow = get_eh_region_and_lp_from_rtx (insn, &r, &lp);
      if (nothrow)
	continue;
      if (lp)
	this_call_site = sjlj_lp_call_site_index[lp->index];
      else if (r == NULL)
	{
	  /* Calls (and trapping insns) without notes are outside any
	     exception handling region in this function.  Mark them as
	     no action.  */
	  this_call_site = -1;
	}
      else
	{
	  gcc_assert (r->type == ERT_MUST_NOT_THROW);
	  this_call_site = 0;
	}

      if (this_call_site != -1)
	crtl->uses_eh_lsda = 1;

      if (this_call_site == last_call_site)
	continue;

      /* Don't separate a call from its argument loads.  */
      before = insn;
      if (CALL_P (insn))
	before = find_first_parameter_load (insn, NULL);

      start_sequence ();
      mem = adjust_address (crtl->eh.sjlj_fc, TYPE_MODE (integer_type_node),
			    sjlj_fc_call_site_ofs);
      emit_move_insn (mem, gen_int_mode (this_call_site, GET_MODE (mem)));
      p = get_insns ();
      end_sequence ();

      emit_insn_before (p, before);
      last_call_site = this_call_site;
    }
}

/* Construct the SjLj_Function_Context for the current function and
   emit the code that fills it in and registers it with the runtime
   on entry.  */

static void
sjlj_emit_function_enter (rtx_code_label *dispatch_label)
{
  rtx_insn *fn_begin, *seq;
  rtx fc, mem;
  bool fn_begin_outside_block;
  rtx personality = get_personality_function (current_function_decl);

  fc = crtl->eh.sjlj_fc;

  start_sequence ();

  /* We're storing this libcall's address into memory instead of
     calling it directly.  Thus, we must call assemble_external_libcall
     here, as we cannot depend on emit_library_call to do it for us.  */
  assemble_external_libcall (personality);
  mem = adjust_address (fc, Pmode, sjlj_fc_personality_ofs);
  emit_move_insn (mem, personality);

  mem = adjust_address (fc, Pmode, sjlj_fc_lsda_ofs);
  if (crtl->uses_eh_lsda)
    {
      char buf[20];
      rtx sym;

      ASM_GENERATE_INTERNAL_LABEL (buf, "LLSDA", current_function_funcdef_no);
      sym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_LOCAL;
      emit_move_insn (mem, sym);
    }
  else
    emit_move_insn (mem, const0_rtx);

  if (dispatch_label)
    {
      rtx addr = plus_constant (Pmode, XEXP (fc, 0), sjlj_fc_jbuf_ofs);

#ifdef DONT_USE_BUILTIN_SETJMP
      addr = copy_addr_to_reg (addr);
      addr = convert_memory_address (ptr_mode, addr);
      tree addr_tree = make_tree (ptr_type_node, addr);

      tree call_expr = build_call_expr (setjmp_fn, 1, addr_tree);
      rtx x = expand_call (call_expr, NULL_RTX, false);

      emit_cmp_and_jump_insns (x, const0_rtx, NE, 0,
			       TYPE_MODE (integer_type_node), 0,
			       dispatch_label,
			       profile_probability::unlikely ());
#else
      expand_builtin_setjmp_setup (addr, dispatch_label);
#endif
    }

  emit_library_call (unwind_sjlj_register_libfunc, LCT_NORMAL, VOIDmode,
		     XEXP (fc, 0), Pmode);

  seq = get_insns ();
  end_sequence ();

  /* ??? Instead of doing this at the beginning of the function,
     do this in a block that is at loop level 0 and dominates all
     can_throw_internal instructions.  */

  fn_begin_outside_block = true;
  for (fn_begin = get_insns (); ; fn_begin = NEXT_INSN (fn_begin))
    if (NOTE_P (fn_begin))
      {
	if (NOTE_KIND (fn_begin) == NOTE_INSN_FUNCTION_BEG)
	  break;
	else if (NOTE_INSN_BASIC_BLOCK_P (fn_begin))
	  fn_begin_outside_block = false;
      }

#ifdef DONT_USE_BUILTIN_SETJMP
  if (dispatch_label)
    {
      /* The sequence contains a branch in the middle so we need to force
	 the creation of a new basic block by means of BB_SUPERBLOCK.  */
      if (fn_begin_outside_block)
	{
	  basic_block bb
	    = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
	  if (JUMP_P (BB_END (bb)))
	    emit_insn_before (seq, BB_END (bb));
	  else
	    emit_insn_after (seq, BB_END (bb));
	}
      else
	emit_insn_after (seq, fn_begin);

      single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->flags |= BB_SUPERBLOCK;
      return;
    }
#endif

  if (fn_begin_outside_block)
    insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
  else
    emit_insn_after (seq, fn_begin);
}
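
/* In other words, the prologue sequence constructed above behaves like
   the following pseudo-C (a sketch only; the personality symbol depends
   on the language, e.g. __gxx_personality_sj0 for C++):

       fc.personality = <personality routine>;
       fc.lsda = &.LLSDAnnn;              (or 0 when no LSDA is needed)
       if (<setjmp-style capture into fc.jbuf> returns nonzero)
	 goto dispatch_label;
       _Unwind_SjLj_Register (&fc);
*/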

/* Call back from expand_function_end to know where we should put
   the call to unwind_sjlj_unregister_libfunc if needed.  */

void
sjlj_emit_function_exit_after (rtx_insn *after)
{
  crtl->eh.sjlj_exit_after = after;
}

/* Emit the call that unregisters the function context at the
   recorded exit point.  */

static void
sjlj_emit_function_exit (void)
{
  rtx_insn *seq, *insn;

  start_sequence ();

  emit_library_call (unwind_sjlj_unregister_libfunc, LCT_NORMAL, VOIDmode,
		     XEXP (crtl->eh.sjlj_fc, 0), Pmode);

  seq = get_insns ();
  end_sequence ();

  /* ??? Really this can be done in any block at loop level 0 that
     post-dominates all can_throw_internal instructions.  This is
     the last possible moment.  */

  insn = crtl->eh.sjlj_exit_after;
  if (LABEL_P (insn))
    insn = NEXT_INSN (insn);

  emit_insn_after (seq, insn);
}
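
/* Emit the dispatch code that receives control after a longjmp-style
   return into the function.  Schematically (a sketch; the switch is
   elided when there is only one call site) it amounts to:

       dispatch_label:
	 exc_ptr_reg = (pointer)     fc.data[0];
	 filter_reg  = (filter mode) fc.data[1];
	 switch (fc.call_site)
	   case 0: goto stub for landing pad with dispatch index 0;
	   case 1: goto stub for landing pad with dispatch index 1;
	   ...

   where each stub copies EXC_PTR_REG and FILTER_REG into the region's
   registers and falls through to its post-landing-pad label.  */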
static void
sjlj_emit_dispatch_table (rtx_code_label *dispatch_label, int num_dispatch)
{
  scalar_int_mode unwind_word_mode = targetm.unwind_word_mode ();
  scalar_int_mode filter_mode = targetm.eh_return_filter_mode ();
  eh_landing_pad lp;
  rtx mem, fc, exc_ptr_reg, filter_reg;
  rtx_insn *seq;
  basic_block bb;
  eh_region r;
  int i, disp_index;
  vec<tree> dispatch_labels = vNULL;

  fc = crtl->eh.sjlj_fc;

  start_sequence ();

  emit_label (dispatch_label);

#ifndef DONT_USE_BUILTIN_SETJMP
  expand_builtin_setjmp_receiver (dispatch_label);

  /* The caller of expand_builtin_setjmp_receiver is responsible for
     making sure that the label doesn't vanish.  The only other caller
     is the expander for __builtin_setjmp_receiver, which places this
     label on the nonlocal_goto_label list.  Since we're modeling these
     CFG edges more exactly, we can use the forced_labels list instead.  */
  LABEL_PRESERVE_P (dispatch_label) = 1;
  vec_safe_push<rtx_insn *> (forced_labels, dispatch_label);
#endif

  /* Load up exc_ptr and filter values from the function context.  */
  mem = adjust_address (fc, unwind_word_mode, sjlj_fc_data_ofs);
  if (unwind_word_mode != ptr_mode)
    {
#ifdef POINTERS_EXTEND_UNSIGNED
      mem = convert_memory_address (ptr_mode, mem);
#else
      mem = convert_to_mode (ptr_mode, mem, 0);
#endif
    }
  exc_ptr_reg = force_reg (ptr_mode, mem);

  mem = adjust_address (fc, unwind_word_mode,
			sjlj_fc_data_ofs + GET_MODE_SIZE (unwind_word_mode));
  if (unwind_word_mode != filter_mode)
    mem = convert_to_mode (filter_mode, mem, 0);
  filter_reg = force_reg (filter_mode, mem);

  /* Jump to one of the directly reachable regions.  */

  disp_index = 0;
  rtx_code_label *first_reachable_label = NULL;

  /* If there's exactly one call site in the function, don't bother
     generating a switch statement.  */
  if (num_dispatch > 1)
    dispatch_labels.create (num_dispatch);

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp && lp->post_landing_pad)
      {
	rtx_insn *seq2;
	rtx_code_label *label;

	start_sequence ();

	lp->landing_pad = dispatch_label;

	if (num_dispatch > 1)
	  {
	    tree t_label, case_elt, t;

	    t_label = create_artificial_label (UNKNOWN_LOCATION);
	    t = build_int_cst (integer_type_node, disp_index);
	    case_elt = build_case_label (t, NULL, t_label);
	    dispatch_labels.quick_push (case_elt);
	    label = jump_target_rtx (t_label);
	  }
	else
	  label = gen_label_rtx ();

	if (disp_index == 0)
	  first_reachable_label = label;
	emit_label (label);

	r = lp->region;
	if (r->exc_ptr_reg)
	  emit_move_insn (r->exc_ptr_reg, exc_ptr_reg);
	if (r->filter_reg)
	  emit_move_insn (r->filter_reg, filter_reg);

	seq2 = get_insns ();
	end_sequence ();

	rtx_insn *before = label_rtx (lp->post_landing_pad);
	bb = emit_to_new_bb_before (seq2, before);
	make_single_succ_edge (bb, bb->next_bb, EDGE_FALLTHRU);
	if (current_loops)
	  {
	    struct loop *loop = bb->next_bb->loop_father;
	    /* If we created a pre-header block, add the new block to the
	       outer loop, otherwise to the loop itself.  */
	    if (bb->next_bb == loop->header)
	      add_bb_to_loop (bb, loop_outer (loop));
	    else
	      add_bb_to_loop (bb, loop);
	    /* ??? For multiple dispatches we will end up with edges
	       from the loop tree root into this loop, making it a
	       multiple-entry loop.  Discard all affected loops.  */
	    if (num_dispatch > 1)
	      {
		for (loop = bb->loop_father;
		     loop_outer (loop); loop = loop_outer (loop))
		  mark_loop_for_removal (loop);
	      }
	  }

	disp_index++;
      }
  gcc_assert (disp_index == num_dispatch);

  if (num_dispatch > 1)
    {
      rtx disp = adjust_address (fc, TYPE_MODE (integer_type_node),
				 sjlj_fc_call_site_ofs);
      expand_sjlj_dispatch_table (disp, dispatch_labels);
    }

  seq = get_insns ();
  end_sequence ();

  bb = emit_to_new_bb_before (seq, first_reachable_label);
  if (num_dispatch == 1)
    {
      make_single_succ_edge (bb, bb->next_bb, EDGE_FALLTHRU);
      if (current_loops)
	{
	  struct loop *loop = bb->next_bb->loop_father;
	  /* If we created a pre-header block, add the new block to the
	     outer loop, otherwise to the loop itself.  */
	  if (bb->next_bb == loop->header)
	    add_bb_to_loop (bb, loop_outer (loop));
	  else
	    add_bb_to_loop (bb, loop);
	}
    }
  else
    {
      /* We are not wiring up edges here, but as the dispatcher call
	 is at function begin simply associate the block with the
	 outermost (non-)loop.  */
      if (current_loops)
	add_bb_to_loop (bb, current_loops->tree_root);
    }
}

/* Build the landing pads, dispatch code, and register/unregister
   calls for setjmp/longjmp based exception handling.  */

static void
sjlj_build_landing_pads (void)
{
  int num_dispatch;

  num_dispatch = vec_safe_length (cfun->eh->lp_array);
  if (num_dispatch == 0)
    return;
  sjlj_lp_call_site_index.safe_grow_cleared (num_dispatch);

  num_dispatch = sjlj_assign_call_site_values ();
  if (num_dispatch > 0)
    {
      rtx_code_label *dispatch_label = gen_label_rtx ();
      int align = STACK_SLOT_ALIGNMENT (sjlj_fc_type_node,
					TYPE_MODE (sjlj_fc_type_node),
					TYPE_ALIGN (sjlj_fc_type_node));
      crtl->eh.sjlj_fc
	= assign_stack_local (TYPE_MODE (sjlj_fc_type_node),
			      int_size_in_bytes (sjlj_fc_type_node),
			      align);

      sjlj_mark_call_sites ();
      sjlj_emit_function_enter (dispatch_label);
      sjlj_emit_dispatch_table (dispatch_label, num_dispatch);
      sjlj_emit_function_exit ();
    }

  /* If we do not have any landing pads, we may still need to register a
     personality routine and (empty) LSDA to handle must-not-throw regions.  */
  else if (function_needs_eh_personality (cfun) != eh_personality_none)
    {
      int align = STACK_SLOT_ALIGNMENT (sjlj_fc_type_node,
					TYPE_MODE (sjlj_fc_type_node),
					TYPE_ALIGN (sjlj_fc_type_node));
      crtl->eh.sjlj_fc
	= assign_stack_local (TYPE_MODE (sjlj_fc_type_node),
			      int_size_in_bytes (sjlj_fc_type_node),
			      align);

      sjlj_mark_call_sites ();
      sjlj_emit_function_enter (NULL);
      sjlj_emit_function_exit ();
    }

  sjlj_lp_call_site_index.release ();
}

/* Update the sjlj function context.  This function should be called
   whenever we allocate or deallocate dynamic stack space.  */

void
update_sjlj_context (void)
{
  if (!flag_exceptions)
    return;

  emit_note (NOTE_INSN_UPDATE_SJLJ_CONTEXT);
}

/* After initial rtl generation, call back to finish generating
   exception support code.  */

void
finish_eh_generation (void)
{
  basic_block bb;

  /* Construct the landing pads.  */
  if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
    sjlj_build_landing_pads ();
  else
    dw2_build_landing_pads ();
  break_superblocks ();

  if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ
      /* Kludge for Alpha (see alpha_gp_save_rtx).  */
      || single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->insns.r)
    commit_edge_insertions ();

  /* Redirect all EH edges from the post_landing_pad to the landing pad.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      eh_landing_pad lp;
      edge_iterator ei;
      edge e;

      lp = get_eh_landing_pad_from_rtx (BB_END (bb));

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->flags & EDGE_EH)
	  break;

      /* We should not have generated any new throwing insns during this
	 pass, and we should not have lost any EH edges, so we only need
	 to handle two cases here:
	 (1) reachable handler and an existing edge to post-landing-pad,
	 (2) no reachable handler and no edge.  */
      gcc_assert ((lp != NULL) == (e != NULL));
      if (lp != NULL)
	{
	  gcc_assert (BB_HEAD (e->dest) == label_rtx (lp->post_landing_pad));

	  redirect_edge_succ (e, BLOCK_FOR_INSN (lp->landing_pad));
	  e->flags |= (CALL_P (BB_END (bb))
		       ? EDGE_ABNORMAL | EDGE_ABNORMAL_CALL
		       : EDGE_ABNORMAL);
	}
    }
}

/* This section handles removing dead code for flow.  */

/* Remove LP from its region's landing pad list and clear its entry
   in lp_array.  */

void
remove_eh_landing_pad (eh_landing_pad lp)
{
  eh_landing_pad *pp;

  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
    continue;
  *pp = lp->next_lp;

  if (lp->post_landing_pad)
    EH_LANDING_PAD_NR (lp->post_landing_pad) = 0;
  (*cfun->eh->lp_array)[lp->index] = NULL;
}

/* Splice the EH region at PP from the region tree.  */

static void
remove_eh_handler_splicer (eh_region *pp)
{
  eh_region region = *pp;
  eh_landing_pad lp;

  for (lp = region->landing_pads; lp ; lp = lp->next_lp)
    {
      if (lp->post_landing_pad)
	EH_LANDING_PAD_NR (lp->post_landing_pad) = 0;
      (*cfun->eh->lp_array)[lp->index] = NULL;
    }

  if (region->inner)
    {
      eh_region p, outer;
      outer = region->outer;

      *pp = p = region->inner;
      do
	{
	  p->outer = outer;
	  pp = &p->next_peer;
	  p = *pp;
	}
      while (p);
    }
  *pp = region->next_peer;

  (*cfun->eh->region_array)[region->index] = NULL;
}

/* Splice a single EH region REGION from the region tree.

   To unlink REGION, we need to find the pointer to it with a relatively
   expensive search in REGION's outer region.  If you are going to
   remove a number of handlers, using remove_unreachable_eh_regions may
   be a better option.  */

void
remove_eh_handler (eh_region region)
{
  eh_region *pp, *pp_start, p, outer;

  outer = region->outer;
  if (outer)
    pp_start = &outer->inner;
  else
    pp_start = &cfun->eh->region_tree;
  for (pp = pp_start, p = *pp; p != region; pp = &p->next_peer, p = *pp)
    continue;

  remove_eh_handler_splicer (pp);
}

/* Worker for remove_unreachable_eh_regions.
   PP is a pointer to the region to start a region tree depth-first
   search from.  R_REACHABLE is the set of regions that have to be
   preserved.  */

static void
remove_unreachable_eh_regions_worker (eh_region *pp, sbitmap r_reachable)
{
  while (*pp)
    {
      eh_region region = *pp;
      remove_unreachable_eh_regions_worker (&region->inner, r_reachable);
      if (!bitmap_bit_p (r_reachable, region->index))
	remove_eh_handler_splicer (pp);
      else
	pp = &region->next_peer;
    }
}

/* Splice all EH regions *not* marked in R_REACHABLE from the region tree.
   Do this by traversing the EH tree top-down and splicing out regions
   that are not marked.  By removing regions from the leaves, we avoid
   costly searches in the region tree.  */

void
remove_unreachable_eh_regions (sbitmap r_reachable)
{
  remove_unreachable_eh_regions_worker (&cfun->eh->region_tree, r_reachable);
}

/* Invokes CALLBACK for every exception handler landing pad label.
   Only used by reload hackery; should not be used by new code.  */

void
for_each_eh_label (void (*callback) (rtx))
{
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    {
      if (lp)
	{
	  rtx_code_label *lab = lp->landing_pad;
	  if (lab && LABEL_P (lab))
	    (*callback) (lab);
	}
    }
}

/* Create the REG_EH_REGION note for INSN, given its ECF_FLAGS for a
   call insn.

   At the gimple level, we use LP_NR
       > 0 : The statement transfers to landing pad LP_NR
       = 0 : The statement is outside any EH region
       < 0 : The statement is within MUST_NOT_THROW region -LP_NR.

   At the rtl level, we use LP_NR
       > 0 : The insn transfers to landing pad LP_NR
       = 0 : The insn cannot throw
       < 0 : The insn is within MUST_NOT_THROW region -LP_NR
       = INT_MIN : The insn cannot throw or execute a nonlocal-goto.
       missing note: The insn is outside any EH region.

  ??? This difference probably ought to be avoided.  We could stand
  to record nothrow for arbitrary gimple statements, and so avoid
  some moderately complex lookups in stmt_could_throw_p.  Perhaps
  NOTHROW should be mapped on both sides to INT_MIN.  Perhaps the
  no-nonlocal-goto property should be recorded elsewhere as a bit
  on the call_insn directly.  Perhaps we should make more use of
  attaching the trees to call_insns (reachable via symbol_ref in
  direct call cases) and just pull the data out of the trees.  */

void
make_reg_eh_region_note (rtx_insn *insn, int ecf_flags, int lp_nr)
{
  rtx value;
  if (ecf_flags & ECF_NOTHROW)
    value = const0_rtx;
  else if (lp_nr != 0)
    value = GEN_INT (lp_nr);
  else
    return;
  add_reg_note (insn, REG_EH_REGION, value);
}
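
/* For example (illustrative): when expanding a call that the gimple
   THROW_STMT_TABLE associated with landing pad 3, the expander calls
   make_reg_eh_region_note (insn, 0, 3), yielding a REG_EH_REGION note
   with value 3; a call declared nothrow (ECF_NOTHROW set) gets value
   0 instead; a call with neither property gets no note at all, which
   per the table above means it is outside any EH region.  */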

/* Create a REG_EH_REGION note for a CALL_INSN that cannot throw
   nor perform a non-local goto.  Replace the region note if it
   already exists.  */

void
make_reg_eh_region_note_nothrow_nononlocal (rtx_insn *insn)
{
  rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
  rtx intmin = GEN_INT (INT_MIN);

  if (note != 0)
    XEXP (note, 0) = intmin;
  else
    add_reg_note (insn, REG_EH_REGION, intmin);
}

/* Return true if INSN could throw, assuming no REG_EH_REGION note
   to the contrary.  */

bool
insn_could_throw_p (const_rtx insn)
{
  if (!flag_exceptions)
    return false;
  if (CALL_P (insn))
    return true;
  if (INSN_P (insn) && cfun->can_throw_non_call_exceptions)
    return may_trap_p (PATTERN (insn));
  return false;
}

/* Copy a REG_EH_REGION note to each insn that might throw beginning
   at FIRST and ending at LAST.  NOTE_OR_INSN is either the source insn
   to look for a note, or the note itself.  */

void
copy_reg_eh_region_note_forward (rtx note_or_insn, rtx_insn *first, rtx last)
{
  rtx_insn *insn;
  rtx note = note_or_insn;

  if (INSN_P (note_or_insn))
    {
      note = find_reg_note (note_or_insn, REG_EH_REGION, NULL_RTX);
      if (note == NULL)
	return;
    }
  note = XEXP (note, 0);

  for (insn = first; insn != last ; insn = NEXT_INSN (insn))
    if (!find_reg_note (insn, REG_EH_REGION, NULL_RTX)
	&& insn_could_throw_p (insn))
      add_reg_note (insn, REG_EH_REGION, note);
}

/* Likewise, but iterate backward.  */

void
copy_reg_eh_region_note_backward (rtx note_or_insn, rtx_insn *last, rtx first)
{
  rtx_insn *insn;
  rtx note = note_or_insn;

  if (INSN_P (note_or_insn))
    {
      note = find_reg_note (note_or_insn, REG_EH_REGION, NULL_RTX);
      if (note == NULL)
	return;
    }
  note = XEXP (note, 0);

  for (insn = last; insn != first; insn = PREV_INSN (insn))
    if (insn_could_throw_p (insn))
      add_reg_note (insn, REG_EH_REGION, note);
}


/* Extract all EH information from INSN.  Return true if the insn
   was marked NOTHROW.  */

static bool
get_eh_region_and_lp_from_rtx (const_rtx insn, eh_region *pr,
			       eh_landing_pad *plp)
{
  eh_landing_pad lp = NULL;
  eh_region r = NULL;
  bool ret = false;
  rtx note;
  int lp_nr;

  if (! INSN_P (insn))
    goto egress;

  if (NONJUMP_INSN_P (insn)
      && GET_CODE (PATTERN (insn)) == SEQUENCE)
    insn = XVECEXP (PATTERN (insn), 0, 0);

  note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
  if (!note)
    {
      ret = !insn_could_throw_p (insn);
      goto egress;
    }

  lp_nr = INTVAL (XEXP (note, 0));
  if (lp_nr == 0 || lp_nr == INT_MIN)
    {
      ret = true;
      goto egress;
    }

  if (lp_nr < 0)
    r = (*cfun->eh->region_array)[-lp_nr];
  else
    {
      lp = (*cfun->eh->lp_array)[lp_nr];
      r = lp->region;
    }

 egress:
  *plp = lp;
  *pr = r;
  return ret;
}

/* Return the landing pad to which INSN may go, or NULL if it does not
   have a reachable landing pad within this function.  */

eh_landing_pad
get_eh_landing_pad_from_rtx (const_rtx insn)
{
  eh_landing_pad lp;
  eh_region r;

  get_eh_region_and_lp_from_rtx (insn, &r, &lp);
  return lp;
}

/* Return the region to which INSN may go, or NULL if it does not
   have a reachable region within this function.  */

eh_region
get_eh_region_from_rtx (const_rtx insn)
{
  eh_landing_pad lp;
  eh_region r;

  get_eh_region_and_lp_from_rtx (insn, &r, &lp);
  return r;
}

/* Return true if INSN throws and is caught by something in this function.  */

bool
can_throw_internal (const_rtx insn)
{
  return get_eh_landing_pad_from_rtx (insn) != NULL;
}

/* Return true if INSN throws and escapes from the current function.  */

bool
can_throw_external (const_rtx insn)
{
  eh_landing_pad lp;
  eh_region r;
  bool nothrow;

  if (! INSN_P (insn))
    return false;

  if (NONJUMP_INSN_P (insn)
      && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn));
      int i, n = seq->len ();

      for (i = 0; i < n; i++)
	if (can_throw_external (seq->element (i)))
	  return true;

      return false;
    }

  nothrow = get_eh_region_and_lp_from_rtx (insn, &r, &lp);

  /* If we can't throw, we obviously can't throw external.  */
  if (nothrow)
    return false;

  /* If we have an internal landing pad, then we're not external.  */
  if (lp != NULL)
    return false;

  /* If we're not within an EH region, then we are external.  */
  if (r == NULL)
    return true;

  /* The only thing that ought to be left is MUST_NOT_THROW regions,
     which don't always have landing pads.  */
  gcc_assert (r->type == ERT_MUST_NOT_THROW);
  return false;
}
*/ 1911 gcc_assert (r->type == ERT_MUST_NOT_THROW); 1912 return false; 1913 } 1914 1915 /* Return true if INSN cannot throw at all. */ 1916 1917 bool 1918 insn_nothrow_p (const_rtx insn) 1919 { 1920 eh_landing_pad lp; 1921 eh_region r; 1922 1923 if (! INSN_P (insn)) 1924 return true; 1925 1926 if (NONJUMP_INSN_P (insn) 1927 && GET_CODE (PATTERN (insn)) == SEQUENCE) 1928 { 1929 rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn)); 1930 int i, n = seq->len (); 1931 1932 for (i = 0; i < n; i++) 1933 if (!insn_nothrow_p (seq->element (i))) 1934 return false; 1935 1936 return true; 1937 } 1938 1939 return get_eh_region_and_lp_from_rtx (insn, &r, &lp); 1940 } 1941 1942 /* Return true if INSN can perform a non-local goto. */ 1943 /* ??? This test is here in this file because it (ab)uses REG_EH_REGION. */ 1944 1945 bool 1946 can_nonlocal_goto (const rtx_insn *insn) 1947 { 1948 if (nonlocal_goto_handler_labels && CALL_P (insn)) 1949 { 1950 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); 1951 if (!note || INTVAL (XEXP (note, 0)) != INT_MIN) 1952 return true; 1953 } 1954 return false; 1955 } 1956 1957 /* Set TREE_NOTHROW and crtl->all_throwers_are_sibcalls. */ 1958 1959 static unsigned int 1960 set_nothrow_function_flags (void) 1961 { 1962 rtx_insn *insn; 1963 1964 crtl->nothrow = 1; 1965 1966 /* Assume crtl->all_throwers_are_sibcalls until we encounter 1967 something that can throw an exception. We specifically exempt 1968 CALL_INSNs that are SIBLING_CALL_P, as these are really jumps, 1969 and can't throw. Most CALL_INSNs are not SIBLING_CALL_P, so this 1970 is optimistic. */ 1971 1972 crtl->all_throwers_are_sibcalls = 1; 1973 1974 /* If we don't know that this implementation of the function will 1975 actually be used, then we must not set TREE_NOTHROW, since 1976 callers must not assume that this function does not throw. */ 1977 if (TREE_NOTHROW (current_function_decl)) 1978 return 0; 1979 1980 if (! 
flag_exceptions)
    return 0;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (can_throw_external (insn))
      {
        crtl->nothrow = 0;

        if (!CALL_P (insn) || !SIBLING_CALL_P (insn))
          {
            crtl->all_throwers_are_sibcalls = 0;
            return 0;
          }
      }

  if (crtl->nothrow
      && (cgraph_node::get (current_function_decl)->get_availability ()
          >= AVAIL_AVAILABLE))
    {
      struct cgraph_node *node = cgraph_node::get (current_function_decl);
      struct cgraph_edge *e;
      for (e = node->callers; e; e = e->next_caller)
        e->can_throw_external = false;
      node->set_nothrow_flag (true);

      if (dump_file)
        fprintf (dump_file, "Marking function nothrow: %s\n\n",
                 current_function_name ());
    }
  return 0;
}

namespace {

const pass_data pass_data_set_nothrow_function_flags =
{
  RTL_PASS, /* type */
  "nothrow", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_set_nothrow_function_flags : public rtl_opt_pass
{
public:
  pass_set_nothrow_function_flags (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_set_nothrow_function_flags, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return set_nothrow_function_flags ();
    }

}; // class pass_set_nothrow_function_flags

} // anon namespace

rtl_opt_pass *
make_pass_set_nothrow_function_flags (gcc::context *ctxt)
{
  return new pass_set_nothrow_function_flags (ctxt);
}


/* Various hooks for the unwind library.  */

/* Expand the EH support builtin functions:
   __builtin_eh_pointer and __builtin_eh_filter.  */

static eh_region
expand_builtin_eh_common (tree region_nr_t)
{
  HOST_WIDE_INT region_nr;
  eh_region region;

  gcc_assert (tree_fits_shwi_p (region_nr_t));
  region_nr = tree_to_shwi (region_nr_t);

  region = (*cfun->eh->region_array)[region_nr];

  /* ??? We shouldn't have been able to delete an eh region without
     deleting all the code that depended on it.  */
  gcc_assert (region != NULL);

  return region;
}

/* Expand to the exc_ptr value from the given eh region.  */

rtx
expand_builtin_eh_pointer (tree exp)
{
  eh_region region
    = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 0));
  if (region->exc_ptr_reg == NULL)
    region->exc_ptr_reg = gen_reg_rtx (ptr_mode);
  return region->exc_ptr_reg;
}

/* Expand to the filter value from the given eh region.  */

rtx
expand_builtin_eh_filter (tree exp)
{
  eh_region region
    = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 0));
  if (region->filter_reg == NULL)
    region->filter_reg = gen_reg_rtx (targetm.eh_return_filter_mode ());
  return region->filter_reg;
}
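
/* Editorial aside, not part of GCC: once everything has been expanded,
   the code at a post-landing-pad for a try statement with two handlers
   conceptually reduces to

       exc    = __builtin_eh_pointer (R);
       filter = __builtin_eh_filter (R);
       if (filter == 1) goto handler_A;
       if (filter == 2) goto handler_B;

   where the region number R and the filter values 1 and 2 stand for
   hypothetical results of assign_filter_values.  The two expanders
   above implement these builtins by handing back the region's cached
   pseudo register, creating it on first use so that every reference to
   a given region shares one register.  */

/* Copy the exc_ptr and filter values from one landing pad's registers
   to another.  This is used to inline the resx statement.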
*/ 2100 2101 rtx 2102 expand_builtin_eh_copy_values (tree exp) 2103 { 2104 eh_region dst 2105 = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 0)); 2106 eh_region src 2107 = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 1)); 2108 scalar_int_mode fmode = targetm.eh_return_filter_mode (); 2109 2110 if (dst->exc_ptr_reg == NULL) 2111 dst->exc_ptr_reg = gen_reg_rtx (ptr_mode); 2112 if (src->exc_ptr_reg == NULL) 2113 src->exc_ptr_reg = gen_reg_rtx (ptr_mode); 2114 2115 if (dst->filter_reg == NULL) 2116 dst->filter_reg = gen_reg_rtx (fmode); 2117 if (src->filter_reg == NULL) 2118 src->filter_reg = gen_reg_rtx (fmode); 2119 2120 emit_move_insn (dst->exc_ptr_reg, src->exc_ptr_reg); 2121 emit_move_insn (dst->filter_reg, src->filter_reg); 2122 2123 return const0_rtx; 2124 } 2125 2126 /* Do any necessary initialization to access arbitrary stack frames. 2127 On the SPARC, this means flushing the register windows. */ 2128 2129 void 2130 expand_builtin_unwind_init (void) 2131 { 2132 /* Set this so all the registers get saved in our frame; we need to be 2133 able to copy the saved values for any registers from frames we unwind. */ 2134 crtl->saves_all_registers = 1; 2135 2136 SETUP_FRAME_ADDRESSES (); 2137 } 2138 2139 /* Map a non-negative number to an eh return data register number; expands 2140 to -1 if no return data register is associated with the input number. 2141 At least the inputs 0 and 1 must be mapped; the target may provide more. */ 2142 2143 rtx 2144 expand_builtin_eh_return_data_regno (tree exp) 2145 { 2146 tree which = CALL_EXPR_ARG (exp, 0); 2147 unsigned HOST_WIDE_INT iwhich; 2148 2149 if (TREE_CODE (which) != INTEGER_CST) 2150 { 2151 error ("argument of %<__builtin_eh_return_regno%> must be constant"); 2152 return constm1_rtx; 2153 } 2154 2155 iwhich = tree_to_uhwi (which); 2156 iwhich = EH_RETURN_DATA_REGNO (iwhich); 2157 if (iwhich == INVALID_REGNUM) 2158 return constm1_rtx; 2159 2160 #ifdef DWARF_FRAME_REGNUM 2161 iwhich = DWARF_FRAME_REGNUM (iwhich); 2162 #else 2163 iwhich = DBX_REGISTER_NUMBER (iwhich); 2164 #endif 2165 2166 return GEN_INT (iwhich); 2167 } 2168 2169 /* Given a value extracted from the return address register or stack slot, 2170 return the actual address encoded in that value. */ 2171 2172 rtx 2173 expand_builtin_extract_return_addr (tree addr_tree) 2174 { 2175 rtx addr = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL); 2176 2177 if (GET_MODE (addr) != Pmode 2178 && GET_MODE (addr) != VOIDmode) 2179 { 2180 #ifdef POINTERS_EXTEND_UNSIGNED 2181 addr = convert_memory_address (Pmode, addr); 2182 #else 2183 addr = convert_to_mode (Pmode, addr, 0); 2184 #endif 2185 } 2186 2187 /* First mask out any unwanted bits. */ 2188 rtx mask = MASK_RETURN_ADDR; 2189 if (mask) 2190 expand_and (Pmode, addr, mask, addr); 2191 2192 /* Then adjust to find the real return address. */ 2193 if (RETURN_ADDR_OFFSET) 2194 addr = plus_constant (Pmode, addr, RETURN_ADDR_OFFSET); 2195 2196 return addr; 2197 } 2198 2199 /* Given an actual address in addr_tree, do any necessary encoding 2200 and return the value to be stored in the return address register or 2201 stack slot so the epilogue will return to that address. 
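   (An editorial sketch of the extract/frob pair, modeled as plain
   arithmetic, follows; it is not part of GCC.)  */

/* In the sketch below, EXAMPLE_MASK and EXAMPLE_OFFSET stand in for the
   target macros MASK_RETURN_ADDR and RETURN_ADDR_OFFSET; the particular
   values are hypothetical.  */

#define EXAMPLE_MASK   (~(unsigned long) 3)  /* e.g. strip low bits */
#define EXAMPLE_OFFSET 8L

static unsigned long ATTRIBUTE_UNUSED
example_extract_return_addr (unsigned long stored)
{
  /* Mask out unwanted bits, then adjust to the real return address,
     mirroring expand_builtin_extract_return_addr above.  */
  return (stored & EXAMPLE_MASK) + EXAMPLE_OFFSET;
}

static unsigned long ATTRIBUTE_UNUSED
example_frob_return_addr (unsigned long real)
{
  /* The inverse adjustment: undo the offset so that the epilogue
     returns to REAL.  The masking step is not invertible; frobbing
     compensates only for the offset.  */
  return real - EXAMPLE_OFFSET;
}

/* expand_builtin_frob_return_addr performs the encoding just described.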
*/ 2202 2203 rtx 2204 expand_builtin_frob_return_addr (tree addr_tree) 2205 { 2206 rtx addr = expand_expr (addr_tree, NULL_RTX, ptr_mode, EXPAND_NORMAL); 2207 2208 addr = convert_memory_address (Pmode, addr); 2209 2210 if (RETURN_ADDR_OFFSET) 2211 { 2212 addr = force_reg (Pmode, addr); 2213 addr = plus_constant (Pmode, addr, -RETURN_ADDR_OFFSET); 2214 } 2215 2216 return addr; 2217 } 2218 2219 /* Set up the epilogue with the magic bits we'll need to return to the 2220 exception handler. */ 2221 2222 void 2223 expand_builtin_eh_return (tree stackadj_tree ATTRIBUTE_UNUSED, 2224 tree handler_tree) 2225 { 2226 rtx tmp; 2227 2228 #ifdef EH_RETURN_STACKADJ_RTX 2229 tmp = expand_expr (stackadj_tree, crtl->eh.ehr_stackadj, 2230 VOIDmode, EXPAND_NORMAL); 2231 tmp = convert_memory_address (Pmode, tmp); 2232 if (!crtl->eh.ehr_stackadj) 2233 crtl->eh.ehr_stackadj = copy_addr_to_reg (tmp); 2234 else if (tmp != crtl->eh.ehr_stackadj) 2235 emit_move_insn (crtl->eh.ehr_stackadj, tmp); 2236 #endif 2237 2238 tmp = expand_expr (handler_tree, crtl->eh.ehr_handler, 2239 VOIDmode, EXPAND_NORMAL); 2240 tmp = convert_memory_address (Pmode, tmp); 2241 if (!crtl->eh.ehr_handler) 2242 crtl->eh.ehr_handler = copy_addr_to_reg (tmp); 2243 else if (tmp != crtl->eh.ehr_handler) 2244 emit_move_insn (crtl->eh.ehr_handler, tmp); 2245 2246 if (!crtl->eh.ehr_label) 2247 crtl->eh.ehr_label = gen_label_rtx (); 2248 emit_jump (crtl->eh.ehr_label); 2249 } 2250 2251 /* Expand __builtin_eh_return. This exit path from the function loads up 2252 the eh return data registers, adjusts the stack, and branches to a 2253 given PC other than the normal return address. */ 2254 2255 void 2256 expand_eh_return (void) 2257 { 2258 rtx_code_label *around_label; 2259 2260 if (! crtl->eh.ehr_label) 2261 return; 2262 2263 crtl->calls_eh_return = 1; 2264 2265 #ifdef EH_RETURN_STACKADJ_RTX 2266 emit_move_insn (EH_RETURN_STACKADJ_RTX, const0_rtx); 2267 #endif 2268 2269 around_label = gen_label_rtx (); 2270 emit_jump (around_label); 2271 2272 emit_label (crtl->eh.ehr_label); 2273 clobber_return_register (); 2274 2275 #ifdef EH_RETURN_STACKADJ_RTX 2276 emit_move_insn (EH_RETURN_STACKADJ_RTX, crtl->eh.ehr_stackadj); 2277 #endif 2278 2279 if (targetm.have_eh_return ()) 2280 emit_insn (targetm.gen_eh_return (crtl->eh.ehr_handler)); 2281 else 2282 { 2283 if (rtx handler = EH_RETURN_HANDLER_RTX) 2284 emit_move_insn (handler, crtl->eh.ehr_handler); 2285 else 2286 error ("__builtin_eh_return not supported on this target"); 2287 } 2288 2289 emit_label (around_label); 2290 } 2291 2292 /* Convert a ptr_mode address ADDR_TREE to a Pmode address controlled by 2293 POINTERS_EXTEND_UNSIGNED and return it. */ 2294 2295 rtx 2296 expand_builtin_extend_pointer (tree addr_tree) 2297 { 2298 rtx addr = expand_expr (addr_tree, NULL_RTX, ptr_mode, EXPAND_NORMAL); 2299 int extend; 2300 2301 #ifdef POINTERS_EXTEND_UNSIGNED 2302 extend = POINTERS_EXTEND_UNSIGNED; 2303 #else 2304 /* The previous EH code did an unsigned extend by default, so we do this also 2305 for consistency. 
*/ 2306 extend = 1; 2307 #endif 2308 2309 return convert_modes (targetm.unwind_word_mode (), ptr_mode, addr, extend); 2310 } 2311 2312 static int 2313 add_action_record (action_hash_type *ar_hash, int filter, int next) 2314 { 2315 struct action_record **slot, *new_ar, tmp; 2316 2317 tmp.filter = filter; 2318 tmp.next = next; 2319 slot = ar_hash->find_slot (&tmp, INSERT); 2320 2321 if ((new_ar = *slot) == NULL) 2322 { 2323 new_ar = XNEW (struct action_record); 2324 new_ar->offset = crtl->eh.action_record_data->length () + 1; 2325 new_ar->filter = filter; 2326 new_ar->next = next; 2327 *slot = new_ar; 2328 2329 /* The filter value goes in untouched. The link to the next 2330 record is a "self-relative" byte offset, or zero to indicate 2331 that there is no next record. So convert the absolute 1 based 2332 indices we've been carrying around into a displacement. */ 2333 2334 push_sleb128 (&crtl->eh.action_record_data, filter); 2335 if (next) 2336 next -= crtl->eh.action_record_data->length () + 1; 2337 push_sleb128 (&crtl->eh.action_record_data, next); 2338 } 2339 2340 return new_ar->offset; 2341 } 2342 2343 static int 2344 collect_one_action_chain (action_hash_type *ar_hash, eh_region region) 2345 { 2346 int next; 2347 2348 /* If we've reached the top of the region chain, then we have 2349 no actions, and require no landing pad. */ 2350 if (region == NULL) 2351 return -1; 2352 2353 switch (region->type) 2354 { 2355 case ERT_CLEANUP: 2356 { 2357 eh_region r; 2358 /* A cleanup adds a zero filter to the beginning of the chain, but 2359 there are special cases to look out for. If there are *only* 2360 cleanups along a path, then it compresses to a zero action. 2361 Further, if there are multiple cleanups along a path, we only 2362 need to represent one of them, as that is enough to trigger 2363 entry to the landing pad at runtime. */ 2364 next = collect_one_action_chain (ar_hash, region->outer); 2365 if (next <= 0) 2366 return 0; 2367 for (r = region->outer; r ; r = r->outer) 2368 if (r->type == ERT_CLEANUP) 2369 return next; 2370 return add_action_record (ar_hash, 0, next); 2371 } 2372 2373 case ERT_TRY: 2374 { 2375 eh_catch c; 2376 2377 /* Process the associated catch regions in reverse order. 2378 If there's a catch-all handler, then we don't need to 2379 search outer regions. Use a magic -3 value to record 2380 that we haven't done the outer search. */ 2381 next = -3; 2382 for (c = region->u.eh_try.last_catch; c ; c = c->prev_catch) 2383 { 2384 if (c->type_list == NULL) 2385 { 2386 /* Retrieve the filter from the head of the filter list 2387 where we have stored it (see assign_filter_values). */ 2388 int filter = TREE_INT_CST_LOW (TREE_VALUE (c->filter_list)); 2389 next = add_action_record (ar_hash, filter, 0); 2390 } 2391 else 2392 { 2393 /* Once the outer search is done, trigger an action record for 2394 each filter we have. */ 2395 tree flt_node; 2396 2397 if (next == -3) 2398 { 2399 next = collect_one_action_chain (ar_hash, region->outer); 2400 2401 /* If there is no next action, terminate the chain. */ 2402 if (next == -1) 2403 next = 0; 2404 /* If all outer actions are cleanups or must_not_throw, 2405 we'll have no action record for it, since we had wanted 2406 to encode these states in the call-site record directly. 2407 Add a cleanup action to the chain to catch these. 
 */
              else if (next <= 0)
                next = add_action_record (ar_hash, 0, 0);
            }

          flt_node = c->filter_list;
          for (; flt_node; flt_node = TREE_CHAIN (flt_node))
            {
              int filter = TREE_INT_CST_LOW (TREE_VALUE (flt_node));
              next = add_action_record (ar_hash, filter, next);
            }
        }
      }
    return next;
    }

  case ERT_ALLOWED_EXCEPTIONS:
    /* An exception specification adds its filter to the
       beginning of the chain.  */
    next = collect_one_action_chain (ar_hash, region->outer);

    /* If there is no next action, terminate the chain.  */
    if (next == -1)
      next = 0;
    /* If all outer actions are cleanups or must_not_throw,
       we'll have no action record for it, since we had wanted
       to encode these states in the call-site record directly.
       Add a cleanup action to the chain to catch these.  */
    else if (next <= 0)
      next = add_action_record (ar_hash, 0, 0);

    return add_action_record (ar_hash, region->u.allowed.filter, next);

  case ERT_MUST_NOT_THROW:
    /* A must-not-throw region with no inner handlers or cleanups
       requires no call-site entry.  Note that this differs from
       the no handler or cleanup case in that we do require an lsda
       to be generated.  Return a magic -2 value to record this.  */
    return -2;
  }

  gcc_unreachable ();
}

/* Record a call-site entry for LANDING_PAD and ACTION in SECTION's
   table, returning the new entry's index.  */

static int
add_call_site (rtx landing_pad, int action, int section)
{
  call_site_record record;

  record = ggc_alloc<call_site_record_d> ();
  record->landing_pad = landing_pad;
  record->action = action;

  vec_safe_push (crtl->eh.call_site_record_v[section], record);

  return call_site_base + crtl->eh.call_site_record_v[section]->length () - 1;
}

/* Emit a NOTE_INSN_EH_REGION_END note after INSN.  */

static rtx_note *
emit_note_eh_region_end (rtx_insn *insn)
{
  return emit_note_after (NOTE_INSN_EH_REGION_END, insn);
}

/* Add a NOP after NOTE_INSN_SWITCH_TEXT_SECTIONS when the cold section
   starts with a landing pad.  If the landing pad were at offset 0 from
   the start label of the section, we would miss EH delivery, because an
   offset of 0 is special and means no landing pad.  */

static bool
maybe_add_nop_after_section_switch (void)
{
  if (!crtl->uses_eh_lsda
      || !crtl->eh.call_site_record_v[1])
    return false;
  int n = vec_safe_length (crtl->eh.call_site_record_v[1]);
  hash_set<rtx_insn *> visited;

  for (int i = 0; i < n; ++i)
    {
      struct call_site_record_d *cs
        = (*crtl->eh.call_site_record_v[1])[i];
      if (cs->landing_pad)
        {
          rtx_insn *insn = as_a <rtx_insn *> (cs->landing_pad);
          while (true)
            {
              /* Landing pads have the LABEL_PRESERVE_P flag set.  This
                 check makes sure that we do not walk past a landing pad
                 visited earlier, which would result in possible
                 quadratic behaviour.  */
              if (LABEL_P (insn) && LABEL_PRESERVE_P (insn)
                  && visited.add (insn))
                break;

              /* Conservatively assume that an ASM insn may be empty; we
                 have no way to tell what it contains.  */
              if (active_insn_p (insn)
                  && GET_CODE (PATTERN (insn)) != ASM_INPUT
                  && GET_CODE (PATTERN (insn)) != ASM_OPERANDS)
                break;

              /* If we reached the start of the hot section, then a NOP
                 is needed.  */
              if (GET_CODE (insn) == NOTE
                  && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
                {
                  emit_insn_after (gen_nop (), insn);
                  break;
                }

              /* We visit only labels from the cold section.  We should
                 never hit the beginning of the insn stream here.  */
              insn = PREV_INSN (insn);
            }
        }
    }
  return false;
}

/* Turn REG_EH_REGION notes back into NOTE_INSN_EH_REGION notes.  The
   new note numbers will not refer to region numbers, but instead to
   call site entries.  */

static unsigned int
convert_to_eh_region_ranges (void)
{
  rtx insn;
  rtx_insn *iter;
  rtx_note *note;
  action_hash_type ar_hash (31);
  int last_action = -3;
  rtx_insn *last_action_insn = NULL;
  rtx last_landing_pad = NULL_RTX;
  rtx_insn *first_no_action_insn = NULL;
  int call_site = 0;
  int cur_sec = 0;
  rtx_insn *section_switch_note = NULL;
  rtx_insn *first_no_action_insn_before_switch = NULL;
  rtx_insn *last_no_action_insn_before_switch = NULL;
  int saved_call_site_base = call_site_base;

  vec_alloc (crtl->eh.action_record_data, 64);

  for (iter = get_insns (); iter; iter = NEXT_INSN (iter))
    if (INSN_P (iter))
      {
        eh_landing_pad lp;
        eh_region region;
        bool nothrow;
        int this_action;
        rtx_code_label *this_landing_pad;

        insn = iter;
        if (NONJUMP_INSN_P (insn)
            && GET_CODE (PATTERN (insn)) == SEQUENCE)
          insn = XVECEXP (PATTERN (insn), 0, 0);

        nothrow = get_eh_region_and_lp_from_rtx (insn, &region, &lp);
        if (nothrow)
          continue;
        if (region)
          this_action = collect_one_action_chain (&ar_hash, region);
        else
          this_action = -1;

        /* Existence of catch handlers, or must-not-throw regions
           implies that an lsda is needed (even if empty).  */
        if (this_action != -1)
          crtl->uses_eh_lsda = 1;

        /* Delay creation of region notes for no-action regions
           until we're sure that an lsda will be required.  */
        else if (last_action == -3)
          {
            first_no_action_insn = iter;
            last_action = -1;
          }

        if (this_action >= 0)
          this_landing_pad = lp->landing_pad;
        else
          this_landing_pad = NULL;

        /* Differing actions or landing pads implies a change in call-site
           info, which implies some EH_REGION note should be emitted.  */
        if (last_action != this_action
            || last_landing_pad != this_landing_pad)
          {
            /* If there is a queued no-action region in the other section
               with hot/cold partitioning, emit it now.  */
            if (first_no_action_insn_before_switch)
              {
                gcc_assert (this_action != -1
                            && last_action == (first_no_action_insn
                                               ? -1 : -3));
                call_site = add_call_site (NULL_RTX, 0, 0);
                note = emit_note_before (NOTE_INSN_EH_REGION_BEG,
                                         first_no_action_insn_before_switch);
                NOTE_EH_HANDLER (note) = call_site;
                note
                  = emit_note_eh_region_end (last_no_action_insn_before_switch);
                NOTE_EH_HANDLER (note) = call_site;
                gcc_assert (last_action != -3
                            || (last_action_insn
                                == last_no_action_insn_before_switch));
                first_no_action_insn_before_switch = NULL;
                last_no_action_insn_before_switch = NULL;
                call_site_base++;
              }
            /* If we'd not seen a previous action (-3) or the previous
               action was must-not-throw (-2), then we do not need an
               end note.  */
            if (last_action >= -1)
              {
                /* If we delayed the creation of the begin, do it now.
*/ 2622 if (first_no_action_insn) 2623 { 2624 call_site = add_call_site (NULL_RTX, 0, cur_sec); 2625 note = emit_note_before (NOTE_INSN_EH_REGION_BEG, 2626 first_no_action_insn); 2627 NOTE_EH_HANDLER (note) = call_site; 2628 first_no_action_insn = NULL; 2629 } 2630 2631 note = emit_note_eh_region_end (last_action_insn); 2632 NOTE_EH_HANDLER (note) = call_site; 2633 } 2634 2635 /* If the new action is must-not-throw, then no region notes 2636 are created. */ 2637 if (this_action >= -1) 2638 { 2639 call_site = add_call_site (this_landing_pad, 2640 this_action < 0 ? 0 : this_action, 2641 cur_sec); 2642 note = emit_note_before (NOTE_INSN_EH_REGION_BEG, iter); 2643 NOTE_EH_HANDLER (note) = call_site; 2644 } 2645 2646 last_action = this_action; 2647 last_landing_pad = this_landing_pad; 2648 } 2649 last_action_insn = iter; 2650 } 2651 else if (NOTE_P (iter) 2652 && NOTE_KIND (iter) == NOTE_INSN_SWITCH_TEXT_SECTIONS) 2653 { 2654 gcc_assert (section_switch_note == NULL_RTX); 2655 gcc_assert (flag_reorder_blocks_and_partition); 2656 section_switch_note = iter; 2657 if (first_no_action_insn) 2658 { 2659 first_no_action_insn_before_switch = first_no_action_insn; 2660 last_no_action_insn_before_switch = last_action_insn; 2661 first_no_action_insn = NULL; 2662 gcc_assert (last_action == -1); 2663 last_action = -3; 2664 } 2665 /* Force closing of current EH region before section switch and 2666 opening a new one afterwards. */ 2667 else if (last_action != -3) 2668 last_landing_pad = pc_rtx; 2669 if (crtl->eh.call_site_record_v[cur_sec]) 2670 call_site_base += crtl->eh.call_site_record_v[cur_sec]->length (); 2671 cur_sec++; 2672 gcc_assert (crtl->eh.call_site_record_v[cur_sec] == NULL); 2673 vec_alloc (crtl->eh.call_site_record_v[cur_sec], 10); 2674 } 2675 2676 if (last_action >= -1 && ! first_no_action_insn) 2677 { 2678 note = emit_note_eh_region_end (last_action_insn); 2679 NOTE_EH_HANDLER (note) = call_site; 2680 } 2681 2682 call_site_base = saved_call_site_base; 2683 2684 return 0; 2685 } 2686 2687 namespace { 2688 2689 const pass_data pass_data_convert_to_eh_region_ranges = 2690 { 2691 RTL_PASS, /* type */ 2692 "eh_ranges", /* name */ 2693 OPTGROUP_NONE, /* optinfo_flags */ 2694 TV_NONE, /* tv_id */ 2695 0, /* properties_required */ 2696 0, /* properties_provided */ 2697 0, /* properties_destroyed */ 2698 0, /* todo_flags_start */ 2699 0, /* todo_flags_finish */ 2700 }; 2701 2702 class pass_convert_to_eh_region_ranges : public rtl_opt_pass 2703 { 2704 public: 2705 pass_convert_to_eh_region_ranges (gcc::context *ctxt) 2706 : rtl_opt_pass (pass_data_convert_to_eh_region_ranges, ctxt) 2707 {} 2708 2709 /* opt_pass methods: */ 2710 virtual bool gate (function *); 2711 virtual unsigned int execute (function *) 2712 { 2713 int ret = convert_to_eh_region_ranges (); 2714 maybe_add_nop_after_section_switch (); 2715 return ret; 2716 } 2717 2718 }; // class pass_convert_to_eh_region_ranges 2719 2720 bool 2721 pass_convert_to_eh_region_ranges::gate (function *) 2722 { 2723 /* Nothing to do for SJLJ exceptions or if no regions created. 
 */
  if (cfun->eh->region_tree == NULL)
    return false;
  if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
    return false;
  return true;
}

} // anon namespace

rtl_opt_pass *
make_pass_convert_to_eh_region_ranges (gcc::context *ctxt)
{
  return new pass_convert_to_eh_region_ranges (ctxt);
}

/* Push a ULEB128-encoded VALUE onto DATA_AREA, seven data bits per
   byte, least significant first, the high bit marking continuation.  */

static void
push_uleb128 (vec<uchar, va_gc> **data_area, unsigned int value)
{
  do
    {
      unsigned char byte = value & 0x7f;
      value >>= 7;
      if (value)
        byte |= 0x80;
      vec_safe_push (*data_area, byte);
    }
  while (value);
}

/* Likewise for a signed VALUE, stopping once the remaining bits are
   pure sign extension.  */

static void
push_sleb128 (vec<uchar, va_gc> **data_area, int value)
{
  unsigned char byte;
  int more;

  do
    {
      byte = value & 0x7f;
      value >>= 7;
      more = ! ((value == 0 && (byte & 0x40) == 0)
                || (value == -1 && (byte & 0x40) != 0));
      if (more)
        byte |= 0x80;
      vec_safe_push (*data_area, byte);
    }
  while (more);
}
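
/* Editorial aside, not part of GCC: the two encoders above emit
   standard LEB128, which the action records and call-site tables below
   use throughout.  The hypothetical decoder here shows the matching
   read side of the byte layout.  */

static unsigned int ATTRIBUTE_UNUSED
example_read_uleb128 (const unsigned char *p, unsigned int *plen)
{
  unsigned int result = 0, shift = 0, n = 0;
  unsigned char byte;

  do
    {
      byte = p[n++];
      result |= (unsigned int) (byte & 0x7f) << shift;
      shift += 7;
    }
  while (byte & 0x80);

  *plen = n;
  return result;
}

/* For example, push_uleb128 turns 624485 (0x98765) into the three bytes
   0xe5 0x8e 0x26, and example_read_uleb128 recovers the value.  */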

/* Return the size in bytes of SECTION's call-site table, assuming the
   4-byte DWARF2 format for starts, lengths and landing pads.  */

static int
dw2_size_of_call_site_table (int section)
{
  int n = vec_safe_length (crtl->eh.call_site_record_v[section]);
  int size = n * (4 + 4 + 4);
  int i;

  for (i = 0; i < n; ++i)
    {
      struct call_site_record_d *cs =
        (*crtl->eh.call_site_record_v[section])[i];
      size += size_of_uleb128 (cs->action);
    }

  return size;
}

/* Likewise for the SJLJ call-site table, where each entry is a pair of
   uleb128 values.  */

static int
sjlj_size_of_call_site_table (void)
{
  int n = vec_safe_length (crtl->eh.call_site_record_v[0]);
  int size = 0;
  int i;

  for (i = 0; i < n; ++i)
    {
      struct call_site_record_d *cs =
        (*crtl->eh.call_site_record_v[0])[i];
      size += size_of_uleb128 (INTVAL (cs->landing_pad));
      size += size_of_uleb128 (cs->action);
    }

  return size;
}

/* Output SECTION's call-site table, using CS_FORMAT for the offsets.  */

static void
dw2_output_call_site_table (int cs_format, int section)
{
  int n = vec_safe_length (crtl->eh.call_site_record_v[section]);
  int i;
  const char *begin;

  if (section == 0)
    begin = current_function_func_begin_label;
  else if (first_function_block_is_cold)
    begin = crtl->subsections.hot_section_label;
  else
    begin = crtl->subsections.cold_section_label;

  for (i = 0; i < n; ++i)
    {
      struct call_site_record_d *cs = (*crtl->eh.call_site_record_v[section])[i];
      char reg_start_lab[32];
      char reg_end_lab[32];
      char landing_pad_lab[32];

      ASM_GENERATE_INTERNAL_LABEL (reg_start_lab, "LEHB", call_site_base + i);
      ASM_GENERATE_INTERNAL_LABEL (reg_end_lab, "LEHE", call_site_base + i);

      if (cs->landing_pad)
        ASM_GENERATE_INTERNAL_LABEL (landing_pad_lab, "L",
                                     CODE_LABEL_NUMBER (cs->landing_pad));

      /* ??? Perhaps use insn length scaling if the assembler supports
         generic arithmetic.  */
      /* ??? Perhaps use attr_length to choose data1 or data2 instead of
         data4 if the function is small enough.  */
      if (cs_format == DW_EH_PE_uleb128)
        {
          dw2_asm_output_delta_uleb128 (reg_start_lab, begin,
                                        "region %d start", i);
          dw2_asm_output_delta_uleb128 (reg_end_lab, reg_start_lab,
                                        "length");
          if (cs->landing_pad)
            dw2_asm_output_delta_uleb128 (landing_pad_lab, begin,
                                          "landing pad");
          else
            dw2_asm_output_data_uleb128 (0, "landing pad");
        }
      else
        {
          dw2_asm_output_delta (4, reg_start_lab, begin,
                                "region %d start", i);
          dw2_asm_output_delta (4, reg_end_lab, reg_start_lab, "length");
          if (cs->landing_pad)
            dw2_asm_output_delta (4, landing_pad_lab, begin,
                                  "landing pad");
          else
            dw2_asm_output_data (4, 0, "landing pad");
        }
      dw2_asm_output_data_uleb128 (cs->action, "action");
    }

  call_site_base += n;
}

/* Output the call-site table for SJLJ exceptions, where the landing pad
   entries are simple integers rather than labels.  */

static void
sjlj_output_call_site_table (void)
{
  int n = vec_safe_length (crtl->eh.call_site_record_v[0]);
  int i;

  for (i = 0; i < n; ++i)
    {
      struct call_site_record_d *cs = (*crtl->eh.call_site_record_v[0])[i];

      dw2_asm_output_data_uleb128 (INTVAL (cs->landing_pad),
                                   "region %d landing pad", i);
      dw2_asm_output_data_uleb128 (cs->action, "action");
    }

  call_site_base += n;
}

/* Switch to the section that should be used for exception tables.  */

static void
switch_to_exception_section (const char * ARG_UNUSED (fnname))
{
  section *s;

  if (exception_section)
    s = exception_section;
  else
    {
      int flags;

      if (EH_TABLES_CAN_BE_READ_ONLY)
        {
          int tt_format =
            ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/1);
          flags = ((! flag_pic
                    || ((tt_format & 0x70) != DW_EH_PE_absptr
                        && (tt_format & 0x70) != DW_EH_PE_aligned))
                   ? 0 : SECTION_WRITE);
        }
      else
        flags = SECTION_WRITE;

      /* Compute the section and cache it into exception_section,
         unless it depends on the function name.  */
      if (targetm_common.have_named_sections)
        {
#ifdef HAVE_LD_EH_GC_SECTIONS
          if (flag_function_sections
              || (DECL_COMDAT_GROUP (current_function_decl) && HAVE_COMDAT_GROUP))
            {
              char *section_name = XNEWVEC (char, strlen (fnname) + 32);
              /* The EH table must match the code section, so only mark
                 it linkonce if we have COMDAT groups to tie them together.  */
              if (DECL_COMDAT_GROUP (current_function_decl) && HAVE_COMDAT_GROUP)
                flags |= SECTION_LINKONCE;
              sprintf (section_name, ".gcc_except_table.%s", fnname);
              s = get_section (section_name, flags, current_function_decl);
              free (section_name);
            }
          else
#endif
            exception_section
              = s = get_section (".gcc_except_table", flags, NULL);
        }
      else
        exception_section
          = s = flags == SECTION_WRITE ? data_section : readonly_data_section;
    }

  switch_to_section (s);
}

/* Output a reference from an exception table to the type_info object TYPE.
   TT_FORMAT and TT_FORMAT_SIZE describe the DWARF encoding method used for
   the value.  */

static void
output_ttype (tree type, int tt_format, int tt_format_size)
{
  rtx value;
  bool is_public = true;

  if (type == NULL_TREE)
    value = const0_rtx;
  else
    {
      /* FIXME lto.  pass_ipa_free_lang_data changes all types to
         runtime types so TYPE should already be a runtime type
         reference.
 When pass_ipa_free_lang_data is made a default
         pass, we can then remove the call to lookup_type_for_runtime
         below.  */
      if (TYPE_P (type))
        type = lookup_type_for_runtime (type);

      value = expand_expr (type, NULL_RTX, VOIDmode, EXPAND_INITIALIZER);

      /* Let cgraph know that the rtti decl is used.  Not all of the
         paths below go through assemble_integer, which would take
         care of this for us.  */
      STRIP_NOPS (type);
      if (TREE_CODE (type) == ADDR_EXPR)
        {
          type = TREE_OPERAND (type, 0);
          if (VAR_P (type))
            is_public = TREE_PUBLIC (type);
        }
      else
        gcc_assert (TREE_CODE (type) == INTEGER_CST);
    }

  /* Allow the target to override the type table entry format.  */
  if (targetm.asm_out.ttype (value))
    return;

  if (tt_format == DW_EH_PE_absptr || tt_format == DW_EH_PE_aligned)
    assemble_integer (value, tt_format_size,
                      tt_format_size * BITS_PER_UNIT, 1);
  else
    dw2_asm_output_encoded_addr_rtx (tt_format, value, is_public, NULL);
}

/* Output an exception table for the current function according to SECTION.

   If the function has been partitioned into hot and cold parts, value 0 for
   SECTION refers to the table associated with the hot part while value 1
   refers to the table associated with the cold part.  If the function has
   not been partitioned, value 0 refers to the single exception table.  */

static void
output_one_function_exception_table (int section)
{
  int tt_format, cs_format, lp_format, i;
  char ttype_label[32];
  char cs_after_size_label[32];
  char cs_end_label[32];
  int call_site_len;
  int have_tt_data;
  int tt_format_size = 0;

  have_tt_data = (vec_safe_length (cfun->eh->ttype_data)
                  || (targetm.arm_eabi_unwinder
                      ? vec_safe_length (cfun->eh->ehspec_data.arm_eabi)
                      : vec_safe_length (cfun->eh->ehspec_data.other)));

  /* Indicate the format of the @TType entries.  */
  if (! have_tt_data)
    tt_format = DW_EH_PE_omit;
  else
    {
      tt_format = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/1);
      if (HAVE_AS_LEB128)
        ASM_GENERATE_INTERNAL_LABEL (ttype_label,
                                     section ? "LLSDATTC" : "LLSDATT",
                                     current_function_funcdef_no);

      tt_format_size = size_of_encoded_value (tt_format);

      assemble_align (tt_format_size * BITS_PER_UNIT);
    }

  targetm.asm_out.internal_label (asm_out_file, section ? "LLSDAC" : "LLSDA",
                                  current_function_funcdef_no);

  /* The LSDA header.  */

  /* Indicate the format of the landing pad start pointer.  An omitted
     field implies @LPStart == @Start.  */
  /* Currently we always put @LPStart == @Start.  This field would
     be most useful in moving the landing pads completely out of
     line to another section, but it could also be used to minimize
     the size of uleb128 landing pad offsets.  */
  lp_format = DW_EH_PE_omit;
  dw2_asm_output_data (1, lp_format, "@LPStart format (%s)",
                       eh_data_format_name (lp_format));

  /* @LPStart pointer would go here.
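     Since it is omitted, the unwinder takes @LPStart to be the start of
     the function (the BEGIN label used by dw2_output_call_site_table
     above), and every landing-pad offset in the call-site table is
     measured from there.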
*/ 3046 3047 dw2_asm_output_data (1, tt_format, "@TType format (%s)", 3048 eh_data_format_name (tt_format)); 3049 3050 if (!HAVE_AS_LEB128) 3051 { 3052 if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ) 3053 call_site_len = sjlj_size_of_call_site_table (); 3054 else 3055 call_site_len = dw2_size_of_call_site_table (section); 3056 } 3057 3058 /* A pc-relative 4-byte displacement to the @TType data. */ 3059 if (have_tt_data) 3060 { 3061 if (HAVE_AS_LEB128) 3062 { 3063 char ttype_after_disp_label[32]; 3064 ASM_GENERATE_INTERNAL_LABEL (ttype_after_disp_label, 3065 section ? "LLSDATTDC" : "LLSDATTD", 3066 current_function_funcdef_no); 3067 dw2_asm_output_delta_uleb128 (ttype_label, ttype_after_disp_label, 3068 "@TType base offset"); 3069 ASM_OUTPUT_LABEL (asm_out_file, ttype_after_disp_label); 3070 } 3071 else 3072 { 3073 /* Ug. Alignment queers things. */ 3074 unsigned int before_disp, after_disp, last_disp, disp; 3075 3076 before_disp = 1 + 1; 3077 after_disp = (1 + size_of_uleb128 (call_site_len) 3078 + call_site_len 3079 + vec_safe_length (crtl->eh.action_record_data) 3080 + (vec_safe_length (cfun->eh->ttype_data) 3081 * tt_format_size)); 3082 3083 disp = after_disp; 3084 do 3085 { 3086 unsigned int disp_size, pad; 3087 3088 last_disp = disp; 3089 disp_size = size_of_uleb128 (disp); 3090 pad = before_disp + disp_size + after_disp; 3091 if (pad % tt_format_size) 3092 pad = tt_format_size - (pad % tt_format_size); 3093 else 3094 pad = 0; 3095 disp = after_disp + pad; 3096 } 3097 while (disp != last_disp); 3098 3099 dw2_asm_output_data_uleb128 (disp, "@TType base offset"); 3100 } 3101 } 3102 3103 /* Indicate the format of the call-site offsets. */ 3104 if (HAVE_AS_LEB128) 3105 cs_format = DW_EH_PE_uleb128; 3106 else 3107 cs_format = DW_EH_PE_udata4; 3108 3109 dw2_asm_output_data (1, cs_format, "call-site format (%s)", 3110 eh_data_format_name (cs_format)); 3111 3112 if (HAVE_AS_LEB128) 3113 { 3114 ASM_GENERATE_INTERNAL_LABEL (cs_after_size_label, 3115 section ? "LLSDACSBC" : "LLSDACSB", 3116 current_function_funcdef_no); 3117 ASM_GENERATE_INTERNAL_LABEL (cs_end_label, 3118 section ? "LLSDACSEC" : "LLSDACSE", 3119 current_function_funcdef_no); 3120 dw2_asm_output_delta_uleb128 (cs_end_label, cs_after_size_label, 3121 "Call-site table length"); 3122 ASM_OUTPUT_LABEL (asm_out_file, cs_after_size_label); 3123 if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ) 3124 sjlj_output_call_site_table (); 3125 else 3126 dw2_output_call_site_table (cs_format, section); 3127 ASM_OUTPUT_LABEL (asm_out_file, cs_end_label); 3128 } 3129 else 3130 { 3131 dw2_asm_output_data_uleb128 (call_site_len, "Call-site table length"); 3132 if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ) 3133 sjlj_output_call_site_table (); 3134 else 3135 dw2_output_call_site_table (cs_format, section); 3136 } 3137 3138 /* ??? Decode and interpret the data for flag_debug_asm. */ 3139 { 3140 uchar uc; 3141 FOR_EACH_VEC_ELT (*crtl->eh.action_record_data, i, uc) 3142 dw2_asm_output_data (1, uc, i ? NULL : "Action record table"); 3143 } 3144 3145 if (have_tt_data) 3146 assemble_align (tt_format_size * BITS_PER_UNIT); 3147 3148 i = vec_safe_length (cfun->eh->ttype_data); 3149 while (i-- > 0) 3150 { 3151 tree type = (*cfun->eh->ttype_data)[i]; 3152 output_ttype (type, tt_format, tt_format_size); 3153 } 3154 3155 if (HAVE_AS_LEB128 && have_tt_data) 3156 ASM_OUTPUT_LABEL (asm_out_file, ttype_label); 3157 3158 /* ??? Decode and interpret the data for flag_debug_asm. 
 */
  if (targetm.arm_eabi_unwinder)
    {
      tree type;
      for (i = 0;
           vec_safe_iterate (cfun->eh->ehspec_data.arm_eabi, i, &type); ++i)
        output_ttype (type, tt_format, tt_format_size);
    }
  else
    {
      uchar uc;
      for (i = 0;
           vec_safe_iterate (cfun->eh->ehspec_data.other, i, &uc); ++i)
        dw2_asm_output_data (1, uc,
                             i ? NULL : "Exception specification table");
    }
}

/* Output an exception table for the current function according to SECTION,
   switching back and forth from the function section appropriately.

   If the function has been partitioned into hot and cold parts, value 0 for
   SECTION refers to the table associated with the hot part while value 1
   refers to the table associated with the cold part.  If the function has
   not been partitioned, value 0 refers to the single exception table.  */

void
output_function_exception_table (int section)
{
  const char *fnname = get_fnname_from_decl (current_function_decl);
  rtx personality = get_personality_function (current_function_decl);

  /* Not all functions need anything.  */
  if (!crtl->uses_eh_lsda)
    return;

  /* No need to emit any boilerplate stuff for the cold part.  */
  if (section == 1 && !crtl->eh.call_site_record_v[1])
    return;

  if (personality)
    {
      assemble_external_libcall (personality);

      if (targetm.asm_out.emit_except_personality)
        targetm.asm_out.emit_except_personality (personality);
    }

  switch_to_exception_section (fnname);

  /* If the target wants a label to begin the table, emit it here.  */
  targetm.asm_out.emit_except_table_label (asm_out_file);

  /* Do the real work.  */
  output_one_function_exception_table (section);

  switch_to_section (current_function_section ());
}

void
set_eh_throw_stmt_table (function *fun, hash_map<gimple *, int> *table)
{
  fun->eh->throw_stmt_table = table;
}

hash_map<gimple *, int> *
get_eh_throw_stmt_table (struct function *fun)
{
  return fun->eh->throw_stmt_table;
}

/* Determine if the function needs an EH personality function.  */

enum eh_personality_kind
function_needs_eh_personality (struct function *fn)
{
  enum eh_personality_kind kind = eh_personality_none;
  eh_region i;

  FOR_ALL_EH_REGION_FN (i, fn)
    {
      switch (i->type)
        {
        case ERT_CLEANUP:
          /* Can do with any personality including the generic C one.  */
          kind = eh_personality_any;
          break;

        case ERT_TRY:
        case ERT_ALLOWED_EXCEPTIONS:
          /* Always needs an EH personality function.  The generic C
             personality doesn't handle these even for empty type lists.  */
          return eh_personality_lang;

        case ERT_MUST_NOT_THROW:
          /* Always needs an EH personality function.  The language may
             specify which abort routine must be used, e.g. std::terminate.  */
          return eh_personality_lang;
        }
    }

  return kind;
}
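
/* Editorial aside, not part of GCC: dump_eh_tree and verify_eh_tree
   below both walk the region tree iteratively through its inner,
   next_peer and outer links.  A minimal, self-contained sketch of that
   traversal pattern; the node type and functions are invented.  */

struct example_node
{
  struct example_node *inner, *next_peer, *outer;
};

static void ATTRIBUTE_UNUSED
example_walk_tree (struct example_node *i,
                   void (*visit) (struct example_node *, int))
{
  int depth = 0;

  while (i != NULL)
    {
      visit (i, depth);

      if (i->inner)
        /* Descend to the first child.  */
        i = i->inner, depth++;
      else if (i->next_peer)
        /* Step to the next sibling.  */
        i = i->next_peer;
      else
        {
          /* Climb back up until some ancestor has an unvisited sibling.  */
          do
            {
              i = i->outer;
              depth--;
              if (i == NULL)
                return;
            }
          while (i->next_peer == NULL);
          i = i->next_peer;
        }
    }
}

/* Dump EH information to OUT.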
*/ 3263 3264 void 3265 dump_eh_tree (FILE * out, struct function *fun) 3266 { 3267 eh_region i; 3268 int depth = 0; 3269 static const char *const type_name[] = { 3270 "cleanup", "try", "allowed_exceptions", "must_not_throw" 3271 }; 3272 3273 i = fun->eh->region_tree; 3274 if (!i) 3275 return; 3276 3277 fprintf (out, "Eh tree:\n"); 3278 while (1) 3279 { 3280 fprintf (out, " %*s %i %s", depth * 2, "", 3281 i->index, type_name[(int) i->type]); 3282 3283 if (i->landing_pads) 3284 { 3285 eh_landing_pad lp; 3286 3287 fprintf (out, " land:"); 3288 if (current_ir_type () == IR_GIMPLE) 3289 { 3290 for (lp = i->landing_pads; lp ; lp = lp->next_lp) 3291 { 3292 fprintf (out, "{%i,", lp->index); 3293 print_generic_expr (out, lp->post_landing_pad); 3294 fputc ('}', out); 3295 if (lp->next_lp) 3296 fputc (',', out); 3297 } 3298 } 3299 else 3300 { 3301 for (lp = i->landing_pads; lp ; lp = lp->next_lp) 3302 { 3303 fprintf (out, "{%i,", lp->index); 3304 if (lp->landing_pad) 3305 fprintf (out, "%i%s,", INSN_UID (lp->landing_pad), 3306 NOTE_P (lp->landing_pad) ? "(del)" : ""); 3307 else 3308 fprintf (out, "(nil),"); 3309 if (lp->post_landing_pad) 3310 { 3311 rtx_insn *lab = label_rtx (lp->post_landing_pad); 3312 fprintf (out, "%i%s}", INSN_UID (lab), 3313 NOTE_P (lab) ? "(del)" : ""); 3314 } 3315 else 3316 fprintf (out, "(nil)}"); 3317 if (lp->next_lp) 3318 fputc (',', out); 3319 } 3320 } 3321 } 3322 3323 switch (i->type) 3324 { 3325 case ERT_CLEANUP: 3326 case ERT_MUST_NOT_THROW: 3327 break; 3328 3329 case ERT_TRY: 3330 { 3331 eh_catch c; 3332 fprintf (out, " catch:"); 3333 for (c = i->u.eh_try.first_catch; c; c = c->next_catch) 3334 { 3335 fputc ('{', out); 3336 if (c->label) 3337 { 3338 fprintf (out, "lab:"); 3339 print_generic_expr (out, c->label); 3340 fputc (';', out); 3341 } 3342 print_generic_expr (out, c->type_list); 3343 fputc ('}', out); 3344 if (c->next_catch) 3345 fputc (',', out); 3346 } 3347 } 3348 break; 3349 3350 case ERT_ALLOWED_EXCEPTIONS: 3351 fprintf (out, " filter :%i types:", i->u.allowed.filter); 3352 print_generic_expr (out, i->u.allowed.type_list); 3353 break; 3354 } 3355 fputc ('\n', out); 3356 3357 /* If there are sub-regions, process them. */ 3358 if (i->inner) 3359 i = i->inner, depth++; 3360 /* If there are peers, process them. */ 3361 else if (i->next_peer) 3362 i = i->next_peer; 3363 /* Otherwise, step back up the tree to the next peer. */ 3364 else 3365 { 3366 do 3367 { 3368 i = i->outer; 3369 depth--; 3370 if (i == NULL) 3371 return; 3372 } 3373 while (i->next_peer == NULL); 3374 i = i->next_peer; 3375 } 3376 } 3377 } 3378 3379 /* Dump the EH tree for FN on stderr. */ 3380 3381 DEBUG_FUNCTION void 3382 debug_eh_tree (struct function *fn) 3383 { 3384 dump_eh_tree (stderr, fn); 3385 } 3386 3387 /* Verify invariants on EH datastructures. 
*/ 3388 3389 DEBUG_FUNCTION void 3390 verify_eh_tree (struct function *fun) 3391 { 3392 eh_region r, outer; 3393 int nvisited_lp, nvisited_r; 3394 int count_lp, count_r, depth, i; 3395 eh_landing_pad lp; 3396 bool err = false; 3397 3398 if (!fun->eh->region_tree) 3399 return; 3400 3401 count_r = 0; 3402 for (i = 1; vec_safe_iterate (fun->eh->region_array, i, &r); ++i) 3403 if (r) 3404 { 3405 if (r->index == i) 3406 count_r++; 3407 else 3408 { 3409 error ("region_array is corrupted for region %i", r->index); 3410 err = true; 3411 } 3412 } 3413 3414 count_lp = 0; 3415 for (i = 1; vec_safe_iterate (fun->eh->lp_array, i, &lp); ++i) 3416 if (lp) 3417 { 3418 if (lp->index == i) 3419 count_lp++; 3420 else 3421 { 3422 error ("lp_array is corrupted for lp %i", lp->index); 3423 err = true; 3424 } 3425 } 3426 3427 depth = nvisited_lp = nvisited_r = 0; 3428 outer = NULL; 3429 r = fun->eh->region_tree; 3430 while (1) 3431 { 3432 if ((*fun->eh->region_array)[r->index] != r) 3433 { 3434 error ("region_array is corrupted for region %i", r->index); 3435 err = true; 3436 } 3437 if (r->outer != outer) 3438 { 3439 error ("outer block of region %i is wrong", r->index); 3440 err = true; 3441 } 3442 if (depth < 0) 3443 { 3444 error ("negative nesting depth of region %i", r->index); 3445 err = true; 3446 } 3447 nvisited_r++; 3448 3449 for (lp = r->landing_pads; lp ; lp = lp->next_lp) 3450 { 3451 if ((*fun->eh->lp_array)[lp->index] != lp) 3452 { 3453 error ("lp_array is corrupted for lp %i", lp->index); 3454 err = true; 3455 } 3456 if (lp->region != r) 3457 { 3458 error ("region of lp %i is wrong", lp->index); 3459 err = true; 3460 } 3461 nvisited_lp++; 3462 } 3463 3464 if (r->inner) 3465 outer = r, r = r->inner, depth++; 3466 else if (r->next_peer) 3467 r = r->next_peer; 3468 else 3469 { 3470 do 3471 { 3472 r = r->outer; 3473 if (r == NULL) 3474 goto region_done; 3475 depth--; 3476 outer = r->outer; 3477 } 3478 while (r->next_peer == NULL); 3479 r = r->next_peer; 3480 } 3481 } 3482 region_done: 3483 if (depth != 0) 3484 { 3485 error ("tree list ends on depth %i", depth); 3486 err = true; 3487 } 3488 if (count_r != nvisited_r) 3489 { 3490 error ("region_array does not match region_tree"); 3491 err = true; 3492 } 3493 if (count_lp != nvisited_lp) 3494 { 3495 error ("lp_array does not match region_tree"); 3496 err = true; 3497 } 3498 3499 if (err) 3500 { 3501 dump_eh_tree (stderr, fun); 3502 internal_error ("verify_eh_tree failed"); 3503 } 3504 } 3505 3506 #include "gt-except.h" 3507