/* Implements exception handling.
   Copyright (C) 1989-2018 Free Software Foundation, Inc.
   Contributed by Mike Stump <mrs@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* An exception is an event that can be "thrown" from within a
   function.  This event can then be "caught" by the callers of
   the function.

   The representation of exceptions changes several times during
   the compilation process:

   In the beginning, in the front end, we have the GENERIC trees
   TRY_CATCH_EXPR, TRY_FINALLY_EXPR, WITH_CLEANUP_EXPR,
   CLEANUP_POINT_EXPR, CATCH_EXPR, and EH_FILTER_EXPR.

   During initial gimplification (gimplify.c) these are lowered
   to the GIMPLE_TRY, GIMPLE_CATCH, and GIMPLE_EH_FILTER nodes.
   The WITH_CLEANUP_EXPR and CLEANUP_POINT_EXPR nodes are converted
   into GIMPLE_TRY_FINALLY nodes; the others are a more direct 1-1
   conversion.
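
   For illustration (a schematic, not the exact IL), a C++ fragment

     try { foo (); } catch (E &e) { bar (); }

   reaches the gimplifier as a TRY_CATCH_EXPR whose handler is a
   CATCH_EXPR for type E, and is lowered to a GIMPLE_TRY statement
   whose cleanup sequence is a GIMPLE_CATCH holding the handler code.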

   During pass_lower_eh (tree-eh.c) we record the nested structure
   of the TRY nodes in EH_REGION nodes in CFUN->EH->REGION_TREE.
   We expand the eh_protect_cleanup_actions langhook into MUST_NOT_THROW
   regions at this time.  We can then flatten the statements within
   the TRY nodes to straight-line code.  Statements that had been within
   TRY nodes that can throw are recorded within CFUN->EH->THROW_STMT_TABLE,
   so that we may remember what action is supposed to be taken if
   a given statement does throw.  During this lowering process,
   we create an EH_LANDING_PAD node for each EH_REGION that has
   some code within the function that needs to be executed if a
   throw does happen.  We also create RESX statements that are
   used to transfer control from an inner EH_REGION to an outer
   EH_REGION.  We also create EH_DISPATCH statements as placeholders
   for a runtime type comparison that should be made in order to
   select the action to perform among different CATCH and EH_FILTER
   regions.

   During pass_lower_eh_dispatch (tree-eh.c), which is run after
   all inlining is complete, we are able to run assign_filter_values,
   which allows us to map the set of types manipulated by all of the
   CATCH and EH_FILTER regions to a set of integers.  This set of integers
   will be how the exception runtime communicates with the code generated
   within the function.  We then expand the GIMPLE_EH_DISPATCH statements
   to a switch or conditional branches that use the argument provided by
   the runtime (__builtin_eh_filter) and the set of integers we computed
   in assign_filter_values.
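
   Schematically (the filter numbers are illustrative), a dispatch for
   two catch clauses that were assigned filter values 1 and 2 becomes
   something like

     switch (__builtin_eh_filter (region))
       {
       case 1: goto catch_A;
       case 2: goto catch_B;
       }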

   During pass_lower_resx (tree-eh.c), which is run near the end
   of optimization, we expand RESX statements.  If the eh region
   that is outer to the RESX statement is a MUST_NOT_THROW, then
   the RESX expands to some form of abort statement.  If the eh
   region that is outer to the RESX statement is within the current
   function, then the RESX expands to a bookkeeping call
   (__builtin_eh_copy_values) and a goto.  Otherwise, the next
   handler for the exception must be within a function somewhere
   up the call chain, so we call back into the exception runtime
   (__builtin_unwind_resume).
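
   For the in-function case this is, schematically,

     __builtin_eh_copy_values (outer_region, inner_region);
     goto outer_post_landing_pad;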

   During pass_expand (cfgexpand.c), we generate REG_EH_REGION notes
   that create an rtl to eh_region mapping that corresponds to the
   gimple to eh_region mapping that had been recorded in the
   THROW_STMT_TABLE.

   Then, via finish_eh_generation, we generate the real landing pads
   to which the runtime will actually transfer control.  These new
   landing pads perform whatever bookkeeping is needed by the target
   backend in order to resume execution within the current function.
   Each of these new landing pads falls through into the post_landing_pad
   label which had been used within the CFG up to this point.  All
   exception edges within the CFG are redirected to the new landing pads.
   If the target uses setjmp to implement exceptions, the various extra
   calls into the runtime to register and unregister the current stack
   frame are emitted at this time.

   During pass_convert_to_eh_region_ranges (except.c), we transform
   the REG_EH_REGION notes attached to individual insns into
   non-overlapping ranges of insns bounded by NOTE_INSN_EH_REGION_BEG
   and NOTE_INSN_EH_REGION_END.  Each insn within such ranges has the
   same associated action within the exception region tree, meaning
   that (1) the exception is caught by the same landing pad within the
   current function, (2) the exception is blocked by the runtime with
   a MUST_NOT_THROW region, or (3) the exception is not handled at all
   within the current function.

   Finally, during assembly generation, we call
   output_function_exception_table (except.c) to emit the tables with
   which the exception runtime can determine if a given stack frame
   handles a given exception, and if so what filter value to provide
   to the function when the non-local control transfer is effected.
   If the target uses dwarf2 unwinding to implement exceptions, then
   output_call_frame_info (dwarf2out.c) emits the required unwind data.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "explow.h"
#include "stmt.h"
#include "expr.h"
#include "calls.h"
#include "libfuncs.h"
#include "except.h"
#include "output.h"
#include "dwarf2asm.h"
#include "dwarf2out.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "tree-pretty-print.h"
#include "cfgloop.h"
#include "builtins.h"
#include "tree-hash-traits.h"

static GTY(()) int call_site_base;

static GTY(()) hash_map<tree_hash, tree> *type_to_runtime_map;

static GTY(()) tree setjmp_fn;

/* Describe the SjLj_Function_Context structure.  */
static GTY(()) tree sjlj_fc_type_node;
static int sjlj_fc_call_site_ofs;
static int sjlj_fc_data_ofs;
static int sjlj_fc_personality_ofs;
static int sjlj_fc_lsda_ofs;
static int sjlj_fc_jbuf_ofs;


struct GTY(()) call_site_record_d
{
  rtx landing_pad;
  int action;
};

/* In the following structure and associated functions,
   we represent entries in the action table as 1-based indices.
   Special cases are:

     0:  null action record, non-null landing pad; implies cleanups
    -1:  null action record, null landing pad; implies no action
    -2:  no call-site entry; implies must_not_throw
    -3:  we have yet to process outer regions

   Further, no special cases apply to the "next" field of the record.
   For next, 0 means end of list.  */
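
/* For example (the indices are illustrative), a throwing call inside a
   catch (E) that is itself wrapped in a cleanup produces a chain of two
   records: the record for the catch holds E's filter value, and its
   NEXT field holds the 1-based index of the cleanup's record, whose own
   NEXT is 0.  */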

struct action_record
{
  int offset;
  int filter;
  int next;
};

/* Hashtable helpers.  */

struct action_record_hasher : free_ptr_hash <action_record>
{
  static inline hashval_t hash (const action_record *);
  static inline bool equal (const action_record *, const action_record *);
};

inline hashval_t
action_record_hasher::hash (const action_record *entry)
{
  return entry->next * 1009 + entry->filter;
}

inline bool
action_record_hasher::equal (const action_record *entry,
                             const action_record *data)
{
  return entry->filter == data->filter && entry->next == data->next;
}

typedef hash_table<action_record_hasher> action_hash_type;

static bool get_eh_region_and_lp_from_rtx (const_rtx, eh_region *,
                                           eh_landing_pad *);

static void dw2_build_landing_pads (void);

static int collect_one_action_chain (action_hash_type *, eh_region);
static int add_call_site (rtx, int, int);

static void push_uleb128 (vec<uchar, va_gc> **, unsigned int);
static void push_sleb128 (vec<uchar, va_gc> **, int);
static int dw2_size_of_call_site_table (int);
static int sjlj_size_of_call_site_table (void);
static void dw2_output_call_site_table (int, int);
static void sjlj_output_call_site_table (void);


void
init_eh (void)
{
  if (! flag_exceptions)
    return;

  type_to_runtime_map = hash_map<tree_hash, tree>::create_ggc (31);

  /* Create the SjLj_Function_Context structure.  This should match
     the definition in unwind-sjlj.c.  */
  if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
    {
      tree f_jbuf, f_per, f_lsda, f_prev, f_cs, f_data, tmp;

      sjlj_fc_type_node = lang_hooks.types.make_type (RECORD_TYPE);

      f_prev = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__prev"),
                           build_pointer_type (sjlj_fc_type_node));
      DECL_FIELD_CONTEXT (f_prev) = sjlj_fc_type_node;

      f_cs = build_decl (BUILTINS_LOCATION,
                         FIELD_DECL, get_identifier ("__call_site"),
                         integer_type_node);
      DECL_FIELD_CONTEXT (f_cs) = sjlj_fc_type_node;

      tmp = build_index_type (size_int (4 - 1));
      tmp = build_array_type (lang_hooks.types.type_for_mode
                              (targetm.unwind_word_mode (), 1),
                              tmp);
      f_data = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__data"), tmp);
      DECL_FIELD_CONTEXT (f_data) = sjlj_fc_type_node;

      f_per = build_decl (BUILTINS_LOCATION,
                          FIELD_DECL, get_identifier ("__personality"),
                          ptr_type_node);
      DECL_FIELD_CONTEXT (f_per) = sjlj_fc_type_node;

      f_lsda = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__lsda"),
                           ptr_type_node);
      DECL_FIELD_CONTEXT (f_lsda) = sjlj_fc_type_node;

#ifdef DONT_USE_BUILTIN_SETJMP
#ifdef JMP_BUF_SIZE
      tmp = size_int (JMP_BUF_SIZE - 1);
#else
      /* This should be large enough for most systems; if it is not,
         JMP_BUF_SIZE should be defined with the proper value.  It will
         also tend to be larger than necessary for most systems; a more
         optimal port will define JMP_BUF_SIZE.  */
      tmp = size_int (FIRST_PSEUDO_REGISTER + 2 - 1);
#endif
#else
      /* Compute a minimally sized jump buffer.  We need room to store at
         least 3 pointers - stack pointer, frame pointer and return address.
         Plus for some targets we need room for an extra pointer - in the
         case of MIPS this is the global pointer.  This makes a total of four
         pointers, but to be safe we actually allocate room for 5.

         If pointers are smaller than words then we allocate enough room for
         5 words, just in case the backend needs this much room.  For more
         discussion on this issue see:
         http://gcc.gnu.org/ml/gcc-patches/2014-05/msg00313.html.  */
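      /* For instance (illustrative numbers), with 32-bit pointers and
         64-bit words the else branch below reserves
         (5 * 64) / 32 = 10 pointer slots.  */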
      if (POINTER_SIZE > BITS_PER_WORD)
        tmp = size_int (5 - 1);
      else
        tmp = size_int ((5 * BITS_PER_WORD / POINTER_SIZE) - 1);
#endif

      tmp = build_index_type (tmp);
      tmp = build_array_type (ptr_type_node, tmp);
      f_jbuf = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__jbuf"), tmp);
#ifdef DONT_USE_BUILTIN_SETJMP
      /* We don't know what the alignment requirements of the
         runtime's jmp_buf are.  Overestimate.  */
      SET_DECL_ALIGN (f_jbuf, BIGGEST_ALIGNMENT);
      DECL_USER_ALIGN (f_jbuf) = 1;
#endif
      DECL_FIELD_CONTEXT (f_jbuf) = sjlj_fc_type_node;

      TYPE_FIELDS (sjlj_fc_type_node) = f_prev;
      TREE_CHAIN (f_prev) = f_cs;
      TREE_CHAIN (f_cs) = f_data;
      TREE_CHAIN (f_data) = f_per;
      TREE_CHAIN (f_per) = f_lsda;
      TREE_CHAIN (f_lsda) = f_jbuf;

      layout_type (sjlj_fc_type_node);

      /* Cache the interesting field offsets so that we have
         easy access from rtl.  */
      sjlj_fc_call_site_ofs
        = (tree_to_uhwi (DECL_FIELD_OFFSET (f_cs))
           + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_cs)) / BITS_PER_UNIT);
      sjlj_fc_data_ofs
        = (tree_to_uhwi (DECL_FIELD_OFFSET (f_data))
           + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_data)) / BITS_PER_UNIT);
      sjlj_fc_personality_ofs
        = (tree_to_uhwi (DECL_FIELD_OFFSET (f_per))
           + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_per)) / BITS_PER_UNIT);
      sjlj_fc_lsda_ofs
        = (tree_to_uhwi (DECL_FIELD_OFFSET (f_lsda))
           + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_lsda)) / BITS_PER_UNIT);
      sjlj_fc_jbuf_ofs
        = (tree_to_uhwi (DECL_FIELD_OFFSET (f_jbuf))
           + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_jbuf)) / BITS_PER_UNIT);

#ifdef DONT_USE_BUILTIN_SETJMP
      tmp = build_function_type_list (integer_type_node, TREE_TYPE (f_jbuf),
                                      NULL);
      setjmp_fn = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                              get_identifier ("setjmp"), tmp);
      TREE_PUBLIC (setjmp_fn) = 1;
      DECL_EXTERNAL (setjmp_fn) = 1;
      DECL_ASSEMBLER_NAME (setjmp_fn);
#endif
    }
}

void
init_eh_for_function (void)
{
  cfun->eh = ggc_cleared_alloc<eh_status> ();

  /* Make sure the zeroth entries are used.  */
  vec_safe_push (cfun->eh->region_array, (eh_region)0);
  vec_safe_push (cfun->eh->lp_array, (eh_landing_pad)0);
}

/* Routines to generate the exception tree somewhat directly.
   These are used from tree-eh.c when processing exception related
   nodes during tree optimization.  */

static eh_region
gen_eh_region (enum eh_region_type type, eh_region outer)
{
  eh_region new_eh;

  /* Insert a new blank region as a leaf in the tree.  */
  new_eh = ggc_cleared_alloc<eh_region_d> ();
  new_eh->type = type;
  new_eh->outer = outer;
  if (outer)
    {
      new_eh->next_peer = outer->inner;
      outer->inner = new_eh;
    }
  else
    {
      new_eh->next_peer = cfun->eh->region_tree;
      cfun->eh->region_tree = new_eh;
    }

  new_eh->index = vec_safe_length (cfun->eh->region_array);
  vec_safe_push (cfun->eh->region_array, new_eh);

  /* Copy the language's notion of whether to use __cxa_end_cleanup.  */
  if (targetm.arm_eabi_unwinder && lang_hooks.eh_use_cxa_end_cleanup)
    new_eh->use_cxa_end_cleanup = true;

  return new_eh;
}

eh_region
gen_eh_region_cleanup (eh_region outer)
{
  return gen_eh_region (ERT_CLEANUP, outer);
}

eh_region
gen_eh_region_try (eh_region outer)
{
  return gen_eh_region (ERT_TRY, outer);
}

eh_catch
gen_eh_region_catch (eh_region t, tree type_or_list)
{
  eh_catch c, l;
  tree type_list, type_node;

  gcc_assert (t->type == ERT_TRY);

  /* Make sure we always end up with a type list so as to normalize
     further processing; then register each type against the runtime
     types map.  */
  type_list = type_or_list;
  if (type_or_list)
    {
      if (TREE_CODE (type_or_list) != TREE_LIST)
        type_list = tree_cons (NULL_TREE, type_or_list, NULL_TREE);

      type_node = type_list;
      for (; type_node; type_node = TREE_CHAIN (type_node))
        add_type_for_runtime (TREE_VALUE (type_node));
    }

  c = ggc_cleared_alloc<eh_catch_d> ();
  c->type_list = type_list;
  l = t->u.eh_try.last_catch;
  c->prev_catch = l;
  if (l)
    l->next_catch = c;
  else
    t->u.eh_try.first_catch = c;
  t->u.eh_try.last_catch = c;

  return c;
}

eh_region
gen_eh_region_allowed (eh_region outer, tree allowed)
{
  eh_region region = gen_eh_region (ERT_ALLOWED_EXCEPTIONS, outer);
  region->u.allowed.type_list = allowed;

  for (; allowed ; allowed = TREE_CHAIN (allowed))
    add_type_for_runtime (TREE_VALUE (allowed));

  return region;
}

eh_region
gen_eh_region_must_not_throw (eh_region outer)
{
  return gen_eh_region (ERT_MUST_NOT_THROW, outer);
}

eh_landing_pad
gen_eh_landing_pad (eh_region region)
{
  eh_landing_pad lp = ggc_cleared_alloc<eh_landing_pad_d> ();

  lp->next_lp = region->landing_pads;
  lp->region = region;
  lp->index = vec_safe_length (cfun->eh->lp_array);
  region->landing_pads = lp;

  vec_safe_push (cfun->eh->lp_array, lp);

  return lp;
}

eh_region
get_eh_region_from_number_fn (struct function *ifun, int i)
{
  return (*ifun->eh->region_array)[i];
}

eh_region
get_eh_region_from_number (int i)
{
  return get_eh_region_from_number_fn (cfun, i);
}

eh_landing_pad
get_eh_landing_pad_from_number_fn (struct function *ifun, int i)
{
  return (*ifun->eh->lp_array)[i];
}

eh_landing_pad
get_eh_landing_pad_from_number (int i)
{
  return get_eh_landing_pad_from_number_fn (cfun, i);
}

eh_region
get_eh_region_from_lp_number_fn (struct function *ifun, int i)
{
  if (i < 0)
    return (*ifun->eh->region_array)[-i];
  else if (i == 0)
    return NULL;
  else
    {
      eh_landing_pad lp;
      lp = (*ifun->eh->lp_array)[i];
      return lp->region;
    }
}

eh_region
get_eh_region_from_lp_number (int i)
{
  return get_eh_region_from_lp_number_fn (cfun, i);
}

/* Returns true if the current function has exception handling regions.  */

bool
current_function_has_exception_handlers (void)
{
  return cfun->eh->region_tree != NULL;
}

/* A subroutine of duplicate_eh_regions.  Copy the eh_region tree at
   OLD_R, rooting the copy at OUTER, and remap labels via the
   LABEL_MAP callback in DATA.  */

struct duplicate_eh_regions_data
{
  duplicate_eh_regions_map label_map;
  void *label_map_data;
  hash_map<void *, void *> *eh_map;
};

static void
duplicate_eh_regions_1 (struct duplicate_eh_regions_data *data,
                        eh_region old_r, eh_region outer)
{
  eh_landing_pad old_lp, new_lp;
  eh_region new_r;

  new_r = gen_eh_region (old_r->type, outer);
  gcc_assert (!data->eh_map->put (old_r, new_r));

  switch (old_r->type)
    {
    case ERT_CLEANUP:
      break;

    case ERT_TRY:
      {
        eh_catch oc, nc;
        for (oc = old_r->u.eh_try.first_catch; oc ; oc = oc->next_catch)
          {
            /* We should be doing all our region duplication before and
               during inlining, which is before filter lists are created.  */
            gcc_assert (oc->filter_list == NULL);
            nc = gen_eh_region_catch (new_r, oc->type_list);
            nc->label = data->label_map (oc->label, data->label_map_data);
          }
      }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      new_r->u.allowed.type_list = old_r->u.allowed.type_list;
      if (old_r->u.allowed.label)
        new_r->u.allowed.label
          = data->label_map (old_r->u.allowed.label, data->label_map_data);
      else
        new_r->u.allowed.label = NULL_TREE;
      break;

    case ERT_MUST_NOT_THROW:
      new_r->u.must_not_throw.failure_loc =
        LOCATION_LOCUS (old_r->u.must_not_throw.failure_loc);
      new_r->u.must_not_throw.failure_decl =
        old_r->u.must_not_throw.failure_decl;
      break;
    }

  for (old_lp = old_r->landing_pads; old_lp ; old_lp = old_lp->next_lp)
    {
      /* Don't bother copying unused landing pads.  */
      if (old_lp->post_landing_pad == NULL)
        continue;

      new_lp = gen_eh_landing_pad (new_r);
      gcc_assert (!data->eh_map->put (old_lp, new_lp));

      new_lp->post_landing_pad
        = data->label_map (old_lp->post_landing_pad, data->label_map_data);
      EH_LANDING_PAD_NR (new_lp->post_landing_pad) = new_lp->index;
    }

  /* Make sure to preserve the original use of __cxa_end_cleanup.  */
  new_r->use_cxa_end_cleanup = old_r->use_cxa_end_cleanup;

  for (old_r = old_r->inner; old_r ; old_r = old_r->next_peer)
    duplicate_eh_regions_1 (data, old_r, new_r);
}

/* Duplicate the EH regions from IFUN rooted at COPY_REGION into
   the current function and root the tree below OUTER_REGION.
   The special case of COPY_REGION of NULL means all regions.
   Remap labels using MAP/MAP_DATA callback.  Return a pointer map
   that allows the caller to remap uses of both EH regions and
   EH landing pads.  */

hash_map<void *, void *> *
duplicate_eh_regions (struct function *ifun,
                      eh_region copy_region, int outer_lp,
                      duplicate_eh_regions_map map, void *map_data)
{
  struct duplicate_eh_regions_data data;
  eh_region outer_region;

  if (flag_checking)
    verify_eh_tree (ifun);

  data.label_map = map;
  data.label_map_data = map_data;
  data.eh_map = new hash_map<void *, void *>;

  outer_region = get_eh_region_from_lp_number_fn (cfun, outer_lp);

  /* Copy all the regions in the subtree.  */
  if (copy_region)
    duplicate_eh_regions_1 (&data, copy_region, outer_region);
  else
    {
      eh_region r;
      for (r = ifun->eh->region_tree; r ; r = r->next_peer)
        duplicate_eh_regions_1 (&data, r, outer_region);
    }

  if (flag_checking)
    verify_eh_tree (cfun);

  return data.eh_map;
}

/* Return the region that is outer to both REGION_A and REGION_B in IFUN.  */

eh_region
eh_region_outermost (struct function *ifun, eh_region region_a,
                     eh_region region_b)
{
  gcc_assert (ifun->eh->region_array);
  gcc_assert (ifun->eh->region_tree);

  auto_sbitmap b_outer (ifun->eh->region_array->length ());
  bitmap_clear (b_outer);

  do
    {
      bitmap_set_bit (b_outer, region_b->index);
      region_b = region_b->outer;
    }
  while (region_b);

  do
    {
      if (bitmap_bit_p (b_outer, region_a->index))
        break;
      region_a = region_a->outer;
    }
  while (region_a);

  return region_a;
}

void
add_type_for_runtime (tree type)
{
  /* If TYPE is a NOP_EXPR, it means that it already is a runtime type.  */
  if (TREE_CODE (type) == NOP_EXPR)
    return;

  bool existed = false;
  tree *slot = &type_to_runtime_map->get_or_insert (type, &existed);
  if (!existed)
    *slot = lang_hooks.eh_runtime_type (type);
}

tree
lookup_type_for_runtime (tree type)
{
  /* If TYPE is a NOP_EXPR, it means that it already is a runtime type.  */
  if (TREE_CODE (type) == NOP_EXPR)
    return type;

  /* We should have always inserted the data earlier.  */
  return *type_to_runtime_map->get (type);
}


/* Represent an entry in @TTypes for either catch actions
   or exception filter actions.  */
struct ttypes_filter {
  tree t;
  int filter;
};

/* Helper for ttypes_filter hashing.  */

struct ttypes_filter_hasher : free_ptr_hash <ttypes_filter>
{
  typedef tree_node *compare_type;
  static inline hashval_t hash (const ttypes_filter *);
  static inline bool equal (const ttypes_filter *, const tree_node *);
};

/* Compare ENTRY (a ttypes_filter entry in the hash table) with DATA
   (a tree) for a @TTypes type node we are thinking about adding.  */

inline bool
ttypes_filter_hasher::equal (const ttypes_filter *entry, const tree_node *data)
{
  return entry->t == data;
}

inline hashval_t
ttypes_filter_hasher::hash (const ttypes_filter *entry)
{
  return TREE_HASH (entry->t);
}

typedef hash_table<ttypes_filter_hasher> ttypes_hash_type;


/* Helper for ehspec hashing.  */

struct ehspec_hasher : free_ptr_hash <ttypes_filter>
{
  static inline hashval_t hash (const ttypes_filter *);
  static inline bool equal (const ttypes_filter *, const ttypes_filter *);
};

/* Compare ENTRY with DATA (both struct ttypes_filter) for a @TTypes
   exception specification list we are thinking about adding.  */
/* ??? Currently we use the type lists in the order given.  Someone
   should put these in some canonical order.  */

inline bool
ehspec_hasher::equal (const ttypes_filter *entry, const ttypes_filter *data)
{
  return type_list_equal (entry->t, data->t);
}

/* Hash function for exception specification lists.  */

inline hashval_t
ehspec_hasher::hash (const ttypes_filter *entry)
{
  hashval_t h = 0;
  tree list;

  for (list = entry->t; list ; list = TREE_CHAIN (list))
    h = (h << 5) + (h >> 27) + TREE_HASH (TREE_VALUE (list));
  return h;
}

typedef hash_table<ehspec_hasher> ehspec_hash_type;


/* Add TYPE (which may be NULL) to cfun->eh->ttype_data, using TYPES_HASH
   to speed up the search.  Return the filter value to be used.  */

static int
add_ttypes_entry (ttypes_hash_type *ttypes_hash, tree type)
{
  struct ttypes_filter **slot, *n;

  slot = ttypes_hash->find_slot_with_hash (type, (hashval_t) TREE_HASH (type),
                                           INSERT);

  if ((n = *slot) == NULL)
    {
      /* Filter value is a 1 based table index.  */

      n = XNEW (struct ttypes_filter);
      n->t = type;
      n->filter = vec_safe_length (cfun->eh->ttype_data) + 1;
      *slot = n;

      vec_safe_push (cfun->eh->ttype_data, type);
    }

  return n->filter;
}

/* Add LIST to cfun->eh->ehspec_data, using EHSPEC_HASH and TYPES_HASH
   to speed up the search.  Return the filter value to be used.  */

static int
add_ehspec_entry (ehspec_hash_type *ehspec_hash, ttypes_hash_type *ttypes_hash,
                  tree list)
{
  struct ttypes_filter **slot, *n;
  struct ttypes_filter dummy;

  dummy.t = list;
  slot = ehspec_hash->find_slot (&dummy, INSERT);

  if ((n = *slot) == NULL)
    {
      int len;

      if (targetm.arm_eabi_unwinder)
        len = vec_safe_length (cfun->eh->ehspec_data.arm_eabi);
      else
        len = vec_safe_length (cfun->eh->ehspec_data.other);

      /* Filter value is a -1 based byte index into a uleb128 buffer.  */

      n = XNEW (struct ttypes_filter);
      n->t = list;
      n->filter = -(len + 1);
      *slot = n;

      /* Generate a 0 terminated list of filter values.  */
      for (; list ; list = TREE_CHAIN (list))
        {
          if (targetm.arm_eabi_unwinder)
            vec_safe_push (cfun->eh->ehspec_data.arm_eabi, TREE_VALUE (list));
          else
            {
              /* Look up each type in the list and encode its filter
                 value as a uleb128.  */
              push_uleb128 (&cfun->eh->ehspec_data.other,
                            add_ttypes_entry (ttypes_hash, TREE_VALUE (list)));
            }
        }
      if (targetm.arm_eabi_unwinder)
        vec_safe_push (cfun->eh->ehspec_data.arm_eabi, NULL_TREE);
      else
        vec_safe_push (cfun->eh->ehspec_data.other, (uchar)0);
    }

  return n->filter;
}

/* Generate the action filter values to be used for CATCH and
   ALLOWED_EXCEPTIONS regions.  When using dwarf2 exception regions,
   we use lots of landing pads, and so every type or list can share
   the same filter value, which saves table space.  */
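
/* For example (the values are illustrative), if the function contains
   catch (A) and, elsewhere, catch (B), add_ttypes_entry assigns A
   filter value 1 and B filter value 2, and every other catch of A in
   the function reuses filter 1.  */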

void
assign_filter_values (void)
{
  int i;
  eh_region r;
  eh_catch c;

  vec_alloc (cfun->eh->ttype_data, 16);
  if (targetm.arm_eabi_unwinder)
    vec_alloc (cfun->eh->ehspec_data.arm_eabi, 64);
  else
    vec_alloc (cfun->eh->ehspec_data.other, 64);

  ehspec_hash_type ehspec (31);
  ttypes_hash_type ttypes (31);

  for (i = 1; vec_safe_iterate (cfun->eh->region_array, i, &r); ++i)
    {
      if (r == NULL)
        continue;

      switch (r->type)
        {
        case ERT_TRY:
          for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
            {
              /* Whatever type_list is (NULL or true list), we build a list
                 of filters for the region.  */
              c->filter_list = NULL_TREE;

              if (c->type_list != NULL)
                {
                  /* Get a filter value for each of the types caught and store
                     them in the region's dedicated list.  */
                  tree tp_node = c->type_list;

                  for ( ; tp_node; tp_node = TREE_CHAIN (tp_node))
                    {
                      int flt
                        = add_ttypes_entry (&ttypes, TREE_VALUE (tp_node));
                      tree flt_node = build_int_cst (integer_type_node, flt);

                      c->filter_list
                        = tree_cons (NULL_TREE, flt_node, c->filter_list);
                    }
                }
              else
                {
                  /* Get a filter value for the NULL list also since it
                     will need an action record anyway.  */
                  int flt = add_ttypes_entry (&ttypes, NULL);
                  tree flt_node = build_int_cst (integer_type_node, flt);

                  c->filter_list
                    = tree_cons (NULL_TREE, flt_node, NULL);
                }
            }
          break;

        case ERT_ALLOWED_EXCEPTIONS:
          r->u.allowed.filter
            = add_ehspec_entry (&ehspec, &ttypes, r->u.allowed.type_list);
          break;

        default:
          break;
        }
    }
}

/* Emit SEQ into the basic block just before INSN (which is assumed to
   be the first instruction of some existing BB) and return the newly
   produced block.  */
static basic_block
emit_to_new_bb_before (rtx_insn *seq, rtx_insn *insn)
{
  rtx_insn *last;
  basic_block bb;
  edge e;
  edge_iterator ei;

  /* If there happens to be a fallthru edge (possibly created by a
     cleanup_cfg call), we don't want it to go into a newly created
     landing pad or other EH construct.  */
  for (ei = ei_start (BLOCK_FOR_INSN (insn)->preds); (e = ei_safe_edge (ei)); )
    if (e->flags & EDGE_FALLTHRU)
      force_nonfallthru (e);
    else
      ei_next (&ei);
  last = emit_insn_before (seq, insn);
  if (BARRIER_P (last))
    last = PREV_INSN (last);
  bb = create_basic_block (seq, last, BLOCK_FOR_INSN (insn)->prev_bb);
  update_bb_for_insn (bb);
  bb->flags |= BB_SUPERBLOCK;
  return bb;
}

/* A subroutine of dw2_build_landing_pads, also used for edge splitting
   at the rtl level.  Emit the code required by the target at a landing
   pad for the given region.  */

static void
expand_dw2_landing_pad_for_region (eh_region region)
{
  if (targetm.have_exception_receiver ())
    emit_insn (targetm.gen_exception_receiver ());
  else if (targetm.have_nonlocal_goto_receiver ())
    emit_insn (targetm.gen_nonlocal_goto_receiver ());
  else
    { /* Nothing */ }

  if (region->exc_ptr_reg)
    emit_move_insn (region->exc_ptr_reg,
                    gen_rtx_REG (ptr_mode, EH_RETURN_DATA_REGNO (0)));
  if (region->filter_reg)
    emit_move_insn (region->filter_reg,
                    gen_rtx_REG (targetm.eh_return_filter_mode (),
                                 EH_RETURN_DATA_REGNO (1)));
}

/* Expand the extra code needed at landing pads for dwarf2 unwinding.  */

static void
dw2_build_landing_pads (void)
{
  int i;
  eh_landing_pad lp;
  int e_flags = EDGE_FALLTHRU;

  /* If we're going to partition blocks, we need to be able to add
     new landing pads later, which means that we need to hold on to
     the post-landing-pad block.  Prevent it from being merged away.
     We'll remove this bit after partitioning.  */
  if (flag_reorder_blocks_and_partition)
    e_flags |= EDGE_PRESERVE;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    {
      basic_block bb;
      rtx_insn *seq;

      if (lp == NULL || lp->post_landing_pad == NULL)
        continue;

      start_sequence ();

      lp->landing_pad = gen_label_rtx ();
      emit_label (lp->landing_pad);
      LABEL_PRESERVE_P (lp->landing_pad) = 1;

      expand_dw2_landing_pad_for_region (lp->region);

      seq = get_insns ();
      end_sequence ();

      bb = emit_to_new_bb_before (seq, label_rtx (lp->post_landing_pad));
      bb->count = bb->next_bb->count;
      make_single_succ_edge (bb, bb->next_bb, e_flags);
      if (current_loops)
        {
          struct loop *loop = bb->next_bb->loop_father;
          /* If we created a pre-header block, add the new block to the
             outer loop, otherwise to the loop itself.  */
          if (bb->next_bb == loop->header)
            add_bb_to_loop (bb, loop_outer (loop));
          else
            add_bb_to_loop (bb, loop);
        }
    }
}


static vec<int> sjlj_lp_call_site_index;

/* Process all active landing pads.  Assign each one a compact dispatch
   index, and a call-site index.  */

static int
sjlj_assign_call_site_values (void)
{
  action_hash_type ar_hash (31);
  int i, disp_index;
  eh_landing_pad lp;

  vec_alloc (crtl->eh.action_record_data, 64);

  disp_index = 0;
  call_site_base = 1;
  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp && lp->post_landing_pad)
      {
        int action, call_site;

        /* First: build the action table.  */
        action = collect_one_action_chain (&ar_hash, lp->region);

        /* Next: assign call-site values.  In dwarf2 terms, this would be
           the region number assigned by convert_to_eh_region_ranges, but
           we handle no-action and must-not-throw differently.  */
        /* Map must-not-throw to otherwise unused call-site index 0.  */
        if (action == -2)
          call_site = 0;
        /* Map no-action to otherwise unused call-site index -1.  */
        else if (action == -1)
          call_site = -1;
        /* Otherwise, look it up in the table.  */
        else
          call_site = add_call_site (GEN_INT (disp_index), action, 0);
        sjlj_lp_call_site_index[i] = call_site;

        disp_index++;
      }

  return disp_index;
}

/* Emit code to record the current call-site index before every
   insn that can throw.  */

static void
sjlj_mark_call_sites (void)
{
  int last_call_site = -2;
  rtx_insn *insn;
  rtx mem;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    {
      eh_landing_pad lp;
      eh_region r;
      bool nothrow;
      int this_call_site;
      rtx_insn *before, *p;

      /* Reset value tracking at extended basic block boundaries.  */
      if (LABEL_P (insn))
        last_call_site = -2;

      /* If the function allocates dynamic stack space, the context must
         be updated after every allocation/deallocation accordingly.  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_UPDATE_SJLJ_CONTEXT)
        {
          rtx buf_addr;

          start_sequence ();
          buf_addr = plus_constant (Pmode, XEXP (crtl->eh.sjlj_fc, 0),
                                    sjlj_fc_jbuf_ofs);
          expand_builtin_update_setjmp_buf (buf_addr);
          p = get_insns ();
          end_sequence ();
          emit_insn_before (p, insn);
        }

      if (! INSN_P (insn))
        continue;

      nothrow = get_eh_region_and_lp_from_rtx (insn, &r, &lp);
      if (nothrow)
        continue;
      if (lp)
        this_call_site = sjlj_lp_call_site_index[lp->index];
      else if (r == NULL)
        {
          /* Calls (and trapping insns) without notes are outside any
             exception handling region in this function.  Mark them as
             no action.  */
          this_call_site = -1;
        }
      else
        {
          gcc_assert (r->type == ERT_MUST_NOT_THROW);
          this_call_site = 0;
        }

      if (this_call_site != -1)
        crtl->uses_eh_lsda = 1;

      if (this_call_site == last_call_site)
        continue;

      /* Don't separate a call from its argument loads.  */
      before = insn;
      if (CALL_P (insn))
        before = find_first_parameter_load (insn, NULL);

      start_sequence ();
      mem = adjust_address (crtl->eh.sjlj_fc, TYPE_MODE (integer_type_node),
                            sjlj_fc_call_site_ofs);
      emit_move_insn (mem, gen_int_mode (this_call_site, GET_MODE (mem)));
      p = get_insns ();
      end_sequence ();

      emit_insn_before (p, before);
      last_call_site = this_call_site;
    }
}

/* Construct the SjLj_Function_Context.  */

static void
sjlj_emit_function_enter (rtx_code_label *dispatch_label)
{
  rtx_insn *fn_begin, *seq;
  rtx fc, mem;
  bool fn_begin_outside_block;
  rtx personality = get_personality_function (current_function_decl);

  fc = crtl->eh.sjlj_fc;

  start_sequence ();

  /* We're storing this libcall's address into memory instead of
     calling it directly.  Thus, we must call assemble_external_libcall
     here, as we cannot depend on emit_library_call to do it for us.  */
  assemble_external_libcall (personality);
  mem = adjust_address (fc, Pmode, sjlj_fc_personality_ofs);
  emit_move_insn (mem, personality);

  mem = adjust_address (fc, Pmode, sjlj_fc_lsda_ofs);
  if (crtl->uses_eh_lsda)
    {
      char buf[20];
      rtx sym;

      ASM_GENERATE_INTERNAL_LABEL (buf, "LLSDA", current_function_funcdef_no);
      sym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_LOCAL;
      emit_move_insn (mem, sym);
    }
  else
    emit_move_insn (mem, const0_rtx);

  if (dispatch_label)
    {
      rtx addr = plus_constant (Pmode, XEXP (fc, 0), sjlj_fc_jbuf_ofs);

#ifdef DONT_USE_BUILTIN_SETJMP
      addr = copy_addr_to_reg (addr);
      addr = convert_memory_address (ptr_mode, addr);
      tree addr_tree = make_tree (ptr_type_node, addr);

      tree call_expr = build_call_expr (setjmp_fn, 1, addr_tree);
      rtx x = expand_call (call_expr, NULL_RTX, false);

      emit_cmp_and_jump_insns (x, const0_rtx, NE, 0,
                               TYPE_MODE (integer_type_node), 0,
                               dispatch_label,
                               profile_probability::unlikely ());
#else
      expand_builtin_setjmp_setup (addr, dispatch_label);
#endif
    }

  emit_library_call (unwind_sjlj_register_libfunc, LCT_NORMAL, VOIDmode,
                     XEXP (fc, 0), Pmode);

  seq = get_insns ();
  end_sequence ();

  /* ??? Instead of doing this at the beginning of the function,
     do this in a block that is at loop level 0 and dominates all
     can_throw_internal instructions.  */

  fn_begin_outside_block = true;
  for (fn_begin = get_insns (); ; fn_begin = NEXT_INSN (fn_begin))
    if (NOTE_P (fn_begin))
      {
        if (NOTE_KIND (fn_begin) == NOTE_INSN_FUNCTION_BEG)
          break;
        else if (NOTE_INSN_BASIC_BLOCK_P (fn_begin))
          fn_begin_outside_block = false;
      }

#ifdef DONT_USE_BUILTIN_SETJMP
  if (dispatch_label)
    {
      /* The sequence contains a branch in the middle so we need to force
         the creation of a new basic block by means of BB_SUPERBLOCK.  */
      if (fn_begin_outside_block)
        {
          basic_block bb
            = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
          if (JUMP_P (BB_END (bb)))
            emit_insn_before (seq, BB_END (bb));
          else
            emit_insn_after (seq, BB_END (bb));
        }
      else
        emit_insn_after (seq, fn_begin);

      single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->flags |= BB_SUPERBLOCK;
      return;
    }
#endif

  if (fn_begin_outside_block)
    insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
  else
    emit_insn_after (seq, fn_begin);
}

/* Call back from expand_function_end to know where we should put
   the call to unwind_sjlj_unregister_libfunc if needed.  */

void
sjlj_emit_function_exit_after (rtx_insn *after)
{
  crtl->eh.sjlj_exit_after = after;
}

static void
sjlj_emit_function_exit (void)
{
  rtx_insn *seq, *insn;

  start_sequence ();

  emit_library_call (unwind_sjlj_unregister_libfunc, LCT_NORMAL, VOIDmode,
                     XEXP (crtl->eh.sjlj_fc, 0), Pmode);

  seq = get_insns ();
  end_sequence ();

  /* ??? Really this can be done in any block at loop level 0 that
     post-dominates all can_throw_internal instructions.  This is
     the last possible moment.  */

  insn = crtl->eh.sjlj_exit_after;
  if (LABEL_P (insn))
    insn = NEXT_INSN (insn);

  emit_insn_after (seq, insn);
}

static void
sjlj_emit_dispatch_table (rtx_code_label *dispatch_label, int num_dispatch)
{
  scalar_int_mode unwind_word_mode = targetm.unwind_word_mode ();
  scalar_int_mode filter_mode = targetm.eh_return_filter_mode ();
  eh_landing_pad lp;
  rtx mem, fc, exc_ptr_reg, filter_reg;
  rtx_insn *seq;
  basic_block bb;
  eh_region r;
  int i, disp_index;
  vec<tree> dispatch_labels = vNULL;

  fc = crtl->eh.sjlj_fc;

  start_sequence ();

  emit_label (dispatch_label);

#ifndef DONT_USE_BUILTIN_SETJMP
  expand_builtin_setjmp_receiver (dispatch_label);

  /* The caller of expand_builtin_setjmp_receiver is responsible for
     making sure that the label doesn't vanish.  The only other caller
     is the expander for __builtin_setjmp_receiver, which places this
     label on the nonlocal_goto_label list.  Since we're modeling these
     CFG edges more exactly, we can use the forced_labels list instead.  */
  LABEL_PRESERVE_P (dispatch_label) = 1;
  vec_safe_push<rtx_insn *> (forced_labels, dispatch_label);
#endif

  /* Load up exc_ptr and filter values from the function context.  */
  mem = adjust_address (fc, unwind_word_mode, sjlj_fc_data_ofs);
  if (unwind_word_mode != ptr_mode)
    {
#ifdef POINTERS_EXTEND_UNSIGNED
      mem = convert_memory_address (ptr_mode, mem);
#else
      mem = convert_to_mode (ptr_mode, mem, 0);
#endif
    }
  exc_ptr_reg = force_reg (ptr_mode, mem);

  mem = adjust_address (fc, unwind_word_mode,
                        sjlj_fc_data_ofs + GET_MODE_SIZE (unwind_word_mode));
  if (unwind_word_mode != filter_mode)
    mem = convert_to_mode (filter_mode, mem, 0);
  filter_reg = force_reg (filter_mode, mem);

  /* Jump to one of the directly reachable regions.  */

  disp_index = 0;
  rtx_code_label *first_reachable_label = NULL;

  /* If there's exactly one call site in the function, don't bother
     generating a switch statement.  */
  if (num_dispatch > 1)
    dispatch_labels.create (num_dispatch);

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp && lp->post_landing_pad)
      {
        rtx_insn *seq2;
        rtx_code_label *label;

        start_sequence ();

        lp->landing_pad = dispatch_label;

        if (num_dispatch > 1)
          {
            tree t_label, case_elt, t;

            t_label = create_artificial_label (UNKNOWN_LOCATION);
            t = build_int_cst (integer_type_node, disp_index);
            case_elt = build_case_label (t, NULL, t_label);
            dispatch_labels.quick_push (case_elt);
            label = jump_target_rtx (t_label);
          }
        else
          label = gen_label_rtx ();

        if (disp_index == 0)
          first_reachable_label = label;
        emit_label (label);

        r = lp->region;
        if (r->exc_ptr_reg)
          emit_move_insn (r->exc_ptr_reg, exc_ptr_reg);
        if (r->filter_reg)
          emit_move_insn (r->filter_reg, filter_reg);

        seq2 = get_insns ();
        end_sequence ();

        rtx_insn *before = label_rtx (lp->post_landing_pad);
        bb = emit_to_new_bb_before (seq2, before);
        make_single_succ_edge (bb, bb->next_bb, EDGE_FALLTHRU);
        if (current_loops)
          {
            struct loop *loop = bb->next_bb->loop_father;
            /* If we created a pre-header block, add the new block to the
               outer loop, otherwise to the loop itself.  */
            if (bb->next_bb == loop->header)
              add_bb_to_loop (bb, loop_outer (loop));
            else
              add_bb_to_loop (bb, loop);
            /* ??? For multiple dispatches we will end up with edges
               from the loop tree root into this loop, making it a
               multiple-entry loop.  Discard all affected loops.  */
            if (num_dispatch > 1)
              {
                for (loop = bb->loop_father;
                     loop_outer (loop); loop = loop_outer (loop))
                  mark_loop_for_removal (loop);
              }
          }

        disp_index++;
      }
  gcc_assert (disp_index == num_dispatch);

  if (num_dispatch > 1)
    {
      rtx disp = adjust_address (fc, TYPE_MODE (integer_type_node),
                                 sjlj_fc_call_site_ofs);
      expand_sjlj_dispatch_table (disp, dispatch_labels);
    }

  seq = get_insns ();
  end_sequence ();

  bb = emit_to_new_bb_before (seq, first_reachable_label);
  if (num_dispatch == 1)
    {
      make_single_succ_edge (bb, bb->next_bb, EDGE_FALLTHRU);
      if (current_loops)
        {
          struct loop *loop = bb->next_bb->loop_father;
          /* If we created a pre-header block, add the new block to the
             outer loop, otherwise to the loop itself.  */
          if (bb->next_bb == loop->header)
            add_bb_to_loop (bb, loop_outer (loop));
          else
            add_bb_to_loop (bb, loop);
        }
    }
  else
    {
      /* We are not wiring up edges here, but as the dispatcher call
         is at function begin simply associate the block with the
         outermost (non-)loop.  */
      if (current_loops)
        add_bb_to_loop (bb, current_loops->tree_root);
    }
}

static void
sjlj_build_landing_pads (void)
{
  int num_dispatch;

  num_dispatch = vec_safe_length (cfun->eh->lp_array);
  if (num_dispatch == 0)
    return;
  sjlj_lp_call_site_index.safe_grow_cleared (num_dispatch);

  num_dispatch = sjlj_assign_call_site_values ();
  if (num_dispatch > 0)
    {
      rtx_code_label *dispatch_label = gen_label_rtx ();
      int align = STACK_SLOT_ALIGNMENT (sjlj_fc_type_node,
                                        TYPE_MODE (sjlj_fc_type_node),
                                        TYPE_ALIGN (sjlj_fc_type_node));
      crtl->eh.sjlj_fc
        = assign_stack_local (TYPE_MODE (sjlj_fc_type_node),
                              int_size_in_bytes (sjlj_fc_type_node),
                              align);

      sjlj_mark_call_sites ();
      sjlj_emit_function_enter (dispatch_label);
      sjlj_emit_dispatch_table (dispatch_label, num_dispatch);
      sjlj_emit_function_exit ();
    }

  /* If we do not have any landing pads, we may still need to register a
     personality routine and (empty) LSDA to handle must-not-throw regions.  */
  else if (function_needs_eh_personality (cfun) != eh_personality_none)
    {
      int align = STACK_SLOT_ALIGNMENT (sjlj_fc_type_node,
                                        TYPE_MODE (sjlj_fc_type_node),
                                        TYPE_ALIGN (sjlj_fc_type_node));
      crtl->eh.sjlj_fc
        = assign_stack_local (TYPE_MODE (sjlj_fc_type_node),
                              int_size_in_bytes (sjlj_fc_type_node),
                              align);

      sjlj_mark_call_sites ();
      sjlj_emit_function_enter (NULL);
      sjlj_emit_function_exit ();
    }

  sjlj_lp_call_site_index.release ();
}

/* Update the sjlj function context.  This function should be called
   whenever we allocate or deallocate dynamic stack space.  */

void
update_sjlj_context (void)
{
  if (!flag_exceptions)
    return;

  emit_note (NOTE_INSN_UPDATE_SJLJ_CONTEXT);
}

/* After initial rtl generation, call back to finish generating
   exception support code.  */

void
finish_eh_generation (void)
{
  basic_block bb;

  /* Construct the landing pads.  */
  if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
    sjlj_build_landing_pads ();
  else
    dw2_build_landing_pads ();
  break_superblocks ();

  if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ
      /* Kludge for Alpha (see alpha_gp_save_rtx).  */
      || single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->insns.r)
    commit_edge_insertions ();

  /* Redirect all EH edges from the post_landing_pad to the landing pad.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      eh_landing_pad lp;
      edge_iterator ei;
      edge e;

      lp = get_eh_landing_pad_from_rtx (BB_END (bb));

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->flags & EDGE_EH)
          break;

      /* We should not have generated any new throwing insns during this
         pass, and we should not have lost any EH edges, so we only need
         to handle two cases here:
         (1) reachable handler and an existing edge to post-landing-pad,
         (2) no reachable handler and no edge.  */
      gcc_assert ((lp != NULL) == (e != NULL));
      if (lp != NULL)
        {
          gcc_assert (BB_HEAD (e->dest) == label_rtx (lp->post_landing_pad));

          redirect_edge_succ (e, BLOCK_FOR_INSN (lp->landing_pad));
          e->flags |= (CALL_P (BB_END (bb))
                       ? EDGE_ABNORMAL | EDGE_ABNORMAL_CALL
                       : EDGE_ABNORMAL);
        }
    }
}

/* This section handles removing dead code for flow.  */

void
remove_eh_landing_pad (eh_landing_pad lp)
{
  eh_landing_pad *pp;

  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
    continue;
  *pp = lp->next_lp;

  if (lp->post_landing_pad)
    EH_LANDING_PAD_NR (lp->post_landing_pad) = 0;
  (*cfun->eh->lp_array)[lp->index] = NULL;
}

/* Splice the EH region at PP from the region tree.  */

static void
remove_eh_handler_splicer (eh_region *pp)
{
  eh_region region = *pp;
  eh_landing_pad lp;

  for (lp = region->landing_pads; lp ; lp = lp->next_lp)
    {
      if (lp->post_landing_pad)
        EH_LANDING_PAD_NR (lp->post_landing_pad) = 0;
      (*cfun->eh->lp_array)[lp->index] = NULL;
    }

  if (region->inner)
    {
      eh_region p, outer;
      outer = region->outer;

      *pp = p = region->inner;
      do
        {
          p->outer = outer;
          pp = &p->next_peer;
          p = *pp;
        }
      while (p);
    }
  *pp = region->next_peer;

  (*cfun->eh->region_array)[region->index] = NULL;
}

/* Splice a single EH region REGION from the region tree.

   To unlink REGION, we need to find the pointer to it with a relatively
   expensive search in REGION's outer region.  If you are going to
   remove a number of handlers, using remove_unreachable_eh_regions may
   be a better option.  */

void
remove_eh_handler (eh_region region)
{
  eh_region *pp, *pp_start, p, outer;

  outer = region->outer;
  if (outer)
    pp_start = &outer->inner;
  else
    pp_start = &cfun->eh->region_tree;
  for (pp = pp_start, p = *pp; p != region; pp = &p->next_peer, p = *pp)
    continue;

  remove_eh_handler_splicer (pp);
}

/* Worker for remove_unreachable_eh_regions.
   PP is a pointer to the region to start a region tree depth-first
   search from.  R_REACHABLE is the set of regions that have to be
   preserved.  */

static void
remove_unreachable_eh_regions_worker (eh_region *pp, sbitmap r_reachable)
{
  while (*pp)
    {
      eh_region region = *pp;
      remove_unreachable_eh_regions_worker (&region->inner, r_reachable);
      if (!bitmap_bit_p (r_reachable, region->index))
        remove_eh_handler_splicer (pp);
      else
        pp = &region->next_peer;
    }
}

/* Splice all EH regions *not* marked in R_REACHABLE from the region tree.
   Do this by traversing the EH tree top-down and splicing out regions
   that are not marked.  By removing regions from the leaves, we avoid
   costly searches in the region tree.  */

void
remove_unreachable_eh_regions (sbitmap r_reachable)
{
  remove_unreachable_eh_regions_worker (&cfun->eh->region_tree, r_reachable);
}

/* Invokes CALLBACK for every exception handler landing pad label.
   Only used by reload hackery; should not be used by new code.  */

void
for_each_eh_label (void (*callback) (rtx))
{
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    {
      if (lp)
        {
          rtx_code_label *lab = lp->landing_pad;
          if (lab && LABEL_P (lab))
            (*callback) (lab);
        }
    }
}

/* Create the REG_EH_REGION note for INSN, given its ECF_FLAGS for a
   call insn.

   At the gimple level, we use LP_NR
       > 0 : The statement transfers to landing pad LP_NR
       = 0 : The statement is outside any EH region
       < 0 : The statement is within MUST_NOT_THROW region -LP_NR.

   At the rtl level, we use LP_NR
       > 0 : The insn transfers to landing pad LP_NR
       = 0 : The insn cannot throw
       < 0 : The insn is within MUST_NOT_THROW region -LP_NR
       = INT_MIN : The insn cannot throw or execute a nonlocal-goto.
       missing note: The insn is outside any EH region.

   ??? This difference probably ought to be avoided.  We could stand
   to record nothrow for arbitrary gimple statements, and so avoid
   some moderately complex lookups in stmt_could_throw_p.  Perhaps
   NOTHROW should be mapped on both sides to INT_MIN.  Perhaps the
   no-nonlocal-goto property should be recorded elsewhere as a bit
   on the call_insn directly.  Perhaps we should make more use of
   attaching the trees to call_insns (reachable via symbol_ref in
   direct call cases) and just pull the data out of the trees.  */
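
/* For instance (the numbers are illustrative), a call that unwinds to
   landing pad 2 carries a REG_EH_REGION note with value 2, while a call
   inside MUST_NOT_THROW region 3 carries the value -3.  */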

void
make_reg_eh_region_note (rtx_insn *insn, int ecf_flags, int lp_nr)
{
  rtx value;
  if (ecf_flags & ECF_NOTHROW)
    value = const0_rtx;
  else if (lp_nr != 0)
    value = GEN_INT (lp_nr);
  else
    return;
  add_reg_note (insn, REG_EH_REGION, value);
}

/* Create a REG_EH_REGION note for a CALL_INSN that cannot throw
   nor perform a non-local goto.  Replace the region note if it
   already exists.  */

void
make_reg_eh_region_note_nothrow_nononlocal (rtx_insn *insn)
{
  rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
  rtx intmin = GEN_INT (INT_MIN);

  if (note != 0)
    XEXP (note, 0) = intmin;
  else
    add_reg_note (insn, REG_EH_REGION, intmin);
}

/* Return true if INSN could throw, assuming no REG_EH_REGION note
   to the contrary.  */

bool
insn_could_throw_p (const_rtx insn)
{
  if (!flag_exceptions)
    return false;
  if (CALL_P (insn))
    return true;
  if (INSN_P (insn) && cfun->can_throw_non_call_exceptions)
    return may_trap_p (PATTERN (insn));
  return false;
}

/* Copy a REG_EH_REGION note to each insn that might throw beginning
   at FIRST and ending at LAST.  NOTE_OR_INSN is either the source insn
   to look for a note, or the note itself.  */

void
copy_reg_eh_region_note_forward (rtx note_or_insn, rtx_insn *first, rtx last)
{
  rtx_insn *insn;
  rtx note = note_or_insn;

  if (INSN_P (note_or_insn))
    {
      note = find_reg_note (note_or_insn, REG_EH_REGION, NULL_RTX);
      if (note == NULL)
        return;
    }
  else if (is_a <rtx_insn *> (note_or_insn))
    return;
  note = XEXP (note, 0);

  for (insn = first; insn != last ; insn = NEXT_INSN (insn))
    if (!find_reg_note (insn, REG_EH_REGION, NULL_RTX)
        && insn_could_throw_p (insn))
      add_reg_note (insn, REG_EH_REGION, note);
}

/* Likewise, but iterate backward.  */

void
copy_reg_eh_region_note_backward (rtx note_or_insn, rtx_insn *last, rtx first)
{
  rtx_insn *insn;
  rtx note = note_or_insn;

  if (INSN_P (note_or_insn))
    {
      note = find_reg_note (note_or_insn, REG_EH_REGION, NULL_RTX);
      if (note == NULL)
        return;
    }
  else if (is_a <rtx_insn *> (note_or_insn))
    return;
  note = XEXP (note, 0);

  for (insn = last; insn != first; insn = PREV_INSN (insn))
    if (insn_could_throw_p (insn))
      add_reg_note (insn, REG_EH_REGION, note);
}
1790

/* Extract all EH information from INSN.  Return true if the insn
   was marked NOTHROW.  */

static bool
get_eh_region_and_lp_from_rtx (const_rtx insn, eh_region *pr,
			       eh_landing_pad *plp)
{
  eh_landing_pad lp = NULL;
  eh_region r = NULL;
  bool ret = false;
  rtx note;
  int lp_nr;

  if (! INSN_P (insn))
    goto egress;

  if (NONJUMP_INSN_P (insn)
      && GET_CODE (PATTERN (insn)) == SEQUENCE)
    insn = XVECEXP (PATTERN (insn), 0, 0);

  note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
  if (!note)
    {
      ret = !insn_could_throw_p (insn);
      goto egress;
    }

  lp_nr = INTVAL (XEXP (note, 0));
  if (lp_nr == 0 || lp_nr == INT_MIN)
    {
      ret = true;
      goto egress;
    }

  if (lp_nr < 0)
    r = (*cfun->eh->region_array)[-lp_nr];
  else
    {
      lp = (*cfun->eh->lp_array)[lp_nr];
      r = lp->region;
    }

 egress:
  *plp = lp;
  *pr = r;
  return ret;
}

/* Return the landing pad to which INSN may go, or NULL if it does not
   have a reachable landing pad within this function.  */

eh_landing_pad
get_eh_landing_pad_from_rtx (const_rtx insn)
{
  eh_landing_pad lp;
  eh_region r;

  get_eh_region_and_lp_from_rtx (insn, &r, &lp);
  return lp;
}

/* Return the region to which INSN may go, or NULL if it does not
   have a reachable region within this function.  */

eh_region
get_eh_region_from_rtx (const_rtx insn)
{
  eh_landing_pad lp;
  eh_region r;

  get_eh_region_and_lp_from_rtx (insn, &r, &lp);
  return r;
}

/* Return true if INSN throws and is caught by something in this function.  */

bool
can_throw_internal (const_rtx insn)
{
  return get_eh_landing_pad_from_rtx (insn) != NULL;
}

/* Return true if INSN throws and escapes from the current function.  */

bool
can_throw_external (const_rtx insn)
{
  eh_landing_pad lp;
  eh_region r;
  bool nothrow;

  if (! INSN_P (insn))
    return false;

  if (NONJUMP_INSN_P (insn)
      && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn));
      int i, n = seq->len ();

      for (i = 0; i < n; i++)
	if (can_throw_external (seq->element (i)))
	  return true;

      return false;
    }

  nothrow = get_eh_region_and_lp_from_rtx (insn, &r, &lp);

  /* If we can't throw, we obviously can't throw external.  */
  if (nothrow)
    return false;

  /* If we have an internal landing pad, then we're not external.  */
  if (lp != NULL)
    return false;

  /* If we're not within an EH region, then we are external.  */
  if (r == NULL)
    return true;

  /* The only thing that ought to be left is MUST_NOT_THROW regions,
     which don't always have landing pads.  */
  gcc_assert (r->type == ERT_MUST_NOT_THROW);
  return false;
}

/* Return true if INSN cannot throw at all.  */

bool
insn_nothrow_p (const_rtx insn)
{
  eh_landing_pad lp;
  eh_region r;

  if (! INSN_P (insn))
    return true;

  if (NONJUMP_INSN_P (insn)
      && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn));
      int i, n = seq->len ();

      for (i = 0; i < n; i++)
	if (!insn_nothrow_p (seq->element (i)))
	  return false;

      return true;
    }

  return get_eh_region_and_lp_from_rtx (insn, &r, &lp);
}

/* Return true if INSN can perform a non-local goto.  */
/* ??? This test is here in this file because it (ab)uses REG_EH_REGION.  */

bool
can_nonlocal_goto (const rtx_insn *insn)
{
  if (nonlocal_goto_handler_labels && CALL_P (insn))
    {
      rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
      if (!note || INTVAL (XEXP (note, 0)) != INT_MIN)
	return true;
    }
  return false;
}

/* Set TREE_NOTHROW and crtl->all_throwers_are_sibcalls.  */

static unsigned int
set_nothrow_function_flags (void)
{
  rtx_insn *insn;

  crtl->nothrow = 1;

  /* Assume crtl->all_throwers_are_sibcalls until we encounter
     something that can throw an exception.  We specifically exempt
     CALL_INSNs that are SIBLING_CALL_P, as these are really jumps,
     and can't throw.  Most CALL_INSNs are not SIBLING_CALL_P, so this
     is optimistic.  */

  crtl->all_throwers_are_sibcalls = 1;

  /* If we don't know that this implementation of the function will
     actually be used, then we must not set TREE_NOTHROW, since
     callers must not assume that this function does not throw.  */
  if (TREE_NOTHROW (current_function_decl))
    return 0;

  if (! flag_exceptions)
    return 0;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (can_throw_external (insn))
      {
	crtl->nothrow = 0;

	if (!CALL_P (insn) || !SIBLING_CALL_P (insn))
	  {
	    crtl->all_throwers_are_sibcalls = 0;
	    return 0;
	  }
      }

  if (crtl->nothrow
      && (cgraph_node::get (current_function_decl)->get_availability ()
	  >= AVAIL_AVAILABLE))
    {
      struct cgraph_node *node = cgraph_node::get (current_function_decl);
      struct cgraph_edge *e;
      for (e = node->callers; e; e = e->next_caller)
	e->can_throw_external = false;
      node->set_nothrow_flag (true);

      if (dump_file)
	fprintf (dump_file, "Marking function nothrow: %s\n\n",
		 current_function_name ());
    }
  return 0;
}

namespace {

const pass_data pass_data_set_nothrow_function_flags =
{
  RTL_PASS, /* type */
  "nothrow", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_set_nothrow_function_flags : public rtl_opt_pass
{
public:
  pass_set_nothrow_function_flags (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_set_nothrow_function_flags, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return set_nothrow_function_flags ();
    }

}; // class pass_set_nothrow_function_flags

} // anon namespace

rtl_opt_pass *
make_pass_set_nothrow_function_flags (gcc::context *ctxt)
{
  return new pass_set_nothrow_function_flags (ctxt);
}


/* Various hooks for unwind library.  */

/* Expand the EH support builtin functions:
   __builtin_eh_pointer and __builtin_eh_filter.  */

static eh_region
expand_builtin_eh_common (tree region_nr_t)
{
  HOST_WIDE_INT region_nr;
  eh_region region;

  gcc_assert (tree_fits_shwi_p (region_nr_t));
  region_nr = tree_to_shwi (region_nr_t);

  region = (*cfun->eh->region_array)[region_nr];

  /* ??? We shouldn't have been able to delete an eh region without
     deleting all the code that depended on it.  */
  gcc_assert (region != NULL);

  return region;
}

/* Expand to the exc_ptr value from the given eh region.  */

rtx
expand_builtin_eh_pointer (tree exp)
{
  eh_region region
    = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 0));
  if (region->exc_ptr_reg == NULL)
    region->exc_ptr_reg = gen_reg_rtx (ptr_mode);
  return region->exc_ptr_reg;
}

/* Expand to the filter value from the given eh region.  */

rtx
expand_builtin_eh_filter (tree exp)
{
  eh_region region
    = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 0));
  if (region->filter_reg == NULL)
    region->filter_reg = gen_reg_rtx (targetm.eh_return_filter_mode ());
  return region->filter_reg;
}

/* Copy the exc_ptr and filter values from one landing pad's registers
   to another.  This is used to inline the resx statement.  */

rtx
expand_builtin_eh_copy_values (tree exp)
{
  eh_region dst
    = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 0));
  eh_region src
    = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 1));
  scalar_int_mode fmode = targetm.eh_return_filter_mode ();

  if (dst->exc_ptr_reg == NULL)
    dst->exc_ptr_reg = gen_reg_rtx (ptr_mode);
  if (src->exc_ptr_reg == NULL)
    src->exc_ptr_reg = gen_reg_rtx (ptr_mode);

  if (dst->filter_reg == NULL)
    dst->filter_reg = gen_reg_rtx (fmode);
  if (src->filter_reg == NULL)
    src->filter_reg = gen_reg_rtx (fmode);

  emit_move_insn (dst->exc_ptr_reg, src->exc_ptr_reg);
  emit_move_insn (dst->filter_reg, src->filter_reg);

  return const0_rtx;
}

/* Do any necessary initialization to access arbitrary stack frames.
   On the SPARC, this means flushing the register windows.  */

void
expand_builtin_unwind_init (void)
{
  /* Set this so all the registers get saved in our frame; we need to be
     able to copy the saved values for any registers from frames we unwind.  */
  crtl->saves_all_registers = 1;

  SETUP_FRAME_ADDRESSES ();
}

/* Map a non-negative number to an eh return data register number; expands
   to -1 if no return data register is associated with the input number.
   At least the inputs 0 and 1 must be mapped; the target may provide more.  */

rtx
expand_builtin_eh_return_data_regno (tree exp)
{
  tree which = CALL_EXPR_ARG (exp, 0);
  unsigned HOST_WIDE_INT iwhich;

  if (TREE_CODE (which) != INTEGER_CST)
    {
      error ("argument of %<__builtin_eh_return_regno%> must be constant");
      return constm1_rtx;
    }

  iwhich = tree_to_uhwi (which);
  iwhich = EH_RETURN_DATA_REGNO (iwhich);
  if (iwhich == INVALID_REGNUM)
    return constm1_rtx;

#ifdef DWARF_FRAME_REGNUM
  iwhich = DWARF_FRAME_REGNUM (iwhich);
#else
  iwhich = DBX_REGISTER_NUMBER (iwhich);
#endif

  return GEN_INT (iwhich);
}

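/* For illustration only: a sketch of typical source-level usage, roughly
   as in a personality routine (variable names here are hypothetical):

     _Unwind_SetGR (context, __builtin_eh_return_data_regno (0),
		    (_Unwind_Word) exc_ptr);
     _Unwind_SetGR (context, __builtin_eh_return_data_regno (1), filter);

   The argument must be an INTEGER_CST, per the check above.  */
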
/* Given a value extracted from the return address register or stack slot,
   return the actual address encoded in that value.  */

rtx
expand_builtin_extract_return_addr (tree addr_tree)
{
  rtx addr = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL);

  if (GET_MODE (addr) != Pmode
      && GET_MODE (addr) != VOIDmode)
    {
#ifdef POINTERS_EXTEND_UNSIGNED
      addr = convert_memory_address (Pmode, addr);
#else
      addr = convert_to_mode (Pmode, addr, 0);
#endif
    }

  /* First mask out any unwanted bits.  */
  rtx mask = MASK_RETURN_ADDR;
  if (mask)
    expand_and (Pmode, addr, mask, addr);

  /* Then adjust to find the real return address.  */
  if (RETURN_ADDR_OFFSET)
    addr = plus_constant (Pmode, addr, RETURN_ADDR_OFFSET);

  return addr;
}

/* Given an actual address in addr_tree, do any necessary encoding
   and return the value to be stored in the return address register or
   stack slot so the epilogue will return to that address.  */

rtx
expand_builtin_frob_return_addr (tree addr_tree)
{
  rtx addr = expand_expr (addr_tree, NULL_RTX, ptr_mode, EXPAND_NORMAL);

  addr = convert_memory_address (Pmode, addr);

  if (RETURN_ADDR_OFFSET)
    {
      addr = force_reg (Pmode, addr);
      addr = plus_constant (Pmode, addr, -RETURN_ADDR_OFFSET);
    }

  return addr;
}

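/* For illustration only: extract and frob are intended as inverses with
   respect to RETURN_ADDR_OFFSET (and MASK_RETURN_ADDR, provided the
   masked bits were already clear).  A sketch of the round trip, with
   hypothetical variable names:

     void *raw = __builtin_return_address (0);
     void *pc  = __builtin_extract_return_addr (raw);

   __builtin_frob_return_addr (pc) then recreates a value suitable for
   storing back into the return-address register or slot.  */
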
/* Set up the epilogue with the magic bits we'll need to return to the
   exception handler.  */

void
expand_builtin_eh_return (tree stackadj_tree ATTRIBUTE_UNUSED,
			  tree handler_tree)
{
  rtx tmp;

#ifdef EH_RETURN_STACKADJ_RTX
  tmp = expand_expr (stackadj_tree, crtl->eh.ehr_stackadj,
		     VOIDmode, EXPAND_NORMAL);
  tmp = convert_memory_address (Pmode, tmp);
  if (!crtl->eh.ehr_stackadj)
    crtl->eh.ehr_stackadj = copy_addr_to_reg (tmp);
  else if (tmp != crtl->eh.ehr_stackadj)
    emit_move_insn (crtl->eh.ehr_stackadj, tmp);
#endif

  tmp = expand_expr (handler_tree, crtl->eh.ehr_handler,
		     VOIDmode, EXPAND_NORMAL);
  tmp = convert_memory_address (Pmode, tmp);
  if (!crtl->eh.ehr_handler)
    crtl->eh.ehr_handler = copy_addr_to_reg (tmp);
  else if (tmp != crtl->eh.ehr_handler)
    emit_move_insn (crtl->eh.ehr_handler, tmp);

  if (!crtl->eh.ehr_label)
    crtl->eh.ehr_label = gen_label_rtx ();
  emit_jump (crtl->eh.ehr_label);
}

/* Expand __builtin_eh_return.  This exit path from the function loads up
   the eh return data registers, adjusts the stack, and branches to a
   given PC other than the normal return address.  */

void
expand_eh_return (void)
{
  rtx_code_label *around_label;

  if (! crtl->eh.ehr_label)
    return;

  crtl->calls_eh_return = 1;

#ifdef EH_RETURN_STACKADJ_RTX
  emit_move_insn (EH_RETURN_STACKADJ_RTX, const0_rtx);
#endif

  around_label = gen_label_rtx ();
  emit_jump (around_label);

  emit_label (crtl->eh.ehr_label);
  clobber_return_register ();

#ifdef EH_RETURN_STACKADJ_RTX
  emit_move_insn (EH_RETURN_STACKADJ_RTX, crtl->eh.ehr_stackadj);
#endif

  if (targetm.have_eh_return ())
    emit_insn (targetm.gen_eh_return (crtl->eh.ehr_handler));
  else
    {
      if (rtx handler = EH_RETURN_HANDLER_RTX)
	emit_move_insn (handler, crtl->eh.ehr_handler);
      else
	error ("__builtin_eh_return not supported on this target");
    }

  emit_label (around_label);
}

/* Convert a ptr_mode address ADDR_TREE to a Pmode address controlled by
   POINTERS_EXTEND_UNSIGNED and return it.  */

rtx
expand_builtin_extend_pointer (tree addr_tree)
{
  rtx addr = expand_expr (addr_tree, NULL_RTX, ptr_mode, EXPAND_NORMAL);
  int extend;

#ifdef POINTERS_EXTEND_UNSIGNED
  extend = POINTERS_EXTEND_UNSIGNED;
#else
  /* The previous EH code did an unsigned extend by default, so we do this
     also for consistency.  */
  extend = 1;
#endif

  return convert_modes (targetm.unwind_word_mode (), ptr_mode, addr, extend);
}

static int
add_action_record (action_hash_type *ar_hash, int filter, int next)
{
  struct action_record **slot, *new_ar, tmp;

  tmp.filter = filter;
  tmp.next = next;
  slot = ar_hash->find_slot (&tmp, INSERT);

  if ((new_ar = *slot) == NULL)
    {
      new_ar = XNEW (struct action_record);
      new_ar->offset = crtl->eh.action_record_data->length () + 1;
      new_ar->filter = filter;
      new_ar->next = next;
      *slot = new_ar;

      /* The filter value goes in untouched.  The link to the next
	 record is a "self-relative" byte offset, or zero to indicate
	 that there is no next record.  So convert the absolute 1-based
	 indices we've been carrying around into a displacement.  */

      push_sleb128 (&crtl->eh.action_record_data, filter);
      if (next)
	next -= crtl->eh.action_record_data->length () + 1;
      push_sleb128 (&crtl->eh.action_record_data, next);
    }

  return new_ar->offset;
}

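/* For illustration only: a worked example of the encoding above,
   starting from an empty action_record_data vector:

     add_action_record (h, 1, 0)  -> offset 1, emits bytes 01 00
       (filter 1; next 0 terminates the chain)
     add_action_record (h, 2, 1)  -> offset 3, emits bytes 02 7D
       (filter 2; the next field sits at 1-based offset 4 and
	sleb128 0x7D = -3, so 4 + -3 = 1 links back to the first record)

   Requesting the same (filter, next) pair again returns the cached
   offset without emitting new bytes.  */
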
static int
collect_one_action_chain (action_hash_type *ar_hash, eh_region region)
{
  int next;

  /* If we've reached the top of the region chain, then we have
     no actions, and require no landing pad.  */
  if (region == NULL)
    return -1;

  switch (region->type)
    {
    case ERT_CLEANUP:
      {
	eh_region r;
	/* A cleanup adds a zero filter to the beginning of the chain, but
	   there are special cases to look out for.  If there are *only*
	   cleanups along a path, then it compresses to a zero action.
	   Further, if there are multiple cleanups along a path, we only
	   need to represent one of them, as that is enough to trigger
	   entry to the landing pad at runtime.  */
	next = collect_one_action_chain (ar_hash, region->outer);
	if (next <= 0)
	  return 0;
	for (r = region->outer; r ; r = r->outer)
	  if (r->type == ERT_CLEANUP)
	    return next;
	return add_action_record (ar_hash, 0, next);
      }

    case ERT_TRY:
      {
	eh_catch c;

	/* Process the associated catch regions in reverse order.
	   If there's a catch-all handler, then we don't need to
	   search outer regions.  Use a magic -3 value to record
	   that we haven't done the outer search.  */
	next = -3;
	for (c = region->u.eh_try.last_catch; c ; c = c->prev_catch)
	  {
	    if (c->type_list == NULL)
	      {
		/* Retrieve the filter from the head of the filter list
		   where we have stored it (see assign_filter_values).  */
		int filter = TREE_INT_CST_LOW (TREE_VALUE (c->filter_list));
		next = add_action_record (ar_hash, filter, 0);
	      }
	    else
	      {
		/* Once the outer search is done, trigger an action record for
		   each filter we have.  */
		tree flt_node;

		if (next == -3)
		  {
		    next = collect_one_action_chain (ar_hash, region->outer);

		    /* If there is no next action, terminate the chain.  */
		    if (next == -1)
		      next = 0;
		    /* If all outer actions are cleanups or must_not_throw,
		       we'll have no action record for it, since we had wanted
		       to encode these states in the call-site record directly.
		       Add a cleanup action to the chain to catch these.  */
		    else if (next <= 0)
		      next = add_action_record (ar_hash, 0, 0);
		  }

		flt_node = c->filter_list;
		for (; flt_node; flt_node = TREE_CHAIN (flt_node))
		  {
		    int filter = TREE_INT_CST_LOW (TREE_VALUE (flt_node));
		    next = add_action_record (ar_hash, filter, next);
		  }
	      }
	  }
	return next;
      }

    case ERT_ALLOWED_EXCEPTIONS:
      /* An exception specification adds its filter to the
	 beginning of the chain.  */
      next = collect_one_action_chain (ar_hash, region->outer);

      /* If there is no next action, terminate the chain.  */
      if (next == -1)
	next = 0;
      /* If all outer actions are cleanups or must_not_throw,
	 we'll have no action record for it, since we had wanted
	 to encode these states in the call-site record directly.
	 Add a cleanup action to the chain to catch these.  */
      else if (next <= 0)
	next = add_action_record (ar_hash, 0, 0);

      return add_action_record (ar_hash, region->u.allowed.filter, next);

    case ERT_MUST_NOT_THROW:
      /* A must-not-throw region with no inner handlers or cleanups
	 requires no call-site entry.  Note that this differs from
	 the no handler or cleanup case in that we do require an lsda
	 to be generated.  Return a magic -2 value to record this.  */
      return -2;
    }

  gcc_unreachable ();
}


static int
add_call_site (rtx landing_pad, int action, int section)
{
  call_site_record record;

  record = ggc_alloc<call_site_record_d> ();
  record->landing_pad = landing_pad;
  record->action = action;

  vec_safe_push (crtl->eh.call_site_record_v[section], record);

  return call_site_base + crtl->eh.call_site_record_v[section]->length () - 1;
}

static rtx_note *
emit_note_eh_region_end (rtx_insn *insn)
{
  return emit_note_after (NOTE_INSN_EH_REGION_END, insn);
}

/* Add a NOP after NOTE_INSN_SWITCH_TEXT_SECTIONS when the cold section
   starts with a landing pad.  If the landing pad were at offset 0 from
   the start label of the section, we would miss EH delivery, because a
   0 offset is special and means no landing pad.  */

static bool
maybe_add_nop_after_section_switch (void)
{
  if (!crtl->uses_eh_lsda
      || !crtl->eh.call_site_record_v[1])
    return false;
  int n = vec_safe_length (crtl->eh.call_site_record_v[1]);
  hash_set<rtx_insn *> visited;

  for (int i = 0; i < n; ++i)
    {
      struct call_site_record_d *cs
	= (*crtl->eh.call_site_record_v[1])[i];
      if (cs->landing_pad)
	{
	  rtx_insn *insn = as_a <rtx_insn *> (cs->landing_pad);
	  while (true)
	    {
	      /* Landing pads have the LABEL_PRESERVE_P flag set.  This
		 check makes sure that we do not walk past a landing pad
		 visited earlier, which would result in possible quadratic
		 behaviour.  */
	      if (LABEL_P (insn) && LABEL_PRESERVE_P (insn)
		  && visited.add (insn))
		break;

	      /* Conservatively assume that an ASM insn may be empty; we
		 have no way to tell what it contains.  */
	      if (active_insn_p (insn)
		  && GET_CODE (PATTERN (insn)) != ASM_INPUT
		  && GET_CODE (PATTERN (insn)) != ASM_OPERANDS)
		break;

	      /* If we reached the section switch note, the landing pad is
		 at the very start of the cold section and a NOP will be
		 needed.  */
	      if (GET_CODE (insn) == NOTE
		  && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
		{
		  emit_insn_after (gen_nop (), insn);
		  break;
		}

	      /* We visit only labels from the cold section; we should
		 never hit the beginning of the insn stream here.  */
	      insn = PREV_INSN (insn);
	    }
	}
    }
  return false;
}

/* Turn REG_EH_REGION notes back into NOTE_INSN_EH_REGION notes.
   The new note numbers will not refer to region numbers, but
   instead to call site entries.  */

static unsigned int
convert_to_eh_region_ranges (void)
{
  rtx insn;
  rtx_insn *iter;
  rtx_note *note;
  action_hash_type ar_hash (31);
  int last_action = -3;
  rtx_insn *last_action_insn = NULL;
  rtx last_landing_pad = NULL_RTX;
  rtx_insn *first_no_action_insn = NULL;
  int call_site = 0;
  int cur_sec = 0;
  rtx_insn *section_switch_note = NULL;
  rtx_insn *first_no_action_insn_before_switch = NULL;
  rtx_insn *last_no_action_insn_before_switch = NULL;
  int saved_call_site_base = call_site_base;

  vec_alloc (crtl->eh.action_record_data, 64);

  for (iter = get_insns (); iter ; iter = NEXT_INSN (iter))
    if (INSN_P (iter))
      {
	eh_landing_pad lp;
	eh_region region;
	bool nothrow;
	int this_action;
	rtx_code_label *this_landing_pad;

	insn = iter;
	if (NONJUMP_INSN_P (insn)
	    && GET_CODE (PATTERN (insn)) == SEQUENCE)
	  insn = XVECEXP (PATTERN (insn), 0, 0);

	nothrow = get_eh_region_and_lp_from_rtx (insn, &region, &lp);
	if (nothrow)
	  continue;
	if (region)
	  this_action = collect_one_action_chain (&ar_hash, region);
	else
	  this_action = -1;

	/* Existence of catch handlers, or must-not-throw regions
	   implies that an lsda is needed (even if empty).  */
	if (this_action != -1)
	  crtl->uses_eh_lsda = 1;

	/* Delay creation of region notes for no-action regions
	   until we're sure that an lsda will be required.  */
	else if (last_action == -3)
	  {
	    first_no_action_insn = iter;
	    last_action = -1;
	  }

	if (this_action >= 0)
	  this_landing_pad = lp->landing_pad;
	else
	  this_landing_pad = NULL;

	/* Differing actions or landing pads implies a change in call-site
	   info, which implies some EH_REGION note should be emitted.  */
	if (last_action != this_action
	    || last_landing_pad != this_landing_pad)
	  {
	    /* If there is a queued no-action region in the other section
	       with hot/cold partitioning, emit it now.  */
	    if (first_no_action_insn_before_switch)
	      {
		gcc_assert (this_action != -1
			    && last_action == (first_no_action_insn
					       ? -1 : -3));
		call_site = add_call_site (NULL_RTX, 0, 0);
		note = emit_note_before (NOTE_INSN_EH_REGION_BEG,
					 first_no_action_insn_before_switch);
		NOTE_EH_HANDLER (note) = call_site;
		note
		  = emit_note_eh_region_end (last_no_action_insn_before_switch);
		NOTE_EH_HANDLER (note) = call_site;
		gcc_assert (last_action != -3
			    || (last_action_insn
				== last_no_action_insn_before_switch));
		first_no_action_insn_before_switch = NULL;
		last_no_action_insn_before_switch = NULL;
		call_site_base++;
	      }
	    /* If we'd not seen a previous action (-3) or the previous
	       action was must-not-throw (-2), then we do not need an
	       end note.  */
	    if (last_action >= -1)
	      {
		/* If we delayed the creation of the begin, do it now.  */
		if (first_no_action_insn)
		  {
		    call_site = add_call_site (NULL_RTX, 0, cur_sec);
		    note = emit_note_before (NOTE_INSN_EH_REGION_BEG,
					     first_no_action_insn);
		    NOTE_EH_HANDLER (note) = call_site;
		    first_no_action_insn = NULL;
		  }

		note = emit_note_eh_region_end (last_action_insn);
		NOTE_EH_HANDLER (note) = call_site;
	      }

	    /* If the new action is must-not-throw, then no region notes
	       are created.  */
	    if (this_action >= -1)
	      {
		call_site = add_call_site (this_landing_pad,
					   this_action < 0 ? 0 : this_action,
					   cur_sec);
		note = emit_note_before (NOTE_INSN_EH_REGION_BEG, iter);
		NOTE_EH_HANDLER (note) = call_site;
	      }

	    last_action = this_action;
	    last_landing_pad = this_landing_pad;
	  }
	last_action_insn = iter;
      }
    else if (NOTE_P (iter)
	     && NOTE_KIND (iter) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
      {
	gcc_assert (section_switch_note == NULL_RTX);
	gcc_assert (flag_reorder_blocks_and_partition);
	section_switch_note = iter;
	if (first_no_action_insn)
	  {
	    first_no_action_insn_before_switch = first_no_action_insn;
	    last_no_action_insn_before_switch = last_action_insn;
	    first_no_action_insn = NULL;
	    gcc_assert (last_action == -1);
	    last_action = -3;
	  }
	/* Force closing of current EH region before section switch and
	   opening a new one afterwards.  */
	else if (last_action != -3)
	  last_landing_pad = pc_rtx;
	if (crtl->eh.call_site_record_v[cur_sec])
	  call_site_base += crtl->eh.call_site_record_v[cur_sec]->length ();
	cur_sec++;
	gcc_assert (crtl->eh.call_site_record_v[cur_sec] == NULL);
	vec_alloc (crtl->eh.call_site_record_v[cur_sec], 10);
      }

  if (last_action >= -1 && ! first_no_action_insn)
    {
      note = emit_note_eh_region_end (last_action_insn);
      NOTE_EH_HANDLER (note) = call_site;
    }

  call_site_base = saved_call_site_base;

  return 0;
}

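/* For illustration only: after this pass, a throwing insn ends up
   bracketed roughly as

     NOTE_INSN_EH_REGION_BEG   (NOTE_EH_HANDLER = call-site index)
     (call_insn ...)
     NOTE_INSN_EH_REGION_END   (NOTE_EH_HANDLER = call-site index)

   where the index names an entry created by add_call_site above,
   not an EH region number.  */
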
namespace {

const pass_data pass_data_convert_to_eh_region_ranges =
{
  RTL_PASS, /* type */
  "eh_ranges", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_convert_to_eh_region_ranges : public rtl_opt_pass
{
public:
  pass_convert_to_eh_region_ranges (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_convert_to_eh_region_ranges, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *)
    {
      int ret = convert_to_eh_region_ranges ();
      maybe_add_nop_after_section_switch ();
      return ret;
    }

}; // class pass_convert_to_eh_region_ranges

bool
pass_convert_to_eh_region_ranges::gate (function *)
{
  /* Nothing to do for SJLJ exceptions or if no regions created.  */
  if (cfun->eh->region_tree == NULL)
    return false;
  if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
    return false;
  return true;
}

} // anon namespace

rtl_opt_pass *
make_pass_convert_to_eh_region_ranges (gcc::context *ctxt)
{
  return new pass_convert_to_eh_region_ranges (ctxt);
}

static void
push_uleb128 (vec<uchar, va_gc> **data_area, unsigned int value)
{
  do
    {
      unsigned char byte = value & 0x7f;
      value >>= 7;
      if (value)
	byte |= 0x80;
      vec_safe_push (*data_area, byte);
    }
  while (value);
}

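/* For illustration only: a worked uleb128 example.  The value 624485
   (0x98765) splits into 7-bit groups from the least significant end,
   101 / 14 / 38, and is emitted low group first with the continuation
   bit (0x80) set on all but the last byte:

     push_uleb128 (&v, 624485)  pushes the bytes 0xE5 0x8E 0x26.  */
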
static void
push_sleb128 (vec<uchar, va_gc> **data_area, int value)
{
  unsigned char byte;
  int more;

  do
    {
      byte = value & 0x7f;
      value >>= 7;
      more = ! ((value == 0 && (byte & 0x40) == 0)
		|| (value == -1 && (byte & 0x40) != 0));
      if (more)
	byte |= 0x80;
      vec_safe_push (*data_area, byte);
    }
  while (more);
}

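/* For illustration only: a worked sleb128 example.  Negative values
   stop once the remaining (sign-extended) value is -1 and the sign bit
   (0x40) of the last byte is set:

     push_sleb128 (&v, -123456)  pushes the bytes 0xC0 0xBB 0x78.  */
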
static int
dw2_size_of_call_site_table (int section)
{
  int n = vec_safe_length (crtl->eh.call_site_record_v[section]);
  /* Each record carries a region start, a region length, and a landing
     pad offset, each 4 bytes in the udata4 format, plus a uleb128
     action added below.  */
  int size = n * (4 + 4 + 4);
  int i;

  for (i = 0; i < n; ++i)
    {
      struct call_site_record_d *cs =
	(*crtl->eh.call_site_record_v[section])[i];
      size += size_of_uleb128 (cs->action);
    }

  return size;
}

static int
sjlj_size_of_call_site_table (void)
{
  int n = vec_safe_length (crtl->eh.call_site_record_v[0]);
  int size = 0;
  int i;

  for (i = 0; i < n; ++i)
    {
      struct call_site_record_d *cs =
	(*crtl->eh.call_site_record_v[0])[i];
      size += size_of_uleb128 (INTVAL (cs->landing_pad));
      size += size_of_uleb128 (cs->action);
    }

  return size;
}

static void
dw2_output_call_site_table (int cs_format, int section)
{
  int n = vec_safe_length (crtl->eh.call_site_record_v[section]);
  int i;
  const char *begin;

  if (section == 0)
    begin = current_function_func_begin_label;
  else if (first_function_block_is_cold)
    begin = crtl->subsections.hot_section_label;
  else
    begin = crtl->subsections.cold_section_label;

  for (i = 0; i < n; ++i)
    {
      struct call_site_record_d *cs = (*crtl->eh.call_site_record_v[section])[i];
      char reg_start_lab[32];
      char reg_end_lab[32];
      char landing_pad_lab[32];

      ASM_GENERATE_INTERNAL_LABEL (reg_start_lab, "LEHB", call_site_base + i);
      ASM_GENERATE_INTERNAL_LABEL (reg_end_lab, "LEHE", call_site_base + i);

      if (cs->landing_pad)
	ASM_GENERATE_INTERNAL_LABEL (landing_pad_lab, "L",
				     CODE_LABEL_NUMBER (cs->landing_pad));

      /* ??? Perhaps use insn length scaling if the assembler supports
	 generic arithmetic.  */
      /* ??? Perhaps use attr_length to choose data1 or data2 instead of
	 data4 if the function is small enough.  */
      if (cs_format == DW_EH_PE_uleb128)
	{
	  dw2_asm_output_delta_uleb128 (reg_start_lab, begin,
					"region %d start", i);
	  dw2_asm_output_delta_uleb128 (reg_end_lab, reg_start_lab,
					"length");
	  if (cs->landing_pad)
	    dw2_asm_output_delta_uleb128 (landing_pad_lab, begin,
					  "landing pad");
	  else
	    dw2_asm_output_data_uleb128 (0, "landing pad");
	}
      else
	{
	  dw2_asm_output_delta (4, reg_start_lab, begin,
				"region %d start", i);
	  dw2_asm_output_delta (4, reg_end_lab, reg_start_lab, "length");
	  if (cs->landing_pad)
	    dw2_asm_output_delta (4, landing_pad_lab, begin,
				  "landing pad");
	  else
	    dw2_asm_output_data (4, 0, "landing pad");
	}
      dw2_asm_output_data_uleb128 (cs->action, "action");
    }

  call_site_base += n;
}

static void
sjlj_output_call_site_table (void)
{
  int n = vec_safe_length (crtl->eh.call_site_record_v[0]);
  int i;

  for (i = 0; i < n; ++i)
    {
      struct call_site_record_d *cs = (*crtl->eh.call_site_record_v[0])[i];

      dw2_asm_output_data_uleb128 (INTVAL (cs->landing_pad),
				   "region %d landing pad", i);
      dw2_asm_output_data_uleb128 (cs->action, "action");
    }

  call_site_base += n;
}

/* Switch to the section that should be used for exception tables.  */

static void
switch_to_exception_section (const char * ARG_UNUSED (fnname))
{
  section *s;

  if (exception_section)
    s = exception_section;
  else
    {
      int flags;

      if (EH_TABLES_CAN_BE_READ_ONLY)
	{
	  int tt_format =
	    ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/1);
	  flags = ((! flag_pic
		    || ((tt_format & 0x70) != DW_EH_PE_absptr
			&& (tt_format & 0x70) != DW_EH_PE_aligned))
		   ? 0 : SECTION_WRITE);
	}
      else
	flags = SECTION_WRITE;

      /* Compute the section and cache it into exception_section,
	 unless it depends on the function name.  */
      if (targetm_common.have_named_sections)
	{
#ifdef HAVE_LD_EH_GC_SECTIONS
	  if (flag_function_sections
	      || (DECL_COMDAT_GROUP (current_function_decl) && HAVE_COMDAT_GROUP))
	    {
	      char *section_name = XNEWVEC (char, strlen (fnname) + 32);
	      /* The EH table must match the code section, so only mark
		 it linkonce if we have COMDAT groups to tie them together.  */
	      if (DECL_COMDAT_GROUP (current_function_decl) && HAVE_COMDAT_GROUP)
		flags |= SECTION_LINKONCE;
	      sprintf (section_name, ".gcc_except_table.%s", fnname);
	      s = get_section (section_name, flags, current_function_decl);
	      free (section_name);
	    }
	  else
#endif
	    exception_section
	      = s = get_section (".gcc_except_table", flags, NULL);
	}
      else
	exception_section
	  = s = flags == SECTION_WRITE ? data_section : readonly_data_section;
    }

  switch_to_section (s);
}

/* Output a reference from an exception table to the type_info object TYPE.
   TT_FORMAT and TT_FORMAT_SIZE describe the DWARF encoding method used for
   the value.  */

static void
output_ttype (tree type, int tt_format, int tt_format_size)
{
  rtx value;
  bool is_public = true;

  if (type == NULL_TREE)
    value = const0_rtx;
  else
    {
      /* FIXME lto.  pass_ipa_free_lang_data changes all types to
	 runtime types so TYPE should already be a runtime type
	 reference.  When pass_ipa_free_lang_data is made a default
	 pass, we can then remove the call to lookup_type_for_runtime
	 below.  */
      if (TYPE_P (type))
	type = lookup_type_for_runtime (type);

      value = expand_expr (type, NULL_RTX, VOIDmode, EXPAND_INITIALIZER);

      /* Let cgraph know that the rtti decl is used.  Not all of the
	 paths below go through assemble_integer, which would take
	 care of this for us.  */
      STRIP_NOPS (type);
      if (TREE_CODE (type) == ADDR_EXPR)
	{
	  type = TREE_OPERAND (type, 0);
	  if (VAR_P (type))
	    is_public = TREE_PUBLIC (type);
	}
      else
	gcc_assert (TREE_CODE (type) == INTEGER_CST);
    }

  /* Allow the target to override the type table entry format.  */
  if (targetm.asm_out.ttype (value))
    return;

  if (tt_format == DW_EH_PE_absptr || tt_format == DW_EH_PE_aligned)
    assemble_integer (value, tt_format_size,
		      tt_format_size * BITS_PER_UNIT, 1);
  else
    dw2_asm_output_encoded_addr_rtx (tt_format, value, is_public, NULL);
}

/* Output an exception table for the current function according to SECTION.

   If the function has been partitioned into hot and cold parts, value 0 for
   SECTION refers to the table associated with the hot part while value 1
   refers to the table associated with the cold part.  If the function has
   not been partitioned, value 0 refers to the single exception table.  */

static void
output_one_function_exception_table (int section)
{
  int tt_format, cs_format, lp_format, i;
  char ttype_label[32];
  char cs_after_size_label[32];
  char cs_end_label[32];
  int call_site_len;
  int have_tt_data;
  int tt_format_size = 0;

  have_tt_data = (vec_safe_length (cfun->eh->ttype_data)
		  || (targetm.arm_eabi_unwinder
		      ? vec_safe_length (cfun->eh->ehspec_data.arm_eabi)
		      : vec_safe_length (cfun->eh->ehspec_data.other)));

  /* Indicate the format of the @TType entries.  */
  if (! have_tt_data)
    tt_format = DW_EH_PE_omit;
  else
    {
      tt_format = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/1);
      if (HAVE_AS_LEB128)
	ASM_GENERATE_INTERNAL_LABEL (ttype_label,
				     section ? "LLSDATTC" : "LLSDATT",
				     current_function_funcdef_no);

      tt_format_size = size_of_encoded_value (tt_format);

      assemble_align (tt_format_size * BITS_PER_UNIT);
    }

  targetm.asm_out.internal_label (asm_out_file, section ? "LLSDAC" : "LLSDA",
				  current_function_funcdef_no);

  /* The LSDA header.  */

  /* Indicate the format of the landing pad start pointer.  An omitted
     field implies @LPStart == @Start.  */
  /* Currently we always put @LPStart == @Start.  This field would
     be most useful in moving the landing pads completely out of
     line to another section, but it could also be used to minimize
     the size of uleb128 landing pad offsets.  */
  lp_format = DW_EH_PE_omit;
  dw2_asm_output_data (1, lp_format, "@LPStart format (%s)",
		       eh_data_format_name (lp_format));

  /* @LPStart pointer would go here.  */

  dw2_asm_output_data (1, tt_format, "@TType format (%s)",
		       eh_data_format_name (tt_format));

  if (!HAVE_AS_LEB128)
    {
      if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
	call_site_len = sjlj_size_of_call_site_table ();
      else
	call_site_len = dw2_size_of_call_site_table (section);
    }

  /* A pc-relative 4-byte displacement to the @TType data.  */
  if (have_tt_data)
    {
      if (HAVE_AS_LEB128)
	{
	  char ttype_after_disp_label[32];
	  ASM_GENERATE_INTERNAL_LABEL (ttype_after_disp_label,
				       section ? "LLSDATTDC" : "LLSDATTD",
				       current_function_funcdef_no);
	  dw2_asm_output_delta_uleb128 (ttype_label, ttype_after_disp_label,
					"@TType base offset");
	  ASM_OUTPUT_LABEL (asm_out_file, ttype_after_disp_label);
	}
      else
	{
	  /* Ug.  Alignment queers things.  The size of the uleb128
	     encoding of the displacement affects the padding needed to
	     align the @TType data, which in turn feeds back into the
	     displacement itself, so iterate until we reach a fixed
	     point.  */
	  unsigned int before_disp, after_disp, last_disp, disp;

	  before_disp = 1 + 1;
	  after_disp = (1 + size_of_uleb128 (call_site_len)
			+ call_site_len
			+ vec_safe_length (crtl->eh.action_record_data)
			+ (vec_safe_length (cfun->eh->ttype_data)
			   * tt_format_size));

	  disp = after_disp;
	  do
	    {
	      unsigned int disp_size, pad;

	      last_disp = disp;
	      disp_size = size_of_uleb128 (disp);
	      pad = before_disp + disp_size + after_disp;
	      if (pad % tt_format_size)
		pad = tt_format_size - (pad % tt_format_size);
	      else
		pad = 0;
	      disp = after_disp + pad;
	    }
	  while (disp != last_disp);

	  dw2_asm_output_data_uleb128 (disp, "@TType base offset");
	}
    }

  /* Indicate the format of the call-site offsets.  */
  if (HAVE_AS_LEB128)
    cs_format = DW_EH_PE_uleb128;
  else
    cs_format = DW_EH_PE_udata4;

  dw2_asm_output_data (1, cs_format, "call-site format (%s)",
		       eh_data_format_name (cs_format));

  if (HAVE_AS_LEB128)
    {
      ASM_GENERATE_INTERNAL_LABEL (cs_after_size_label,
				   section ? "LLSDACSBC" : "LLSDACSB",
				   current_function_funcdef_no);
      ASM_GENERATE_INTERNAL_LABEL (cs_end_label,
				   section ? "LLSDACSEC" : "LLSDACSE",
				   current_function_funcdef_no);
      dw2_asm_output_delta_uleb128 (cs_end_label, cs_after_size_label,
				    "Call-site table length");
      ASM_OUTPUT_LABEL (asm_out_file, cs_after_size_label);
      if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
	sjlj_output_call_site_table ();
      else
	dw2_output_call_site_table (cs_format, section);
      ASM_OUTPUT_LABEL (asm_out_file, cs_end_label);
    }
  else
    {
      dw2_asm_output_data_uleb128 (call_site_len, "Call-site table length");
      if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
	sjlj_output_call_site_table ();
      else
	dw2_output_call_site_table (cs_format, section);
    }

  /* ??? Decode and interpret the data for flag_debug_asm.  */
  {
    uchar uc;
    FOR_EACH_VEC_ELT (*crtl->eh.action_record_data, i, uc)
      dw2_asm_output_data (1, uc, i ? NULL : "Action record table");
  }

  if (have_tt_data)
    assemble_align (tt_format_size * BITS_PER_UNIT);

  i = vec_safe_length (cfun->eh->ttype_data);
  while (i-- > 0)
    {
      tree type = (*cfun->eh->ttype_data)[i];
      output_ttype (type, tt_format, tt_format_size);
    }

  if (HAVE_AS_LEB128 && have_tt_data)
    ASM_OUTPUT_LABEL (asm_out_file, ttype_label);

  /* ??? Decode and interpret the data for flag_debug_asm.  */
  if (targetm.arm_eabi_unwinder)
    {
      tree type;
      for (i = 0;
	   vec_safe_iterate (cfun->eh->ehspec_data.arm_eabi, i, &type); ++i)
	output_ttype (type, tt_format, tt_format_size);
    }
  else
    {
      uchar uc;
      for (i = 0;
	   vec_safe_iterate (cfun->eh->ehspec_data.other, i, &uc); ++i)
	dw2_asm_output_data (1, uc,
			     i ? NULL : "Exception specification table");
    }
}

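/* For illustration only: a sketch of the LSDA layout emitted above,
   for the common non-SJLJ, HAVE_AS_LEB128 case:

     @LPStart format byte              (DW_EH_PE_omit: @LPStart == @Start)
     @TType format byte
     [uleb128 @TType base offset]      (only if there is @TType data)
     call-site format byte
     uleb128 call-site table length
     call-site table                   (see dw2_output_call_site_table)
     action record table               (see add_action_record)
     [aligned @TType table, emitted in reverse order]
     [exception specification table]

   Bracketed pieces are conditional on have_tt_data and related flags.  */
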
/* Output an exception table for the current function according to SECTION,
   switching back and forth from the function section appropriately.

   If the function has been partitioned into hot and cold parts, value 0 for
   SECTION refers to the table associated with the hot part while value 1
   refers to the table associated with the cold part.  If the function has
   not been partitioned, value 0 refers to the single exception table.  */

void
output_function_exception_table (int section)
{
  const char *fnname = get_fnname_from_decl (current_function_decl);
  rtx personality = get_personality_function (current_function_decl);

  /* Not all functions need anything.  */
  if (!crtl->uses_eh_lsda)
    return;

  /* No need to emit any boilerplate stuff for the cold part.  */
  if (section == 1 && !crtl->eh.call_site_record_v[1])
    return;

  if (personality)
    {
      assemble_external_libcall (personality);

      if (targetm.asm_out.emit_except_personality)
	targetm.asm_out.emit_except_personality (personality);
    }

  switch_to_exception_section (fnname);

  /* If the target wants a label to begin the table, emit it here.  */
  targetm.asm_out.emit_except_table_label (asm_out_file);

  /* Do the real work.  */
  output_one_function_exception_table (section);

  switch_to_section (current_function_section ());
}

void
set_eh_throw_stmt_table (function *fun, hash_map<gimple *, int> *table)
{
  fun->eh->throw_stmt_table = table;
}

hash_map<gimple *, int> *
get_eh_throw_stmt_table (struct function *fun)
{
  return fun->eh->throw_stmt_table;
}

/* Determine if the function needs an EH personality function.  */

enum eh_personality_kind
function_needs_eh_personality (struct function *fn)
{
  enum eh_personality_kind kind = eh_personality_none;
  eh_region i;

  FOR_ALL_EH_REGION_FN (i, fn)
    {
      switch (i->type)
	{
	case ERT_CLEANUP:
	  /* Can do with any personality including the generic C one.  */
	  kind = eh_personality_any;
	  break;

	case ERT_TRY:
	case ERT_ALLOWED_EXCEPTIONS:
	  /* Always needs an EH personality function.  The generic C
	     personality doesn't handle these even for empty type lists.  */
	  return eh_personality_lang;

	case ERT_MUST_NOT_THROW:
	  /* Always needs an EH personality function.  The language may
	     specify which abort routine must be used, e.g. std::terminate.  */
	  return eh_personality_lang;
	}
    }

  return kind;
}

/* Dump EH information to OUT.  */

void
dump_eh_tree (FILE * out, struct function *fun)
{
  eh_region i;
  int depth = 0;
  static const char *const type_name[] = {
    "cleanup", "try", "allowed_exceptions", "must_not_throw"
  };

  i = fun->eh->region_tree;
  if (!i)
    return;

  fprintf (out, "Eh tree:\n");
  while (1)
    {
      fprintf (out, "  %*s %i %s", depth * 2, "",
	       i->index, type_name[(int) i->type]);

      if (i->landing_pads)
	{
	  eh_landing_pad lp;

	  fprintf (out, " land:");
	  if (current_ir_type () == IR_GIMPLE)
	    {
	      for (lp = i->landing_pads; lp ; lp = lp->next_lp)
		{
		  fprintf (out, "{%i,", lp->index);
		  print_generic_expr (out, lp->post_landing_pad);
		  fputc ('}', out);
		  if (lp->next_lp)
		    fputc (',', out);
		}
	    }
	  else
	    {
	      for (lp = i->landing_pads; lp ; lp = lp->next_lp)
		{
		  fprintf (out, "{%i,", lp->index);
		  if (lp->landing_pad)
		    fprintf (out, "%i%s,", INSN_UID (lp->landing_pad),
			     NOTE_P (lp->landing_pad) ? "(del)" : "");
		  else
		    fprintf (out, "(nil),");
		  if (lp->post_landing_pad)
		    {
		      rtx_insn *lab = label_rtx (lp->post_landing_pad);
		      fprintf (out, "%i%s}", INSN_UID (lab),
			       NOTE_P (lab) ? "(del)" : "");
		    }
		  else
		    fprintf (out, "(nil)}");
		  if (lp->next_lp)
		    fputc (',', out);
		}
	    }
	}

      switch (i->type)
	{
	case ERT_CLEANUP:
	case ERT_MUST_NOT_THROW:
	  break;

	case ERT_TRY:
	  {
	    eh_catch c;
	    fprintf (out, " catch:");
	    for (c = i->u.eh_try.first_catch; c; c = c->next_catch)
	      {
		fputc ('{', out);
		if (c->label)
		  {
		    fprintf (out, "lab:");
		    print_generic_expr (out, c->label);
		    fputc (';', out);
		  }
		print_generic_expr (out, c->type_list);
		fputc ('}', out);
		if (c->next_catch)
		  fputc (',', out);
	      }
	  }
	  break;

	case ERT_ALLOWED_EXCEPTIONS:
	  fprintf (out, " filter :%i types:", i->u.allowed.filter);
	  print_generic_expr (out, i->u.allowed.type_list);
	  break;
	}
      fputc ('\n', out);

      /* If there are sub-regions, process them.  */
      if (i->inner)
	i = i->inner, depth++;
      /* If there are peers, process them.  */
      else if (i->next_peer)
	i = i->next_peer;
      /* Otherwise, step back up the tree to the next peer.  */
      else
	{
	  do
	    {
	      i = i->outer;
	      depth--;
	      if (i == NULL)
		return;
	    }
	  while (i->next_peer == NULL);
	  i = i->next_peer;
	}
    }
}

/* Dump the EH tree for FN on stderr.  */

DEBUG_FUNCTION void
debug_eh_tree (struct function *fn)
{
  dump_eh_tree (stderr, fn);
}

/* Verify invariants on EH datastructures.  */

DEBUG_FUNCTION void
verify_eh_tree (struct function *fun)
{
  eh_region r, outer;
  int nvisited_lp, nvisited_r;
  int count_lp, count_r, depth, i;
  eh_landing_pad lp;
  bool err = false;

  if (!fun->eh->region_tree)
    return;

  count_r = 0;
  for (i = 1; vec_safe_iterate (fun->eh->region_array, i, &r); ++i)
    if (r)
      {
	if (r->index == i)
	  count_r++;
	else
	  {
	    error ("region_array is corrupted for region %i", r->index);
	    err = true;
	  }
      }

  count_lp = 0;
  for (i = 1; vec_safe_iterate (fun->eh->lp_array, i, &lp); ++i)
    if (lp)
      {
	if (lp->index == i)
	  count_lp++;
	else
	  {
	    error ("lp_array is corrupted for lp %i", lp->index);
	    err = true;
	  }
      }

  depth = nvisited_lp = nvisited_r = 0;
  outer = NULL;
  r = fun->eh->region_tree;
  while (1)
    {
      if ((*fun->eh->region_array)[r->index] != r)
	{
	  error ("region_array is corrupted for region %i", r->index);
	  err = true;
	}
      if (r->outer != outer)
	{
	  error ("outer block of region %i is wrong", r->index);
	  err = true;
	}
      if (depth < 0)
	{
	  error ("negative nesting depth of region %i", r->index);
	  err = true;
	}
      nvisited_r++;

      for (lp = r->landing_pads; lp ; lp = lp->next_lp)
	{
	  if ((*fun->eh->lp_array)[lp->index] != lp)
	    {
	      error ("lp_array is corrupted for lp %i", lp->index);
	      err = true;
	    }
	  if (lp->region != r)
	    {
	      error ("region of lp %i is wrong", lp->index);
	      err = true;
	    }
	  nvisited_lp++;
	}

      if (r->inner)
	outer = r, r = r->inner, depth++;
      else if (r->next_peer)
	r = r->next_peer;
      else
	{
	  do
	    {
	      r = r->outer;
	      if (r == NULL)
		goto region_done;
	      depth--;
	      outer = r->outer;
	    }
	  while (r->next_peer == NULL);
	  r = r->next_peer;
	}
    }
 region_done:
  if (depth != 0)
    {
      error ("tree list ends on depth %i", depth);
      err = true;
    }
  if (count_r != nvisited_r)
    {
      error ("region_array does not match region_tree");
      err = true;
    }
  if (count_lp != nvisited_lp)
    {
      error ("lp_array does not match region_tree");
      err = true;
    }

  if (err)
    {
      dump_eh_tree (stderr, fun);
      internal_error ("verify_eh_tree failed");
    }
}

#include "gt-except.h"