1 /* Exception handling semantics and decomposition for trees.
2    Copyright (C) 2003-2021 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10 
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "cgraph.h"
31 #include "diagnostic-core.h"
32 #include "fold-const.h"
33 #include "calls.h"
34 #include "except.h"
35 #include "cfganal.h"
36 #include "cfgcleanup.h"
37 #include "tree-eh.h"
38 #include "gimple-iterator.h"
39 #include "tree-cfg.h"
40 #include "tree-into-ssa.h"
41 #include "tree-ssa.h"
42 #include "tree-inline.h"
43 #include "langhooks.h"
44 #include "cfgloop.h"
45 #include "gimple-low.h"
46 #include "stringpool.h"
47 #include "attribs.h"
48 #include "asan.h"
49 #include "gimplify.h"
50 
51 /* In some instances a tree and a gimple need to be stored in the same table,
52    i.e. in hash tables.  This structure makes that possible.  */
53 typedef union {tree *tp; tree t; gimple *g;} treemple;
54 
55 /* Misc functions used in this file.  */
56 
57 /* Remember and lookup EH landing pad data for arbitrary statements.
58    Really this means any statement that could_throw_p.  We could
59    stuff this information into the stmt_ann data structure, but:
60 
61    (1) We absolutely rely on this information being kept until
62    we get to rtl.  Once we're done with lowering here, if we lose
63    the information there's no way to recover it!
64 
65    (2) There are many more statements that *cannot* throw as
66    compared to those that can.  We should be saving some amount
67    of space by only allocating memory for those that can throw.  */
68 
69 /* Add statement T in function IFUN to landing pad NUM.  */
70 
71 static void
72 add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
73 {
74   gcc_assert (num != 0);
75 
76   if (!get_eh_throw_stmt_table (ifun))
77     set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));
78 
79   gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
80 }
81 
82 /* Add statement T in the current function (cfun) to EH landing pad NUM.  */
83 
84 void
85 add_stmt_to_eh_lp (gimple *t, int num)
86 {
87   add_stmt_to_eh_lp_fn (cfun, t, num);
88 }
89 
90 /* Add statement T to the single EH landing pad in REGION.  */
91 
92 static void
93 record_stmt_eh_region (eh_region region, gimple *t)
94 {
95   if (region == NULL)
96     return;
97   if (region->type == ERT_MUST_NOT_THROW)
98     add_stmt_to_eh_lp_fn (cfun, t, -region->index);
99   else
100     {
101       eh_landing_pad lp = region->landing_pads;
102       if (lp == NULL)
103 	lp = gen_eh_landing_pad (region);
104       else
105 	gcc_assert (lp->next_lp == NULL);
106       add_stmt_to_eh_lp_fn (cfun, t, lp->index);
107     }
108 }
109 
110 
111 /* Remove statement T in function IFUN from its EH landing pad.  */
112 
113 bool
114 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
115 {
116   if (!get_eh_throw_stmt_table (ifun))
117     return false;
118 
119   if (!get_eh_throw_stmt_table (ifun)->get (t))
120     return false;
121 
122   get_eh_throw_stmt_table (ifun)->remove (t);
123   return true;
124 }
125 
126 
127 /* Remove statement T in the current function (cfun) from its
128    EH landing pad.  */
129 
130 bool
131 remove_stmt_from_eh_lp (gimple *t)
132 {
133   return remove_stmt_from_eh_lp_fn (cfun, t);
134 }
135 
136 /* Determine if statement T is inside an EH region in function IFUN.
137    Positive numbers indicate a landing pad index; negative numbers
138    indicate a MUST_NOT_THROW region index; zero indicates that the
139    statement is not recorded in the region table.  */
140 
141 int
142 lookup_stmt_eh_lp_fn (struct function *ifun, const gimple *t)
143 {
144   if (ifun->eh->throw_stmt_table == NULL)
145     return 0;
146 
147   int *lp_nr = ifun->eh->throw_stmt_table->get (const_cast <gimple *> (t));
148   return lp_nr ? *lp_nr : 0;
149 }
150 
151 /* Likewise, but always use the current function.  */
152 
153 int
154 lookup_stmt_eh_lp (const gimple *t)
155 {
156   /* We can get called from initialized data when -fnon-call-exceptions
157      is on; prevent crash.  */
158   if (!cfun)
159     return 0;
160   return lookup_stmt_eh_lp_fn (cfun, t);
161 }
162 
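/* For example, a caller might interpret the result along these lines
   (an illustrative sketch, not code quoted from elsewhere in GCC):

     int lp_nr = lookup_stmt_eh_lp (stmt);
     if (lp_nr > 0)
       ... STMT may throw; its landing pad is number LP_NR ...
     else if (lp_nr < 0)
       ... STMT lies in the MUST_NOT_THROW region numbered -LP_NR ...
     else
       ... STMT is not recorded in the throw-stmt table ...  */
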
163 /* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
164    nodes and LABEL_DECL nodes.  We will use this during the second phase to
165    determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */
166 
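/* As an illustrative sketch (not taken from any particular testcase), given

     try {                  <-- TRY_FINALLY "A"
       L1:
       try {                <-- TRY_FINALLY "B"
         L2:
       } finally { ... }
     } finally { ... }

   the table records A as the parent of L1 and of B, and B as the parent
   of L2; a goto from inside B to L1 therefore leaves B but not A.  */
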
167 struct finally_tree_node
168 {
169   /* When storing a GIMPLE_TRY, we have to record a gimple.  However
170      when deciding whether a GOTO to a certain LABEL_DECL (which is a
171      tree) leaves the TRY block, it's necessary to record a tree in
172      this field.  Thus a treemple is used. */
173   treemple child;
174   gtry *parent;
175 };
176 
177 /* Hashtable helpers.  */
178 
179 struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
180 {
181   static inline hashval_t hash (const finally_tree_node *);
182   static inline bool equal (const finally_tree_node *,
183 			    const finally_tree_node *);
184 };
185 
186 inline hashval_t
187 finally_tree_hasher::hash (const finally_tree_node *v)
188 {
189   return (intptr_t)v->child.t >> 4;
190 }
191 
192 inline bool
193 finally_tree_hasher::equal (const finally_tree_node *v,
194 			    const finally_tree_node *c)
195 {
196   return v->child.t == c->child.t;
197 }
198 
199 /* Note that this table is *not* marked GTY.  It is short-lived.  */
200 static hash_table<finally_tree_hasher> *finally_tree;
201 
202 static void
203 record_in_finally_tree (treemple child, gtry *parent)
204 {
205   struct finally_tree_node *n;
206   finally_tree_node **slot;
207 
208   n = XNEW (struct finally_tree_node);
209   n->child = child;
210   n->parent = parent;
211 
212   slot = finally_tree->find_slot (n, INSERT);
213   gcc_assert (!*slot);
214   *slot = n;
215 }
216 
217 static void
218 collect_finally_tree (gimple *stmt, gtry *region);
219 
220 /* Go through the gimple sequence.  Works with collect_finally_tree to
221    record all GIMPLE_LABEL and GIMPLE_TRY statements. */
222 
223 static void
224 collect_finally_tree_1 (gimple_seq seq, gtry *region)
225 {
226   gimple_stmt_iterator gsi;
227 
228   for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
229     collect_finally_tree (gsi_stmt (gsi), region);
230 }
231 
232 static void
233 collect_finally_tree (gimple *stmt, gtry *region)
234 {
235   treemple temp;
236 
237   switch (gimple_code (stmt))
238     {
239     case GIMPLE_LABEL:
240       temp.t = gimple_label_label (as_a <glabel *> (stmt));
241       record_in_finally_tree (temp, region);
242       break;
243 
244     case GIMPLE_TRY:
245       if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
246         {
247           temp.g = stmt;
248           record_in_finally_tree (temp, region);
249           collect_finally_tree_1 (gimple_try_eval (stmt),
250 				  as_a <gtry *> (stmt));
251 	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
252         }
253       else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
254         {
255           collect_finally_tree_1 (gimple_try_eval (stmt), region);
256           collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
257         }
258       break;
259 
260     case GIMPLE_CATCH:
261       collect_finally_tree_1 (gimple_catch_handler (
262 				 as_a <gcatch *> (stmt)),
263 			      region);
264       break;
265 
266     case GIMPLE_EH_FILTER:
267       collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
268       break;
269 
270     case GIMPLE_EH_ELSE:
271       {
272 	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
273 	collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
274 	collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
275       }
276       break;
277 
278     default:
279       /* A type, a decl, or some kind of statement that we're not
280 	 interested in.  Don't walk them.  */
281       break;
282     }
283 }
284 
285 
286 /* Use the finally tree to determine if a jump from START to TARGET
287    would leave the try_finally node that START lives in.  */
288 
289 static bool
290 outside_finally_tree (treemple start, gimple *target)
291 {
292   struct finally_tree_node n, *p;
293 
294   do
295     {
296       n.child = start;
297       p = finally_tree->find (&n);
298       if (!p)
299 	return true;
300       start.g = p->parent;
301     }
302   while (start.g != target);
303 
304   return false;
305 }
306 
307 /* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
308    nodes into a set of gotos, magic labels, and eh regions.
309    The eh region creation is straightforward, but frobbing all the gotos
310    and such into shape isn't.  */
311 
312 /* The sequence into which we record all EH stuff.  This will be
313    placed at the end of the function when we're all done.  */
314 static gimple_seq eh_seq;
315 
316 /* Record whether an EH region contains something that can throw,
317    indexed by EH region number.  */
318 static bitmap eh_region_may_contain_throw_map;
319 
320 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
321    statements that are seen to escape this GIMPLE_TRY_FINALLY node.
322    The idea is to record a gimple statement for everything except for
323    the conditionals, which get their labels recorded. Since labels are
324    of type 'tree', we need this node to store both gimple and tree
325    objects.  REPL_STMT is the sequence used to replace the goto/return
326    statement.  CONT_STMT is used to store the statement that allows
327    the return/goto to jump to the original destination. */
328 
329 struct goto_queue_node
330 {
331   treemple stmt;
332   location_t location;
333   gimple_seq repl_stmt;
334   gimple *cont_stmt;
335   int index;
336   /* This is used when index >= 0 to indicate that stmt is a label (as
337      opposed to a goto stmt).  */
338   int is_label;
339 };
340 
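/* For instance (an illustrative note): a GIMPLE_RETURN that escapes the
   region is queued with index == -1 and is_label == false, while a branch
   to a label L that leaves the region is queued with is_label == true and
   index giving L's slot in the containing leh_tf_state's dest_array.  */
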
341 /* State of the world while lowering.  */
342 
343 struct leh_state
344 {
345   /* What's "current" while constructing the eh region tree.  These
346      correspond to variables of the same name in cfun->eh, which we
347      don't have easy access to.  */
348   eh_region cur_region;
349 
350   /* What's "current" for the purposes of __builtin_eh_pointer.  For
351      a CATCH, this is the associated TRY.  For an EH_FILTER, this is
352      the associated ALLOWED_EXCEPTIONS, etc.  */
353   eh_region ehp_region;
354 
355   /* Processing of TRY_FINALLY requires a bit more state.  This is
356      split out into a separate structure so that we don't have to
357      copy so much when processing other nodes.  */
358   struct leh_tf_state *tf;
359 
360   /* Outer non-cleanup region.  */
361   eh_region outer_non_cleanup;
362 };
363 
364 struct leh_tf_state
365 {
366   /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
367      try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
368      this so that outside_finally_tree can reliably reference the tree used
369      in the collect_finally_tree data structures.  */
370   gtry *try_finally_expr;
371   gtry *top_p;
372 
373   /* While lowering, top_p is usually expanded into multiple statements,
374      so we need the following field to store them.  */
375   gimple_seq top_p_seq;
376 
377   /* The state outside this try_finally node.  */
378   struct leh_state *outer;
379 
380   /* The exception region created for it.  */
381   eh_region region;
382 
383   /* The goto queue.  */
384   struct goto_queue_node *goto_queue;
385   size_t goto_queue_size;
386   size_t goto_queue_active;
387 
388   /* Pointer map to help in searching goto_queue when it is large.  */
389   hash_map<gimple *, goto_queue_node *> *goto_queue_map;
390 
391   /* The set of unique labels seen as entries in the goto queue.  */
392   vec<tree> dest_array;
393 
394   /* A label to be added at the end of the completed transformed
395      sequence.  It will be set if may_fallthru was true *at one time*,
396      though subsequent transformations may have cleared that flag.  */
397   tree fallthru_label;
398 
399   /* True if it is possible to fall out the bottom of the try block.
400      Cleared if the fallthru is converted to a goto.  */
401   bool may_fallthru;
402 
403   /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
404   bool may_return;
405 
406   /* True if the finally block can receive an exception edge.
407      Cleared if the exception case is handled by code duplication.  */
408   bool may_throw;
409 };
410 
411 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);
412 
413 /* Search for STMT in the goto queue.  Return the replacement,
414    or null if the statement isn't in the queue.  */
415 
416 #define LARGE_GOTO_QUEUE 20
417 
418 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
419 
420 static gimple_seq
421 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
422 {
423   unsigned int i;
424 
425   if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
426     {
427       for (i = 0; i < tf->goto_queue_active; i++)
428 	if ( tf->goto_queue[i].stmt.g == stmt.g)
429 	  return tf->goto_queue[i].repl_stmt;
430       return NULL;
431     }
432 
433   /* If we have a large number of entries in the goto_queue, create a
434      pointer map and use that for searching.  */
435 
436   if (!tf->goto_queue_map)
437     {
438       tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
439       for (i = 0; i < tf->goto_queue_active; i++)
440 	{
441 	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
442 						  &tf->goto_queue[i]);
443 	  gcc_assert (!existed);
444 	}
445     }
446 
447   goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
448   if (slot != NULL)
449     return ((*slot)->repl_stmt);
450 
451   return NULL;
452 }
453 
454 /* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
455    lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
456    then we can just splat it in, otherwise we add the new stmts immediately
457    after the GIMPLE_COND and redirect.  */
458 
459 static void
460 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
461 				gimple_stmt_iterator *gsi)
462 {
463   tree label;
464   gimple_seq new_seq;
465   treemple temp;
466   location_t loc = gimple_location (gsi_stmt (*gsi));
467 
468   temp.tp = tp;
469   new_seq = find_goto_replacement (tf, temp);
470   if (!new_seq)
471     return;
472 
473   if (gimple_seq_singleton_p (new_seq)
474       && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
475     {
476       *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
477       return;
478     }
479 
480   label = create_artificial_label (loc);
481   /* Set the new label for the GIMPLE_COND */
482   *tp = label;
483 
484   gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
485   gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
486 }
487 
488 /* The real work of replace_goto_queue.  Returns with GSI updated to
489    point to the next statement.  */
490 
491 static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
492 
493 static void
494 replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
495 		      gimple_stmt_iterator *gsi)
496 {
497   gimple_seq seq;
498   treemple temp;
499   temp.g = NULL;
500 
501   switch (gimple_code (stmt))
502     {
503     case GIMPLE_GOTO:
504     case GIMPLE_RETURN:
505       temp.g = stmt;
506       seq = find_goto_replacement (tf, temp);
507       if (seq)
508 	{
509 	  gimple_stmt_iterator i;
510 	  seq = gimple_seq_copy (seq);
511 	  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
512 	    gimple_set_location (gsi_stmt (i), gimple_location (stmt));
513 	  gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
514 	  gsi_remove (gsi, false);
515 	  return;
516 	}
517       break;
518 
519     case GIMPLE_COND:
520       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
521       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
522       break;
523 
524     case GIMPLE_TRY:
525       replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
526       replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
527       break;
528     case GIMPLE_CATCH:
529       replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
530 				      as_a <gcatch *> (stmt)),
531 				    tf);
532       break;
533     case GIMPLE_EH_FILTER:
534       replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
535       break;
536     case GIMPLE_EH_ELSE:
537       {
538 	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
539 	replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
540 				      tf);
541 	replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
542 				      tf);
543       }
544       break;
545 
546     default:
547       /* These won't have gotos in them.  */
548       break;
549     }
550 
551   gsi_next (gsi);
552 }
553 
554 /* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */
555 
556 static void
557 replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
558 {
559   gimple_stmt_iterator gsi = gsi_start (*seq);
560 
561   while (!gsi_end_p (gsi))
562     replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
563 }
564 
565 /* Replace all goto queue members.  */
566 
567 static void
568 replace_goto_queue (struct leh_tf_state *tf)
569 {
570   if (tf->goto_queue_active == 0)
571     return;
572   replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
573   replace_goto_queue_stmt_list (&eh_seq, tf);
574 }
575 
576 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
577    data to be added, IS_LABEL indicates whether NEW_STMT is a label or
578    a gimple return. */
579 
580 static void
581 record_in_goto_queue (struct leh_tf_state *tf,
582                       treemple new_stmt,
583                       int index,
584                       bool is_label,
585 		      location_t location)
586 {
587   size_t active, size;
588   struct goto_queue_node *q;
589 
590   gcc_assert (!tf->goto_queue_map);
591 
592   active = tf->goto_queue_active;
593   size = tf->goto_queue_size;
594   if (active >= size)
595     {
596       size = (size ? size * 2 : 32);
597       tf->goto_queue_size = size;
598       tf->goto_queue
599          = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
600     }
601 
602   q = &tf->goto_queue[active];
603   tf->goto_queue_active = active + 1;
604 
605   memset (q, 0, sizeof (*q));
606   q->stmt = new_stmt;
607   q->index = index;
608   q->location = location;
609   q->is_label = is_label;
610 }
611 
612 /* Record label LABEL in the goto queue contained in TF.
613    TF is not null.  */
614 
615 static void
616 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
617 			    location_t location)
618 {
619   int index;
620   treemple temp, new_stmt;
621 
622   if (!label)
623     return;
624 
625   /* Computed and non-local gotos do not get processed.  Given
626      their nature we can neither tell whether we've escaped the
627      finally block nor redirect them even if we could.  */
628   if (TREE_CODE (label) != LABEL_DECL)
629     return;
630 
631   /* No need to record gotos that don't leave the try block.  */
632   temp.t = label;
633   if (!outside_finally_tree (temp, tf->try_finally_expr))
634     return;
635 
636   if (! tf->dest_array.exists ())
637     {
638       tf->dest_array.create (10);
639       tf->dest_array.quick_push (label);
640       index = 0;
641     }
642   else
643     {
644       int n = tf->dest_array.length ();
645       for (index = 0; index < n; ++index)
646         if (tf->dest_array[index] == label)
647           break;
648       if (index == n)
649         tf->dest_array.safe_push (label);
650     }
651 
652   /* In the case of a GOTO we want to record the destination label,
653      since with a GIMPLE_COND we have easy access to the then/else
654      labels. */
655   new_stmt = stmt;
656   record_in_goto_queue (tf, new_stmt, index, true, location);
657 }
658 
659 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
660    node, and if so record that fact in the goto queue associated with that
661    try_finally node.  */
662 
663 static void
664 maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
665 {
666   struct leh_tf_state *tf = state->tf;
667   treemple new_stmt;
668 
669   if (!tf)
670     return;
671 
672   switch (gimple_code (stmt))
673     {
674     case GIMPLE_COND:
675       {
676 	gcond *cond_stmt = as_a <gcond *> (stmt);
677 	new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
678 	record_in_goto_queue_label (tf, new_stmt,
679 				    gimple_cond_true_label (cond_stmt),
680 				    EXPR_LOCATION (*new_stmt.tp));
681 	new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
682 	record_in_goto_queue_label (tf, new_stmt,
683 				    gimple_cond_false_label (cond_stmt),
684 				    EXPR_LOCATION (*new_stmt.tp));
685       }
686       break;
687     case GIMPLE_GOTO:
688       new_stmt.g = stmt;
689       record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
690 				  gimple_location (stmt));
691       break;
692 
693     case GIMPLE_RETURN:
694       tf->may_return = true;
695       new_stmt.g = stmt;
696       record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
697       break;
698 
699     default:
700       gcc_unreachable ();
701     }
702 }
703 
704 
705 #if CHECKING_P
706 /* We do not process GIMPLE_SWITCHes for now.  As long as the original source
707    was in fact structured, and we've not yet done jump threading, none
708    of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this.  */
709 
710 static void
711 verify_norecord_switch_expr (struct leh_state *state,
712 			     gswitch *switch_expr)
713 {
714   struct leh_tf_state *tf = state->tf;
715   size_t i, n;
716 
717   if (!tf)
718     return;
719 
720   n = gimple_switch_num_labels (switch_expr);
721 
722   for (i = 0; i < n; ++i)
723     {
724       treemple temp;
725       tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
726       temp.t = lab;
727       gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
728     }
729 }
730 #else
731 #define verify_norecord_switch_expr(state, switch_expr)
732 #endif
733 
734 /* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
735    non-null, insert it before the new branch.  */
736 
737 static void
738 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
739 {
740   gimple *x;
741 
742   /* In the case of a return, the queue node must be a gimple statement.  */
743   gcc_assert (!q->is_label);
744 
745   /* Note that the return value may have already been computed, e.g.,
746 
747 	int x;
748 	int foo (void)
749 	{
750 	  x = 0;
751 	  try {
752 	    return x;
753 	  } finally {
754 	    x++;
755 	  }
756 	}
757 
758      should return 0, not 1.  We don't have to do anything to make
759      this happen because the return value has been placed in the
760      RESULT_DECL already.  */
761 
762   q->cont_stmt = q->stmt.g;
763 
764   if (mod)
765     gimple_seq_add_seq (&q->repl_stmt, mod);
766 
767   x = gimple_build_goto (finlab);
768   gimple_set_location (x, q->location);
769   gimple_seq_add_stmt (&q->repl_stmt, x);
770 }
771 
772 /* Similar, but easier, for GIMPLE_GOTO.  */
773 
774 static void
775 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
776 		     struct leh_tf_state *tf)
777 {
778   ggoto *x;
779 
780   gcc_assert (q->is_label);
781 
782   q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
783 
784   if (mod)
785     gimple_seq_add_seq (&q->repl_stmt, mod);
786 
787   x = gimple_build_goto (finlab);
788   gimple_set_location (x, q->location);
789   gimple_seq_add_stmt (&q->repl_stmt, x);
790 }
791 
792 /* Emit a standard landing pad sequence into SEQ for REGION.  */
793 
794 static void
795 emit_post_landing_pad (gimple_seq *seq, eh_region region)
796 {
797   eh_landing_pad lp = region->landing_pads;
798   glabel *x;
799 
800   if (lp == NULL)
801     lp = gen_eh_landing_pad (region);
802 
803   lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
804   EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
805 
806   x = gimple_build_label (lp->post_landing_pad);
807   gimple_seq_add_stmt (seq, x);
808 }
809 
810 /* Emit a RESX statement into SEQ for REGION.  */
811 
812 static void
813 emit_resx (gimple_seq *seq, eh_region region)
814 {
815   gresx *x = gimple_build_resx (region->index);
816   gimple_seq_add_stmt (seq, x);
817   if (region->outer)
818     record_stmt_eh_region (region->outer, x);
819 }
820 
821 /* Note that the current EH region may contain a throw, or a
822    call to a function which itself may contain a throw.  */
823 
824 static void
825 note_eh_region_may_contain_throw (eh_region region)
826 {
827   while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
828     {
829       if (region->type == ERT_MUST_NOT_THROW)
830 	break;
831       region = region->outer;
832       if (region == NULL)
833 	break;
834     }
835 }
836 
837 /* Check if REGION has been marked as containing a throw.  If REGION is
838    NULL, this predicate is false.  */
839 
840 static inline bool
841 eh_region_may_contain_throw (eh_region r)
842 {
843   return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
844 }
845 
846 /* We want to transform
847 	try { body; } catch { stuff; }
848    to
849 	normal_sequence:
850 	  body;
851 	  over:
852 	eh_sequence:
853 	  landing_pad:
854 	  stuff;
855 	  goto over;
856 
857    TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
858    should be placed before the second operand, or NULL.  OVER is
859    an existing label that should be put at the exit, or NULL.  */
860 
861 static gimple_seq
862 frob_into_branch_around (gtry *tp, eh_region region, tree over)
863 {
864   gimple *x;
865   gimple_seq cleanup, result;
866   location_t loc = gimple_location (tp);
867 
868   cleanup = gimple_try_cleanup (tp);
869   result = gimple_try_eval (tp);
870 
871   if (region)
872     emit_post_landing_pad (&eh_seq, region);
873 
874   if (gimple_seq_may_fallthru (cleanup))
875     {
876       if (!over)
877 	over = create_artificial_label (loc);
878       x = gimple_build_goto (over);
879       gimple_set_location (x, loc);
880       gimple_seq_add_stmt (&cleanup, x);
881     }
882   gimple_seq_add_seq (&eh_seq, cleanup);
883 
884   if (over)
885     {
886       x = gimple_build_label (over);
887       gimple_seq_add_stmt (&result, x);
888     }
889   return result;
890 }
891 
892 /* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
893    Make sure to record all new labels found.  */
894 
895 static gimple_seq
896 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
897 			     location_t loc)
898 {
899   gtry *region = NULL;
900   gimple_seq new_seq;
901   gimple_stmt_iterator gsi;
902 
903   new_seq = copy_gimple_seq_and_replace_locals (seq);
904 
905   for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
906     {
907       gimple *stmt = gsi_stmt (gsi);
908       if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
909 	{
910 	  tree block = gimple_block (stmt);
911 	  gimple_set_location (stmt, loc);
912 	  gimple_set_block (stmt, block);
913 	}
914     }
915 
916   if (outer_state->tf)
917     region = outer_state->tf->try_finally_expr;
918   collect_finally_tree_1 (new_seq, region);
919 
920   return new_seq;
921 }
922 
923 /* A subroutine of lower_try_finally.  Create a fallthru label for
924    the given try_finally state.  The only tricky bit here is that
925    we have to make sure to record the label in our outer context.  */
926 
927 static tree
928 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
929 {
930   tree label = tf->fallthru_label;
931   treemple temp;
932 
933   if (!label)
934     {
935       label = create_artificial_label (gimple_location (tf->try_finally_expr));
936       tf->fallthru_label = label;
937       if (tf->outer->tf)
938         {
939           temp.t = label;
940           record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
941         }
942     }
943   return label;
944 }
945 
946 /* A subroutine of lower_try_finally.  If FINALLY consists of a
947    GIMPLE_EH_ELSE node, return it.  */
948 
949 static inline geh_else *
950 get_eh_else (gimple_seq finally)
951 {
952   gimple *x = gimple_seq_first_stmt (finally);
953   if (gimple_code (x) == GIMPLE_EH_ELSE)
954     {
955       gcc_assert (gimple_seq_singleton_p (finally));
956       return as_a <geh_else *> (x);
957     }
958   return NULL;
959 }
960 
961 /* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
962    langhook returns non-null, then the language requires that the exception
963    path out of a try_finally be treated specially.  To wit: the code within
964    the finally block may not itself throw an exception.  We have two choices
965    here. First we can duplicate the finally block and wrap it in a
966    must_not_throw region.  Second, we can generate code like
967 
968 	try {
969 	  finally_block;
970 	} catch {
971 	  if (fintmp == eh_edge)
972 	    protect_cleanup_actions;
973 	}
974 
975    where "fintmp" is the temporary used in the switch statement generation
976    alternative considered below.  For the nonce, we always choose the first
977    option.
978 
979    THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */
980 
981 static void
982 honor_protect_cleanup_actions (struct leh_state *outer_state,
983 			       struct leh_state *this_state,
984 			       struct leh_tf_state *tf)
985 {
986   gimple_seq finally = gimple_try_cleanup (tf->top_p);
987 
988   /* EH_ELSE doesn't come from user code; only compiler generated stuff.
989      It does need to be handled here, so as to separate the (different)
990      EH path from the normal path.  But we should not attempt to wrap
991      it with a must-not-throw node (which indeed gets in the way).  */
992   if (geh_else *eh_else = get_eh_else (finally))
993     {
994       gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
995       finally = gimple_eh_else_e_body (eh_else);
996 
997       /* Let the ELSE see the exception that's being processed, but
998 	 since the cleanup is outside the try block, process it with
999 	 outer_state, otherwise it may be used as a cleanup for
1000 	 itself, and Bad Things (TM) ensue.  */
1001       eh_region save_ehp = outer_state->ehp_region;
1002       outer_state->ehp_region = this_state->cur_region;
1003       lower_eh_constructs_1 (outer_state, &finally);
1004       outer_state->ehp_region = save_ehp;
1005     }
1006   else
1007     {
1008       /* First check for nothing to do.  */
1009       if (lang_hooks.eh_protect_cleanup_actions == NULL)
1010 	return;
1011       tree actions = lang_hooks.eh_protect_cleanup_actions ();
1012       if (actions == NULL)
1013 	return;
1014 
1015       if (this_state)
1016 	finally = lower_try_finally_dup_block (finally, outer_state,
1017 	  gimple_location (tf->try_finally_expr));
1018 
1019       /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1020 	 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1021 	 to be in an enclosing scope, but needs to be implemented at this level
1022 	 to avoid a nesting violation (see wrap_temporary_cleanups in
1023 	 cp/decl.c).  Since it's logically at an outer level, we should call
1024 	 terminate before we get to it, so strip it away before adding the
1025 	 MUST_NOT_THROW filter.  */
1026       gimple_stmt_iterator gsi = gsi_start (finally);
1027       gimple *x = gsi_stmt (gsi);
1028       if (gimple_code (x) == GIMPLE_TRY
1029 	  && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1030 	  && gimple_try_catch_is_cleanup (x))
1031 	{
1032 	  gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1033 	  gsi_remove (&gsi, false);
1034 	}
1035 
1036       /* Wrap the block with protect_cleanup_actions as the action.  */
1037       geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
1038       gtry *try_stmt = gimple_build_try (finally,
1039 					 gimple_seq_alloc_with_stmt (eh_mnt),
1040 					 GIMPLE_TRY_CATCH);
1041       finally = lower_eh_must_not_throw (outer_state, try_stmt);
1042     }
1043 
1044   /* Drop all of this into the exception sequence.  */
1045   emit_post_landing_pad (&eh_seq, tf->region);
1046   gimple_seq_add_seq (&eh_seq, finally);
1047   if (gimple_seq_may_fallthru (finally))
1048     emit_resx (&eh_seq, tf->region);
1049 
1050   /* Having now been handled, EH isn't to be considered with
1051      the rest of the outgoing edges.  */
1052   tf->may_throw = false;
1053 }
1054 
1055 /* A subroutine of lower_try_finally.  We have determined that there is
1056    no fallthru edge out of the finally block.  This means that there is
1057    no outgoing edge corresponding to any incoming edge.  Restructure the
1058    try_finally node for this special case.  */
1059 
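/* Sketch of the shape of the result (illustrative only): because the
   finally block F cannot fall through (say it ends in a call to a
   noreturn function), the lowered form is simply

     <TRY body>
     <lab>:              <-- every escaping goto/return is redirected here
     F

   and, if the region may throw, the EH landing pad also just branches
   to <lab> (or, when an EH_ELSE is present, receives its else body).  */
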
1060 static void
1061 lower_try_finally_nofallthru (struct leh_state *state,
1062 			      struct leh_tf_state *tf)
1063 {
1064   tree lab;
1065   gimple *x;
1066   geh_else *eh_else;
1067   gimple_seq finally;
1068   struct goto_queue_node *q, *qe;
1069 
1070   lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1071 
1072   /* We expect that tf->top_p is a GIMPLE_TRY. */
1073   finally = gimple_try_cleanup (tf->top_p);
1074   tf->top_p_seq = gimple_try_eval (tf->top_p);
1075 
1076   x = gimple_build_label (lab);
1077   gimple_seq_add_stmt (&tf->top_p_seq, x);
1078 
1079   q = tf->goto_queue;
1080   qe = q + tf->goto_queue_active;
1081   for (; q < qe; ++q)
1082     if (q->index < 0)
1083       do_return_redirection (q, lab, NULL);
1084     else
1085       do_goto_redirection (q, lab, NULL, tf);
1086 
1087   replace_goto_queue (tf);
1088 
1089   /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
1090   eh_else = get_eh_else (finally);
1091   if (eh_else)
1092     {
1093       finally = gimple_eh_else_n_body (eh_else);
1094       lower_eh_constructs_1 (state, &finally);
1095       gimple_seq_add_seq (&tf->top_p_seq, finally);
1096 
1097       if (tf->may_throw)
1098 	{
1099 	  finally = gimple_eh_else_e_body (eh_else);
1100 	  lower_eh_constructs_1 (state, &finally);
1101 
1102 	  emit_post_landing_pad (&eh_seq, tf->region);
1103 	  gimple_seq_add_seq (&eh_seq, finally);
1104 	}
1105     }
1106   else
1107     {
1108       lower_eh_constructs_1 (state, &finally);
1109       gimple_seq_add_seq (&tf->top_p_seq, finally);
1110 
1111       if (tf->may_throw)
1112 	{
1113 	  emit_post_landing_pad (&eh_seq, tf->region);
1114 
1115 	  x = gimple_build_goto (lab);
1116 	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
1117 	  gimple_seq_add_stmt (&eh_seq, x);
1118 	}
1119     }
1120 }
1121 
1122 /* A subroutine of lower_try_finally.  We have determined that there is
1123    exactly one destination of the finally block.  Restructure the
1124    try_finally node for this special case.  */
1125 
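/* Illustrative sketch: with a single destination the finally block F is
   emitted exactly once.  If only the EH edge reaches it, F goes into the
   EH sequence followed by a RESX; if only the fallthru edge reaches it,
   F is simply appended to the try body; otherwise F is placed under a
   fresh label, the queued return/goto is redirected to that label, and
   the original return/goto is re-emitted right after F.  */
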
1126 static void
1127 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1128 {
1129   struct goto_queue_node *q, *qe;
1130   geh_else *eh_else;
1131   glabel *label_stmt;
1132   gimple *x;
1133   gimple_seq finally;
1134   gimple_stmt_iterator gsi;
1135   tree finally_label;
1136   location_t loc = gimple_location (tf->try_finally_expr);
1137 
1138   finally = gimple_try_cleanup (tf->top_p);
1139   tf->top_p_seq = gimple_try_eval (tf->top_p);
1140 
1141   /* Since there's only one destination, and the destination edge can only
1142      either be EH or non-EH, that implies that all of our incoming edges
1143      are of the same type.  Therefore we can lower EH_ELSE immediately.  */
1144   eh_else = get_eh_else (finally);
1145   if (eh_else)
1146     {
1147       if (tf->may_throw)
1148 	finally = gimple_eh_else_e_body (eh_else);
1149       else
1150 	finally = gimple_eh_else_n_body (eh_else);
1151     }
1152 
1153   lower_eh_constructs_1 (state, &finally);
1154 
1155   for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1156     {
1157       gimple *stmt = gsi_stmt (gsi);
1158       if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1159 	{
1160 	  tree block = gimple_block (stmt);
1161 	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1162 	  gimple_set_block (stmt, block);
1163 	}
1164     }
1165 
1166   if (tf->may_throw)
1167     {
1168       /* Only reachable via the exception edge.  Add the given label to
1169          the head of the FINALLY block.  Append a RESX at the end.  */
1170       emit_post_landing_pad (&eh_seq, tf->region);
1171       gimple_seq_add_seq (&eh_seq, finally);
1172       emit_resx (&eh_seq, tf->region);
1173       return;
1174     }
1175 
1176   if (tf->may_fallthru)
1177     {
1178       /* Only reachable via the fallthru edge.  Do nothing but let
1179 	 the two blocks run together; we'll fall out the bottom.  */
1180       gimple_seq_add_seq (&tf->top_p_seq, finally);
1181       return;
1182     }
1183 
1184   finally_label = create_artificial_label (loc);
1185   label_stmt = gimple_build_label (finally_label);
1186   gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);
1187 
1188   gimple_seq_add_seq (&tf->top_p_seq, finally);
1189 
1190   q = tf->goto_queue;
1191   qe = q + tf->goto_queue_active;
1192 
1193   if (tf->may_return)
1194     {
1195       /* Reachable by return expressions only.  Redirect them.  */
1196       for (; q < qe; ++q)
1197 	do_return_redirection (q, finally_label, NULL);
1198       replace_goto_queue (tf);
1199     }
1200   else
1201     {
1202       /* Reachable by goto expressions only.  Redirect them.  */
1203       for (; q < qe; ++q)
1204 	do_goto_redirection (q, finally_label, NULL, tf);
1205       replace_goto_queue (tf);
1206 
1207       if (tf->dest_array[0] == tf->fallthru_label)
1208 	{
1209 	  /* Reachable by goto to fallthru label only.  Redirect it
1210 	     to the new label (already created, sadly), and do not
1211 	     emit the final branch out, or the fallthru label.  */
1212 	  tf->fallthru_label = NULL;
1213 	  return;
1214 	}
1215     }
1216 
1217   /* Place the original return/goto to the original destination
1218      immediately after the finally block. */
1219   x = tf->goto_queue[0].cont_stmt;
1220   gimple_seq_add_stmt (&tf->top_p_seq, x);
1221   maybe_record_in_goto_queue (state, x);
1222 }
1223 
1224 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1225    and outgoing from the finally block.  Implement this by duplicating the
1226    finally block for every destination.  */
1227 
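/* Rough sketch (illustrative): with a fallthru edge and one escaping
   "goto L", the finally block F ends up duplicated,

     <TRY body>
     F'; goto <fallthru_label>;      <-- copy used by the fallthru path
     <lab>: F''; goto L;             <-- copy used by the redirected goto

   plus one more copy on the EH path (ending in a RESX) if the region
   may throw and no EH_ELSE is present.  */
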
1228 static void
1229 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1230 {
1231   gimple_seq finally;
1232   gimple_seq new_stmt;
1233   gimple_seq seq;
1234   gimple *x;
1235   geh_else *eh_else;
1236   tree tmp;
1237   location_t tf_loc = gimple_location (tf->try_finally_expr);
1238 
1239   finally = gimple_try_cleanup (tf->top_p);
1240 
1241   /* Notice EH_ELSE, and simplify some of the remaining code
1242      by considering FINALLY to be the normal return path only.  */
1243   eh_else = get_eh_else (finally);
1244   if (eh_else)
1245     finally = gimple_eh_else_n_body (eh_else);
1246 
1247   tf->top_p_seq = gimple_try_eval (tf->top_p);
1248   new_stmt = NULL;
1249 
1250   if (tf->may_fallthru)
1251     {
1252       seq = lower_try_finally_dup_block (finally, state, tf_loc);
1253       lower_eh_constructs_1 (state, &seq);
1254       gimple_seq_add_seq (&new_stmt, seq);
1255 
1256       tmp = lower_try_finally_fallthru_label (tf);
1257       x = gimple_build_goto (tmp);
1258       gimple_set_location (x, tf_loc);
1259       gimple_seq_add_stmt (&new_stmt, x);
1260     }
1261 
1262   if (tf->may_throw)
1263     {
1264       /* We don't need to copy the EH path of EH_ELSE,
1265 	 since it is only emitted once.  */
1266       if (eh_else)
1267 	seq = gimple_eh_else_e_body (eh_else);
1268       else
1269 	seq = lower_try_finally_dup_block (finally, state, tf_loc);
1270       lower_eh_constructs_1 (state, &seq);
1271 
1272       emit_post_landing_pad (&eh_seq, tf->region);
1273       gimple_seq_add_seq (&eh_seq, seq);
1274       emit_resx (&eh_seq, tf->region);
1275     }
1276 
1277   if (tf->goto_queue)
1278     {
1279       struct goto_queue_node *q, *qe;
1280       int return_index, index;
1281       struct labels_s
1282       {
1283 	struct goto_queue_node *q;
1284 	tree label;
1285       } *labels;
1286 
1287       return_index = tf->dest_array.length ();
1288       labels = XCNEWVEC (struct labels_s, return_index + 1);
1289 
1290       q = tf->goto_queue;
1291       qe = q + tf->goto_queue_active;
1292       for (; q < qe; q++)
1293 	{
1294 	  index = q->index < 0 ? return_index : q->index;
1295 
1296 	  if (!labels[index].q)
1297 	    labels[index].q = q;
1298 	}
1299 
1300       for (index = 0; index < return_index + 1; index++)
1301 	{
1302 	  tree lab;
1303 
1304 	  q = labels[index].q;
1305 	  if (! q)
1306 	    continue;
1307 
1308 	  lab = labels[index].label
1309 	    = create_artificial_label (tf_loc);
1310 
1311 	  if (index == return_index)
1312 	    do_return_redirection (q, lab, NULL);
1313 	  else
1314 	    do_goto_redirection (q, lab, NULL, tf);
1315 
1316 	  x = gimple_build_label (lab);
1317           gimple_seq_add_stmt (&new_stmt, x);
1318 
1319 	  seq = lower_try_finally_dup_block (finally, state, q->location);
1320 	  lower_eh_constructs_1 (state, &seq);
1321           gimple_seq_add_seq (&new_stmt, seq);
1322 
1323           gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1324 	  maybe_record_in_goto_queue (state, q->cont_stmt);
1325 	}
1326 
1327       for (q = tf->goto_queue; q < qe; q++)
1328 	{
1329 	  tree lab;
1330 
1331 	  index = q->index < 0 ? return_index : q->index;
1332 
1333 	  if (labels[index].q == q)
1334 	    continue;
1335 
1336 	  lab = labels[index].label;
1337 
1338 	  if (index == return_index)
1339 	    do_return_redirection (q, lab, NULL);
1340 	  else
1341 	    do_goto_redirection (q, lab, NULL, tf);
1342 	}
1343 
1344       replace_goto_queue (tf);
1345       free (labels);
1346     }
1347 
1348   /* Link the new stmts in only after running replace_goto_queue, so that
1349      we do not process the same goto stmts twice.  */
1350   gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1351 }
1352 
1353 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1354    and outgoing from the finally block.  Implement this by instrumenting
1355    each incoming edge and creating a switch statement at the end of the
1356    finally block that branches to the appropriate destination.  */
1357 
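/* Rough sketch (illustrative, not literal dump output): with a fallthru
   edge and one escaping "goto L", the try body sets a selector before
   entering the single shared copy of the finally block F:

     <TRY body, with the escaping goto replaced by
        "finally_tmp = 0; goto <finally_label>;"
      and "finally_tmp = 1;" emitted on the fallthru path>
     <finally_label>:
     F
     switch (finally_tmp) { case 1: goto <fallthru_label>; default: goto L; }  */
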
1358 static void
1359 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1360 {
1361   struct goto_queue_node *q, *qe;
1362   tree finally_tmp, finally_label;
1363   int return_index, eh_index, fallthru_index;
1364   int nlabels, ndests, j, last_case_index;
1365   tree last_case;
1366   auto_vec<tree> case_label_vec;
1367   gimple_seq switch_body = NULL;
1368   gimple *x;
1369   geh_else *eh_else;
1370   tree tmp;
1371   gimple *switch_stmt;
1372   gimple_seq finally;
1373   hash_map<tree, gimple *> *cont_map = NULL;
1374   /* The location of the TRY_FINALLY stmt.  */
1375   location_t tf_loc = gimple_location (tf->try_finally_expr);
1376   /* The location of the finally block.  */
1377   location_t finally_loc;
1378 
1379   finally = gimple_try_cleanup (tf->top_p);
1380   eh_else = get_eh_else (finally);
1381 
1382   /* Mash the TRY block to the head of the chain.  */
1383   tf->top_p_seq = gimple_try_eval (tf->top_p);
1384 
1385   /* The location of the finally block is either that of its last stmt
1386      or the location of the TRY_FINALLY itself.  */
1387   x = gimple_seq_last_stmt (finally);
1388   finally_loc = x ? gimple_location (x) : tf_loc;
1389 
1390   /* Prepare for switch statement generation.  */
1391   nlabels = tf->dest_array.length ();
1392   return_index = nlabels;
1393   eh_index = return_index + tf->may_return;
1394   fallthru_index = eh_index + (tf->may_throw && !eh_else);
1395   ndests = fallthru_index + tf->may_fallthru;
1396 
1397   finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1398   finally_label = create_artificial_label (finally_loc);
1399 
1400   /* We use vec::quick_push on case_label_vec throughout this function,
1401      since we know the size in advance and allocate precisely as much
1402      space as needed.  */
1403   case_label_vec.create (ndests);
1404   last_case = NULL;
1405   last_case_index = 0;
1406 
1407   /* Begin inserting code for getting to the finally block.  Things
1408      are done in this order to correspond to the sequence the code is
1409      laid out.  */
1410 
1411   if (tf->may_fallthru)
1412     {
1413       x = gimple_build_assign (finally_tmp,
1414 			       build_int_cst (integer_type_node,
1415 					      fallthru_index));
1416       gimple_set_location (x, finally_loc);
1417       gimple_seq_add_stmt (&tf->top_p_seq, x);
1418 
1419       tmp = build_int_cst (integer_type_node, fallthru_index);
1420       last_case = build_case_label (tmp, NULL,
1421 				    create_artificial_label (finally_loc));
1422       case_label_vec.quick_push (last_case);
1423       last_case_index++;
1424 
1425       x = gimple_build_label (CASE_LABEL (last_case));
1426       gimple_seq_add_stmt (&switch_body, x);
1427 
1428       tmp = lower_try_finally_fallthru_label (tf);
1429       x = gimple_build_goto (tmp);
1430       gimple_set_location (x, finally_loc);
1431       gimple_seq_add_stmt (&switch_body, x);
1432     }
1433 
1434   /* For EH_ELSE, emit the exception path (plus resx) now, then
1435      subsequently we only need consider the normal path.  */
1436   if (eh_else)
1437     {
1438       if (tf->may_throw)
1439 	{
1440 	  finally = gimple_eh_else_e_body (eh_else);
1441 	  lower_eh_constructs_1 (state, &finally);
1442 
1443 	  emit_post_landing_pad (&eh_seq, tf->region);
1444 	  gimple_seq_add_seq (&eh_seq, finally);
1445 	  emit_resx (&eh_seq, tf->region);
1446 	}
1447 
1448       finally = gimple_eh_else_n_body (eh_else);
1449     }
1450   else if (tf->may_throw)
1451     {
1452       emit_post_landing_pad (&eh_seq, tf->region);
1453 
1454       x = gimple_build_assign (finally_tmp,
1455 			       build_int_cst (integer_type_node, eh_index));
1456       gimple_seq_add_stmt (&eh_seq, x);
1457 
1458       x = gimple_build_goto (finally_label);
1459       gimple_set_location (x, tf_loc);
1460       gimple_seq_add_stmt (&eh_seq, x);
1461 
1462       tmp = build_int_cst (integer_type_node, eh_index);
1463       last_case = build_case_label (tmp, NULL,
1464 				    create_artificial_label (tf_loc));
1465       case_label_vec.quick_push (last_case);
1466       last_case_index++;
1467 
1468       x = gimple_build_label (CASE_LABEL (last_case));
1469       gimple_seq_add_stmt (&eh_seq, x);
1470       emit_resx (&eh_seq, tf->region);
1471     }
1472 
1473   x = gimple_build_label (finally_label);
1474   gimple_seq_add_stmt (&tf->top_p_seq, x);
1475 
1476   lower_eh_constructs_1 (state, &finally);
1477   gimple_seq_add_seq (&tf->top_p_seq, finally);
1478 
1479   /* Redirect each incoming goto edge.  */
1480   q = tf->goto_queue;
1481   qe = q + tf->goto_queue_active;
1482   j = last_case_index + tf->may_return;
1483   /* Prepare the assignments to finally_tmp that are executed upon the
1484      entrance through a particular edge. */
1485   for (; q < qe; ++q)
1486     {
1487       gimple_seq mod = NULL;
1488       int switch_id;
1489       unsigned int case_index;
1490 
1491       if (q->index < 0)
1492 	{
1493 	  x = gimple_build_assign (finally_tmp,
1494 				   build_int_cst (integer_type_node,
1495 						  return_index));
1496 	  gimple_seq_add_stmt (&mod, x);
1497 	  do_return_redirection (q, finally_label, mod);
1498 	  switch_id = return_index;
1499 	}
1500       else
1501 	{
1502 	  x = gimple_build_assign (finally_tmp,
1503 				   build_int_cst (integer_type_node, q->index));
1504 	  gimple_seq_add_stmt (&mod, x);
1505 	  do_goto_redirection (q, finally_label, mod, tf);
1506 	  switch_id = q->index;
1507 	}
1508 
1509       case_index = j + q->index;
1510       if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1511         {
1512           tree case_lab;
1513 	  tmp = build_int_cst (integer_type_node, switch_id);
1514           case_lab = build_case_label (tmp, NULL,
1515 				       create_artificial_label (tf_loc));
1516           /* We store the cont_stmt in the pointer map, so that we can recover
1517              it in the loop below.  */
1518           if (!cont_map)
1519 	    cont_map = new hash_map<tree, gimple *>;
1520           cont_map->put (case_lab, q->cont_stmt);
1521           case_label_vec.quick_push (case_lab);
1522         }
1523     }
1524   for (j = last_case_index; j < last_case_index + nlabels; j++)
1525     {
1526       gimple *cont_stmt;
1527 
1528       last_case = case_label_vec[j];
1529 
1530       gcc_assert (last_case);
1531       gcc_assert (cont_map);
1532 
1533       cont_stmt = *cont_map->get (last_case);
1534 
1535       x = gimple_build_label (CASE_LABEL (last_case));
1536       gimple_seq_add_stmt (&switch_body, x);
1537       gimple_seq_add_stmt (&switch_body, cont_stmt);
1538       maybe_record_in_goto_queue (state, cont_stmt);
1539     }
1540   if (cont_map)
1541     delete cont_map;
1542 
1543   replace_goto_queue (tf);
1544 
1545   /* Make sure that the last case is the default label, as one is required.
1546      Then sort the labels, which is also required in GIMPLE.  */
1547   CASE_LOW (last_case) = NULL;
1548   tree tem = case_label_vec.pop ();
1549   gcc_assert (tem == last_case);
1550   sort_case_labels (case_label_vec);
1551 
1552   /* Build the switch statement, setting last_case to be the default
1553      label.  */
1554   switch_stmt = gimple_build_switch (finally_tmp, last_case,
1555 				     case_label_vec);
1556   gimple_set_location (switch_stmt, finally_loc);
1557 
1558   /* Link SWITCH_STMT in only after running replace_goto_queue, so that
1559      we do not process the same goto stmts twice.  */
1560   gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1561   gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1562 }
1563 
1564 /* Decide whether or not we are going to duplicate the finally block.
1565    There are several considerations.
1566 
1567    Chief among them, we'd like to prevent egregious code growth.  One way to
1568    do this is to estimate the size of the finally block, multiply
1569    that by the number of copies we'd need to make, and compare against
1570    the estimate of the size of the switch machinery we'd have to add.  */
1571 
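/* A rough worked example (numbers purely illustrative): at -O2, with three
   destinations and a finally block estimated at 12 insns, duplication is
   costed at (12 + 1) * 3 = 39 while the switch machinery is costed at
   10 + 2 * 3 = 16; since 39 < 100, the finally block is still copied.  */
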
1572 static bool
1573 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1574 {
1575   int f_estimate, sw_estimate;
1576   geh_else *eh_else;
1577 
1578   /* If there's an EH_ELSE involved, the exception path is separate
1579      and really doesn't come into play for this computation.  */
1580   eh_else = get_eh_else (finally);
1581   if (eh_else)
1582     {
1583       ndests -= may_throw;
1584       finally = gimple_eh_else_n_body (eh_else);
1585     }
1586 
1587   if (!optimize)
1588     {
1589       gimple_stmt_iterator gsi;
1590 
1591       if (ndests == 1)
1592         return true;
1593 
1594       for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1595 	{
1596 	  /* Duplicate __builtin_stack_restore in the hope of eliminating it
1597 	     on the EH paths and, consequently, useless cleanups.  */
1598 	  gimple *stmt = gsi_stmt (gsi);
1599 	  if (!is_gimple_debug (stmt)
1600 	      && !gimple_clobber_p (stmt)
1601 	      && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1602 	    return false;
1603 	}
1604       return true;
1605     }
1606 
1607   /* Finally estimate N times, plus N gotos.  */
1608   f_estimate = estimate_num_insns_seq (finally, &eni_size_weights);
1609   f_estimate = (f_estimate + 1) * ndests;
1610 
1611   /* Switch statement (cost 10), N variable assignments, N gotos.  */
1612   sw_estimate = 10 + 2 * ndests;
1613 
1614   /* Optimize for size clearly wants our best guess.  */
1615   if (optimize_function_for_size_p (cfun))
1616     return f_estimate < sw_estimate;
1617 
1618   /* ??? These numbers are completely made up so far.  */
1619   if (optimize > 1)
1620     return f_estimate < 100 || f_estimate < sw_estimate * 2;
1621   else
1622     return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1623 }
1624 
1625 /* STATE's current region is the enclosing region for a possible cleanup
1626    region, or the region itself.  Returns TRUE if such a region would be
1627    unreachable.
1628 
1629    Cleanup regions within a must-not-throw region aren't actually reachable
1630    even if there are throwing stmts within them, because the personality
1631    routine will call terminate before unwinding.  */
1632 
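/* For instance (illustrative), a cleanup generated inside the body of a
   C++ "noexcept" function lies within that function's MUST_NOT_THROW
   region; since the personality routine calls terminate rather than
   unwinding into the cleanup, there is no point materializing an EH
   region for it.  */
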
1633 static bool
1634 cleanup_is_dead_in (leh_state *state)
1635 {
1636   if (flag_checking)
1637     {
1638       eh_region reg = state->cur_region;
1639       while (reg && reg->type == ERT_CLEANUP)
1640 	reg = reg->outer;
1641 
1642       gcc_assert (reg == state->outer_non_cleanup);
1643     }
1644 
1645   eh_region reg = state->outer_non_cleanup;
1646   return (reg && reg->type == ERT_MUST_NOT_THROW);
1647 }
1648 
1649 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
1650    to a sequence of labels and blocks, plus the exception region trees
1651    that record all the magic.  This is complicated by the need to
1652    arrange for the FINALLY block to be executed on all exits.  */
1653 
1654 static gimple_seq
lower_try_finally(struct leh_state * state,gtry * tp)1655 lower_try_finally (struct leh_state *state, gtry *tp)
1656 {
1657   struct leh_tf_state this_tf;
1658   struct leh_state this_state;
1659   int ndests;
1660   gimple_seq old_eh_seq;
1661 
1662   /* Process the try block.  */
1663 
1664   memset (&this_tf, 0, sizeof (this_tf));
1665   this_tf.try_finally_expr = tp;
1666   this_tf.top_p = tp;
1667   this_tf.outer = state;
1668   if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state))
1669     {
1670       this_tf.region = gen_eh_region_cleanup (state->cur_region);
1671       this_state.cur_region = this_tf.region;
1672     }
1673   else
1674     {
1675       this_tf.region = NULL;
1676       this_state.cur_region = state->cur_region;
1677     }
1678 
1679   this_state.outer_non_cleanup = state->outer_non_cleanup;
1680   this_state.ehp_region = state->ehp_region;
1681   this_state.tf = &this_tf;
1682 
1683   old_eh_seq = eh_seq;
1684   eh_seq = NULL;
1685 
1686   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1687 
1688   /* Determine if the try block is escaped through the bottom.  */
1689   this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1690 
1691   /* Determine if any exceptions are possible within the try block.  */
1692   if (this_tf.region)
1693     this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1694   if (this_tf.may_throw)
1695     honor_protect_cleanup_actions (state, &this_state, &this_tf);
1696 
1697   /* Determine how many edges (still) reach the finally block.  Or rather,
1698      how many destinations are reached by the finally block.  Use this to
1699      determine how we process the finally block itself.  */
1700 
1701   ndests = this_tf.dest_array.length ();
1702   ndests += this_tf.may_fallthru;
1703   ndests += this_tf.may_return;
1704   ndests += this_tf.may_throw;
1705 
1706   /* If the FINALLY block is not reachable, dike it out.  */
1707   if (ndests == 0)
1708     {
1709       gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1710       gimple_try_set_cleanup (tp, NULL);
1711     }
1712   /* If the finally block doesn't fall through, then any destination
1713      we might try to impose there isn't reached either.  There may be
1714      some minor amount of cleanup and redirection still needed.  */
1715   else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1716     lower_try_finally_nofallthru (state, &this_tf);
1717 
1718   /* We can easily special-case redirection to a single destination.  */
1719   else if (ndests == 1)
1720     lower_try_finally_onedest (state, &this_tf);
1721   else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1722 				    gimple_try_cleanup (tp)))
1723     lower_try_finally_copy (state, &this_tf);
1724   else
1725     lower_try_finally_switch (state, &this_tf);
1726 
1727   /* If someone requested we add a label at the end of the transformed
1728      block, do so.  */
1729   if (this_tf.fallthru_label)
1730     {
1731       /* This must be reached only if ndests == 0. */
1732       gimple *x = gimple_build_label (this_tf.fallthru_label);
1733       gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1734     }
1735 
1736   this_tf.dest_array.release ();
1737   free (this_tf.goto_queue);
1738   if (this_tf.goto_queue_map)
1739     delete this_tf.goto_queue_map;
1740 
1741   /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1742      If there was no old eh_seq, then the append is trivially already done.  */
1743   if (old_eh_seq)
1744     {
1745       if (eh_seq == NULL)
1746 	eh_seq = old_eh_seq;
1747       else
1748 	{
1749 	  gimple_seq new_eh_seq = eh_seq;
1750 	  eh_seq = old_eh_seq;
1751 	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1752 	}
1753     }
1754 
1755   return this_tf.top_p_seq;
1756 }
1757 
1758 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
1759    list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1760    exception region trees that record all the magic.  */
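/* Roughly, and only as an illustration of the shape produced here:

     try { BODY } catch (T1) { H1 } catch (T2) { H2 }

   keeps BODY on the main path, followed by an <out> label, while the
   function's trailing EH sequence receives the post-landing-pad label,
   an eh_dispatch for the new ERT_TRY region, a resx, and one label per
   handler, with H1/H2 each ending in a goto to <out>; the region's catch
   list points at those handler labels.  */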
1761 
1762 static gimple_seq
lower_catch(struct leh_state * state,gtry * tp)1763 lower_catch (struct leh_state *state, gtry *tp)
1764 {
1765   eh_region try_region = NULL;
1766   struct leh_state this_state = *state;
1767   gimple_stmt_iterator gsi;
1768   tree out_label;
1769   gimple_seq new_seq, cleanup;
1770   gimple *x;
1771   geh_dispatch *eh_dispatch;
1772   location_t try_catch_loc = gimple_location (tp);
1773   location_t catch_loc = UNKNOWN_LOCATION;
1774 
1775   if (flag_exceptions)
1776     {
1777       try_region = gen_eh_region_try (state->cur_region);
1778       this_state.cur_region = try_region;
1779       this_state.outer_non_cleanup = this_state.cur_region;
1780     }
1781 
1782   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1783 
1784   if (!eh_region_may_contain_throw (try_region))
1785     return gimple_try_eval (tp);
1786 
1787   new_seq = NULL;
1788   eh_dispatch = gimple_build_eh_dispatch (try_region->index);
1789   gimple_seq_add_stmt (&new_seq, eh_dispatch);
1790   emit_resx (&new_seq, try_region);
1791 
1792   this_state.cur_region = state->cur_region;
1793   this_state.outer_non_cleanup = state->outer_non_cleanup;
1794   this_state.ehp_region = try_region;
1795 
1796   /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1797      itself, so that e.g. for coverage purposes the nested cleanups don't
1798      appear before the cleanup body.  See PR64634 for details.  */
1799   gimple_seq old_eh_seq = eh_seq;
1800   eh_seq = NULL;
1801 
1802   out_label = NULL;
1803   cleanup = gimple_try_cleanup (tp);
1804   for (gsi = gsi_start (cleanup);
1805        !gsi_end_p (gsi);
1806        gsi_next (&gsi))
1807     {
1808       eh_catch c;
1809       gcatch *catch_stmt;
1810       gimple_seq handler;
1811 
1812       catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1813       if (catch_loc == UNKNOWN_LOCATION)
1814 	catch_loc = gimple_location (catch_stmt);
1815       c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1816 
1817       handler = gimple_catch_handler (catch_stmt);
1818       lower_eh_constructs_1 (&this_state, &handler);
1819 
1820       c->label = create_artificial_label (UNKNOWN_LOCATION);
1821       x = gimple_build_label (c->label);
1822       gimple_seq_add_stmt (&new_seq, x);
1823 
1824       gimple_seq_add_seq (&new_seq, handler);
1825 
1826       if (gimple_seq_may_fallthru (new_seq))
1827 	{
1828 	  if (!out_label)
1829 	    out_label = create_artificial_label (try_catch_loc);
1830 
1831 	  x = gimple_build_goto (out_label);
1832 	  gimple_seq_add_stmt (&new_seq, x);
1833 	}
1834       if (!c->type_list)
1835 	break;
1836     }
1837 
1838   /* Try to set a location on the dispatching construct to avoid inheriting
1839      the location of the previous statement.  */
1840   gimple_set_location (eh_dispatch, catch_loc);
1841 
1842   gimple_try_set_cleanup (tp, new_seq);
1843 
1844   gimple_seq new_eh_seq = eh_seq;
1845   eh_seq = old_eh_seq;
1846   gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1847   gimple_seq_add_seq (&eh_seq, new_eh_seq);
1848   return ret_seq;
1849 }
1850 
1851 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
1852    GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1853    region trees that record all the magic.  */
1854 
1855 static gimple_seq
lower_eh_filter(struct leh_state * state,gtry * tp)1856 lower_eh_filter (struct leh_state *state, gtry *tp)
1857 {
1858   struct leh_state this_state = *state;
1859   eh_region this_region = NULL;
1860   gimple *inner, *x;
1861   gimple_seq new_seq;
1862 
1863   inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1864 
1865   if (flag_exceptions)
1866     {
1867       this_region = gen_eh_region_allowed (state->cur_region,
1868 				           gimple_eh_filter_types (inner));
1869       this_state.cur_region = this_region;
1870       this_state.outer_non_cleanup = this_state.cur_region;
1871     }
1872 
1873   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1874 
1875   if (!eh_region_may_contain_throw (this_region))
1876     return gimple_try_eval (tp);
1877 
1878   this_state.cur_region = state->cur_region;
1879   this_state.ehp_region = this_region;
1880 
1881   new_seq = NULL;
1882   x = gimple_build_eh_dispatch (this_region->index);
1883   gimple_set_location (x, gimple_location (tp));
1884   gimple_seq_add_stmt (&new_seq, x);
1885   emit_resx (&new_seq, this_region);
1886 
1887   this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1888   x = gimple_build_label (this_region->u.allowed.label);
1889   gimple_seq_add_stmt (&new_seq, x);
1890 
1891   lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1892   gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1893 
1894   gimple_try_set_cleanup (tp, new_seq);
1895 
1896   return frob_into_branch_around (tp, this_region, NULL);
1897 }
1898 
1899 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
1900    a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1901    plus the exception region trees that record all the magic.  */
1902 
1903 static gimple_seq
lower_eh_must_not_throw(struct leh_state * state,gtry * tp)1904 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1905 {
1906   struct leh_state this_state = *state;
1907 
1908   if (flag_exceptions)
1909     {
1910       gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1911       eh_region this_region;
1912 
1913       this_region = gen_eh_region_must_not_throw (state->cur_region);
1914       this_region->u.must_not_throw.failure_decl
1915 	= gimple_eh_must_not_throw_fndecl (
1916 	    as_a <geh_mnt *> (inner));
1917       this_region->u.must_not_throw.failure_loc
1918 	= LOCATION_LOCUS (gimple_location (tp));
1919 
1920       /* In order to get mangling applied to this decl, we must mark it
1921 	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
1922 	 needs to happen.  */
1923       TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1924 
1925       this_state.cur_region = this_region;
1926       this_state.outer_non_cleanup = this_state.cur_region;
1927     }
1928 
1929   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1930 
1931   return gimple_try_eval (tp);
1932 }
1933 
1934 /* Implement a cleanup expression.  This is similar to try-finally,
1935    except that we only execute the cleanup block for exception edges.  */
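/* A hypothetical C++ instance: if a later member's constructor throws,
   the already-constructed earlier members must be destroyed only on the
   exception path; the front end expresses that as a try/catch-kind
   GIMPLE_TRY whose cleanup is just those destructor calls, and such a
   construct ends up here.  */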
1936 
1937 static gimple_seq
lower_cleanup(struct leh_state * state,gtry * tp)1938 lower_cleanup (struct leh_state *state, gtry *tp)
1939 {
1940   struct leh_state this_state = *state;
1941   eh_region this_region = NULL;
1942   struct leh_tf_state fake_tf;
1943   gimple_seq result;
1944   bool cleanup_dead = cleanup_is_dead_in (state);
1945 
1946   if (flag_exceptions && !cleanup_dead)
1947     {
1948       this_region = gen_eh_region_cleanup (state->cur_region);
1949       this_state.cur_region = this_region;
1950       this_state.outer_non_cleanup = state->outer_non_cleanup;
1951     }
1952 
1953   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1954 
1955   if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1956     return gimple_try_eval (tp);
1957 
1958   /* Build enough of a try-finally state so that we can reuse
1959      honor_protect_cleanup_actions.  */
1960   memset (&fake_tf, 0, sizeof (fake_tf));
1961   fake_tf.top_p = fake_tf.try_finally_expr = tp;
1962   fake_tf.outer = state;
1963   fake_tf.region = this_region;
1964   fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1965   fake_tf.may_throw = true;
1966 
1967   honor_protect_cleanup_actions (state, NULL, &fake_tf);
1968 
1969   if (fake_tf.may_throw)
1970     {
1971       /* In this case honor_protect_cleanup_actions had nothing to do,
1972 	 and we should process this normally.  */
1973       lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1974       result = frob_into_branch_around (tp, this_region,
1975                                         fake_tf.fallthru_label);
1976     }
1977   else
1978     {
1979       /* In this case honor_protect_cleanup_actions did nearly all of
1980 	 the work.  All we have left is to append the fallthru_label.  */
1981 
1982       result = gimple_try_eval (tp);
1983       if (fake_tf.fallthru_label)
1984 	{
1985 	  gimple *x = gimple_build_label (fake_tf.fallthru_label);
1986 	  gimple_seq_add_stmt (&result, x);
1987 	}
1988     }
1989   return result;
1990 }
1991 
1992 /* Main loop for lowering eh constructs. Also moves gsi to the next
1993    statement. */
1994 
1995 static void
lower_eh_constructs_2(struct leh_state * state,gimple_stmt_iterator * gsi)1996 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1997 {
1998   gimple_seq replace;
1999   gimple *x;
2000   gimple *stmt = gsi_stmt (*gsi);
2001 
2002   switch (gimple_code (stmt))
2003     {
2004     case GIMPLE_CALL:
2005       {
2006 	tree fndecl = gimple_call_fndecl (stmt);
2007 	tree rhs, lhs;
2008 
2009 	if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
2010 	  switch (DECL_FUNCTION_CODE (fndecl))
2011 	    {
2012 	    case BUILT_IN_EH_POINTER:
2013 	      /* The front end may have generated a call to
2014 		 __builtin_eh_pointer (0) within a catch region.  Replace
2015 		 this zero argument with the current catch region number.  */
2016 	      if (state->ehp_region)
2017 		{
2018 		  tree nr = build_int_cst (integer_type_node,
2019 					   state->ehp_region->index);
2020 		  gimple_call_set_arg (stmt, 0, nr);
2021 		}
2022 	      else
2023 		{
2024 		  /* The user has done something silly.  Remove it.  */
2025 		  rhs = null_pointer_node;
2026 		  goto do_replace;
2027 		}
2028 	      break;
2029 
2030 	    case BUILT_IN_EH_FILTER:
2031 	      /* ??? This should never appear, but since it's a builtin it
2032 		 is accessible to abuse by users.  Just remove it and
2033 		 replace the use with the arbitrary value zero.  */
2034 	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2035 	    do_replace:
2036 	      lhs = gimple_call_lhs (stmt);
2037 	      x = gimple_build_assign (lhs, rhs);
2038 	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
2039 	      /* FALLTHRU */
2040 
2041 	    case BUILT_IN_EH_COPY_VALUES:
2042 	      /* Likewise this should not appear.  Remove it.  */
2043 	      gsi_remove (gsi, true);
2044 	      return;
2045 
2046 	    default:
2047 	      break;
2048 	    }
2049       }
2050       /* FALLTHRU */
2051 
2052     case GIMPLE_ASSIGN:
2053       /* If the stmt can throw, use a new temporary for the assignment
2054          to a LHS.  This makes sure the old value of the LHS is
2055 	 available on the EH edge.  Only do so for statements that
2056 	 potentially fall through (e.g. no noreturn calls), otherwise
2057 	 this new assignment might create fake fallthru regions.  */
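      /* For instance (assuming -fnon-call-exceptions makes the division
	 throwing), "x = y / z" is rewritten as "tmp = y / z; x = tmp;"
	 with TMP a fresh temporary, so the EH edge out of the division
	 still sees the old value of x.  */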
2058       if (stmt_could_throw_p (cfun, stmt)
2059 	  && gimple_has_lhs (stmt)
2060 	  && gimple_stmt_may_fallthru (stmt)
2061 	  && !tree_could_throw_p (gimple_get_lhs (stmt))
2062 	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2063 	{
2064 	  tree lhs = gimple_get_lhs (stmt);
2065 	  tree tmp = create_tmp_var (TREE_TYPE (lhs));
2066 	  gimple *s = gimple_build_assign (lhs, tmp);
2067 	  gimple_set_location (s, gimple_location (stmt));
2068 	  gimple_set_block (s, gimple_block (stmt));
2069 	  gimple_set_lhs (stmt, tmp);
2070 	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
2071 	}
2072       /* Look for things that can throw exceptions, and record them.  */
2073       if (state->cur_region && stmt_could_throw_p (cfun, stmt))
2074 	{
2075 	  record_stmt_eh_region (state->cur_region, stmt);
2076 	  note_eh_region_may_contain_throw (state->cur_region);
2077 	}
2078       break;
2079 
2080     case GIMPLE_COND:
2081     case GIMPLE_GOTO:
2082     case GIMPLE_RETURN:
2083       maybe_record_in_goto_queue (state, stmt);
2084       break;
2085 
2086     case GIMPLE_SWITCH:
2087       verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2088       break;
2089 
2090     case GIMPLE_TRY:
2091       {
2092 	gtry *try_stmt = as_a <gtry *> (stmt);
2093 	if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2094 	  replace = lower_try_finally (state, try_stmt);
2095 	else
2096 	  {
2097 	    x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2098 	    if (!x)
2099 	      {
2100 		replace = gimple_try_eval (try_stmt);
2101 		lower_eh_constructs_1 (state, &replace);
2102 	      }
2103 	    else
2104 	      switch (gimple_code (x))
2105 		{
2106 		case GIMPLE_CATCH:
2107 		  replace = lower_catch (state, try_stmt);
2108 		  break;
2109 		case GIMPLE_EH_FILTER:
2110 		  replace = lower_eh_filter (state, try_stmt);
2111 		  break;
2112 		case GIMPLE_EH_MUST_NOT_THROW:
2113 		  replace = lower_eh_must_not_throw (state, try_stmt);
2114 		  break;
2115 		case GIMPLE_EH_ELSE:
2116 		  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
2117 		  gcc_unreachable ();
2118 		default:
2119 		  replace = lower_cleanup (state, try_stmt);
2120 		  break;
2121 		}
2122 	  }
2123       }
2124 
2125       /* Remove the old stmt and insert the transformed sequence
2126 	 instead. */
2127       gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2128       gsi_remove (gsi, true);
2129 
2130       /* Return since we don't want gsi_next ().  */
2131       return;
2132 
2133     case GIMPLE_EH_ELSE:
2134       /* We should be eliminating this in lower_try_finally et al.  */
2135       gcc_unreachable ();
2136 
2137     default:
2138       /* A type, a decl, or some kind of statement that we're not
2139 	 interested in.  Don't walk them.  */
2140       break;
2141     }
2142 
2143   gsi_next (gsi);
2144 }
2145 
2146 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2147 
2148 static void
lower_eh_constructs_1(struct leh_state * state,gimple_seq * pseq)2149 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2150 {
2151   gimple_stmt_iterator gsi;
2152   for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2153     lower_eh_constructs_2 (state, &gsi);
2154 }
2155 
2156 namespace {
2157 
2158 const pass_data pass_data_lower_eh =
2159 {
2160   GIMPLE_PASS, /* type */
2161   "eh", /* name */
2162   OPTGROUP_NONE, /* optinfo_flags */
2163   TV_TREE_EH, /* tv_id */
2164   PROP_gimple_lcf, /* properties_required */
2165   PROP_gimple_leh, /* properties_provided */
2166   0, /* properties_destroyed */
2167   0, /* todo_flags_start */
2168   0, /* todo_flags_finish */
2169 };
2170 
2171 class pass_lower_eh : public gimple_opt_pass
2172 {
2173 public:
pass_lower_eh(gcc::context * ctxt)2174   pass_lower_eh (gcc::context *ctxt)
2175     : gimple_opt_pass (pass_data_lower_eh, ctxt)
2176   {}
2177 
2178   /* opt_pass methods: */
2179   virtual unsigned int execute (function *);
2180 
2181 }; // class pass_lower_eh
2182 
2183 unsigned int
execute(function * fun)2184 pass_lower_eh::execute (function *fun)
2185 {
2186   struct leh_state null_state;
2187   gimple_seq bodyp;
2188 
2189   bodyp = gimple_body (current_function_decl);
2190   if (bodyp == NULL)
2191     return 0;
2192 
2193   finally_tree = new hash_table<finally_tree_hasher> (31);
2194   eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2195   memset (&null_state, 0, sizeof (null_state));
2196 
2197   collect_finally_tree_1 (bodyp, NULL);
2198   lower_eh_constructs_1 (&null_state, &bodyp);
2199   gimple_set_body (current_function_decl, bodyp);
2200 
2201   /* We assume there's a return statement, or something, at the end of
2202      the function, and thus plopping the EH sequence afterward won't
2203      change anything.  */
2204   gcc_assert (!gimple_seq_may_fallthru (bodyp));
2205   gimple_seq_add_seq (&bodyp, eh_seq);
2206 
2207   /* We assume that since BODYP already existed, adding EH_SEQ to it
2208      didn't change its value, and we don't have to re-set the function.  */
2209   gcc_assert (bodyp == gimple_body (current_function_decl));
2210 
2211   delete finally_tree;
2212   finally_tree = NULL;
2213   BITMAP_FREE (eh_region_may_contain_throw_map);
2214   eh_seq = NULL;
2215 
2216   /* If this function needs a language-specific EH personality routine
2217      and the frontend didn't already set one, do so now.  */
2218   if (function_needs_eh_personality (fun) == eh_personality_lang
2219       && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2220     DECL_FUNCTION_PERSONALITY (current_function_decl)
2221       = lang_hooks.eh_personality ();
2222 
2223   return 0;
2224 }
2225 
2226 } // anon namespace
2227 
2228 gimple_opt_pass *
make_pass_lower_eh(gcc::context * ctxt)2229 make_pass_lower_eh (gcc::context *ctxt)
2230 {
2231   return new pass_lower_eh (ctxt);
2232 }
2233 
2234 /* Create the multiple edges from an EH_DISPATCH statement to all of
2235    the possible handlers for its EH region.  Return true if there's
2236    no fallthru edge; false if there is.  */
2237 
2238 bool
make_eh_dispatch_edges(geh_dispatch * stmt)2239 make_eh_dispatch_edges (geh_dispatch *stmt)
2240 {
2241   eh_region r;
2242   eh_catch c;
2243   basic_block src, dst;
2244 
2245   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2246   src = gimple_bb (stmt);
2247 
2248   switch (r->type)
2249     {
2250     case ERT_TRY:
2251       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2252 	{
2253 	  dst = label_to_block (cfun, c->label);
2254 	  make_edge (src, dst, 0);
2255 
2256 	  /* A catch-all handler doesn't have a fallthru.  */
2257 	  if (c->type_list == NULL)
2258 	    return false;
2259 	}
2260       break;
2261 
2262     case ERT_ALLOWED_EXCEPTIONS:
2263       dst = label_to_block (cfun, r->u.allowed.label);
2264       make_edge (src, dst, 0);
2265       break;
2266 
2267     default:
2268       gcc_unreachable ();
2269     }
2270 
2271   return true;
2272 }
2273 
2274 /* Create the single EH edge from STMT to its nearest landing pad,
2275    if there is such a landing pad within the current function.  */
2276 
2277 void
make_eh_edges(gimple * stmt)2278 make_eh_edges (gimple *stmt)
2279 {
2280   basic_block src, dst;
2281   eh_landing_pad lp;
2282   int lp_nr;
2283 
2284   lp_nr = lookup_stmt_eh_lp (stmt);
2285   if (lp_nr <= 0)
2286     return;
2287 
2288   lp = get_eh_landing_pad_from_number (lp_nr);
2289   gcc_assert (lp != NULL);
2290 
2291   src = gimple_bb (stmt);
2292   dst = label_to_block (cfun, lp->post_landing_pad);
2293   make_edge (src, dst, EDGE_EH);
2294 }
2295 
2296 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2297    do not actually perform the final edge redirection.
2298 
2299    CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2300    we intend to change the destination EH region as well; this means
2301    EH_LANDING_PAD_NR must already be set on the destination block label.
2302    If false, we're being called from generic cfg manipulation code and we
2303    should preserve our place within the region tree.  */
2304 
2305 static void
redirect_eh_edge_1(edge edge_in,basic_block new_bb,bool change_region)2306 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2307 {
2308   eh_landing_pad old_lp, new_lp;
2309   basic_block old_bb;
2310   gimple *throw_stmt;
2311   int old_lp_nr, new_lp_nr;
2312   tree old_label, new_label;
2313   edge_iterator ei;
2314   edge e;
2315 
2316   old_bb = edge_in->dest;
2317   old_label = gimple_block_label (old_bb);
2318   old_lp_nr = EH_LANDING_PAD_NR (old_label);
2319   gcc_assert (old_lp_nr > 0);
2320   old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2321 
2322   throw_stmt = last_stmt (edge_in->src);
2323   gcc_checking_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2324 
2325   new_label = gimple_block_label (new_bb);
2326 
2327   /* Look for an existing region that might be using NEW_BB already.  */
2328   new_lp_nr = EH_LANDING_PAD_NR (new_label);
2329   if (new_lp_nr)
2330     {
2331       new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2332       gcc_assert (new_lp);
2333 
2334       /* Unless CHANGE_REGION is true, the new and old landing pad
2335 	 had better be associated with the same EH region.  */
2336       gcc_assert (change_region || new_lp->region == old_lp->region);
2337     }
2338   else
2339     {
2340       new_lp = NULL;
2341       gcc_assert (!change_region);
2342     }
2343 
2344   /* Notice when we redirect the last EH edge away from OLD_BB.  */
2345   FOR_EACH_EDGE (e, ei, old_bb->preds)
2346     if (e != edge_in && (e->flags & EDGE_EH))
2347       break;
2348 
2349   if (new_lp)
2350     {
2351       /* NEW_LP already exists.  If there are still edges into OLD_LP,
2352 	 there's nothing to do with the EH tree.  If there are no more
2353 	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2354 	 If CHANGE_REGION is true, then our caller is expecting to remove
2355 	 the landing pad.  */
2356       if (e == NULL && !change_region)
2357 	remove_eh_landing_pad (old_lp);
2358     }
2359   else
2360     {
2361       /* No correct landing pad exists.  If there are no more edges
2362 	 into OLD_LP, then we can simply re-use the existing landing pad.
2363 	 Otherwise, we have to create a new landing pad.  */
2364       if (e == NULL)
2365 	{
2366 	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2367 	  new_lp = old_lp;
2368 	}
2369       else
2370 	new_lp = gen_eh_landing_pad (old_lp->region);
2371       new_lp->post_landing_pad = new_label;
2372       EH_LANDING_PAD_NR (new_label) = new_lp->index;
2373     }
2374 
2375   /* Maybe move the throwing statement to the new region.  */
2376   if (old_lp != new_lp)
2377     {
2378       remove_stmt_from_eh_lp (throw_stmt);
2379       add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2380     }
2381 }
2382 
2383 /* Redirect EH edge EDGE_IN to NEW_BB and return the redirected edge.  */
2384 
2385 edge
redirect_eh_edge(edge edge_in,basic_block new_bb)2386 redirect_eh_edge (edge edge_in, basic_block new_bb)
2387 {
2388   redirect_eh_edge_1 (edge_in, new_bb, false);
2389   return ssa_redirect_edge (edge_in, new_bb);
2390 }
2391 
2392 /* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
2393    labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2394    The actual edge update will happen in the caller.  */
2395 
2396 void
redirect_eh_dispatch_edge(geh_dispatch * stmt,edge e,basic_block new_bb)2397 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2398 {
2399   tree new_lab = gimple_block_label (new_bb);
2400   bool any_changed = false;
2401   basic_block old_bb;
2402   eh_region r;
2403   eh_catch c;
2404 
2405   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2406   switch (r->type)
2407     {
2408     case ERT_TRY:
2409       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2410 	{
2411 	  old_bb = label_to_block (cfun, c->label);
2412 	  if (old_bb == e->dest)
2413 	    {
2414 	      c->label = new_lab;
2415 	      any_changed = true;
2416 	    }
2417 	}
2418       break;
2419 
2420     case ERT_ALLOWED_EXCEPTIONS:
2421       old_bb = label_to_block (cfun, r->u.allowed.label);
2422       gcc_assert (old_bb == e->dest);
2423       r->u.allowed.label = new_lab;
2424       any_changed = true;
2425       break;
2426 
2427     default:
2428       gcc_unreachable ();
2429     }
2430 
2431   gcc_assert (any_changed);
2432 }
2433 
2434 /* Helper function for operation_could_trap_p and stmt_could_throw_p.  */
2435 
2436 bool
operation_could_trap_helper_p(enum tree_code op,bool fp_operation,bool honor_trapv,bool honor_nans,bool honor_snans,tree divisor,bool * handled)2437 operation_could_trap_helper_p (enum tree_code op,
2438 			       bool fp_operation,
2439 			       bool honor_trapv,
2440 			       bool honor_nans,
2441 			       bool honor_snans,
2442 			       tree divisor,
2443 			       bool *handled)
2444 {
2445   *handled = true;
2446   switch (op)
2447     {
2448     case TRUNC_DIV_EXPR:
2449     case CEIL_DIV_EXPR:
2450     case FLOOR_DIV_EXPR:
2451     case ROUND_DIV_EXPR:
2452     case EXACT_DIV_EXPR:
2453     case CEIL_MOD_EXPR:
2454     case FLOOR_MOD_EXPR:
2455     case ROUND_MOD_EXPR:
2456     case TRUNC_MOD_EXPR:
2457       if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2458         return true;
2459       if (TREE_CODE (divisor) == VECTOR_CST)
2460 	{
2461 	  /* Inspired by initializer_each_zero_or_onep.  */
2462 	  unsigned HOST_WIDE_INT nelts = vector_cst_encoded_nelts (divisor);
2463 	  if (VECTOR_CST_STEPPED_P (divisor)
2464 	      && !TYPE_VECTOR_SUBPARTS (TREE_TYPE (divisor))
2465 		    .is_constant (&nelts))
2466 	    return true;
2467 	  for (unsigned int i = 0; i < nelts; ++i)
2468 	    {
2469 	      tree elt = vector_cst_elt (divisor, i);
2470 	      if (integer_zerop (elt))
2471 		return true;
2472 	    }
2473 	}
2474       return false;
2475 
2476     case RDIV_EXPR:
2477       if (fp_operation)
2478 	{
2479 	  if (honor_snans)
2480 	    return true;
2481 	  return flag_trapping_math;
2482 	}
2483       /* Fixed point operations also use RDIV_EXPR.  */
2484       if (!TREE_CONSTANT (divisor) || fixed_zerop (divisor))
2485 	return true;
2486       return false;
2487 
2488     case LT_EXPR:
2489     case LE_EXPR:
2490     case GT_EXPR:
2491     case GE_EXPR:
2492     case LTGT_EXPR:
2493       /* Some floating point comparisons may trap.  */
2494       return honor_nans;
2495 
2496     case EQ_EXPR:
2497     case NE_EXPR:
2498     case UNORDERED_EXPR:
2499     case ORDERED_EXPR:
2500     case UNLT_EXPR:
2501     case UNLE_EXPR:
2502     case UNGT_EXPR:
2503     case UNGE_EXPR:
2504     case UNEQ_EXPR:
2505       return honor_snans;
2506 
2507     case NEGATE_EXPR:
2508     case ABS_EXPR:
2509     case CONJ_EXPR:
2510       /* These operations don't trap with floating point.  */
2511       if (honor_trapv)
2512 	return true;
2513       return false;
2514 
2515     case ABSU_EXPR:
2516       /* ABSU_EXPR never traps.  */
2517       return false;
2518 
2519     case PLUS_EXPR:
2520     case MINUS_EXPR:
2521     case MULT_EXPR:
2522       /* Any floating arithmetic may trap.  */
2523       if (fp_operation && flag_trapping_math)
2524 	return true;
2525       if (honor_trapv)
2526 	return true;
2527       return false;
2528 
2529     case COMPLEX_EXPR:
2530     case CONSTRUCTOR:
2531       /* Constructing an object cannot trap.  */
2532       return false;
2533 
2534     case COND_EXPR:
2535     case VEC_COND_EXPR:
2536       /* Whether *COND_EXPR can trap depends on whether the
2537 	 first argument can trap, so signal it as not handled.
2538 	 Whether lhs is floating or not doesn't matter.  */
2539       *handled = false;
2540       return false;
2541 
2542     default:
2543       /* Any floating arithmetic may trap.  */
2544       if (fp_operation && flag_trapping_math)
2545 	return true;
2546 
2547       *handled = false;
2548       return false;
2549     }
2550 }
2551 
2552 /* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
2553    to floating-point values.  HONOR_TRAPV is true if OP is applied to integer
2554    type operands that may trap.  If OP is a division operator, DIVISOR contains
2555    the value of the divisor.  */
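/* For example, operation_could_trap_p (TRUNC_DIV_EXPR, false, false,
   integer_zero_node) is true because the (constant) divisor is zero,
   whereas PLUS_EXPR on ordinary integer operands yields false.  */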
2556 
2557 bool
operation_could_trap_p(enum tree_code op,bool fp_operation,bool honor_trapv,tree divisor)2558 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2559 			tree divisor)
2560 {
2561   bool honor_nans = (fp_operation && flag_trapping_math
2562 		     && !flag_finite_math_only);
2563   bool honor_snans = fp_operation && flag_signaling_nans != 0;
2564   bool handled;
2565 
2566   /* This function cannot tell whether or not COND_EXPR could trap,
2567      because that depends on its condition op.  */
2568   gcc_assert (op != COND_EXPR);
2569 
2570   if (TREE_CODE_CLASS (op) != tcc_comparison
2571       && TREE_CODE_CLASS (op) != tcc_unary
2572       && TREE_CODE_CLASS (op) != tcc_binary)
2573     return false;
2574 
2575   return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2576 					honor_nans, honor_snans, divisor,
2577 					&handled);
2578 }
2579 
2580 
2581 /* Returns true if it is possible to prove that the index of
2582    an array access REF (an ARRAY_REF expression) falls into the
2583    array bounds.  */
2584 
2585 static bool
in_array_bounds_p(tree ref)2586 in_array_bounds_p (tree ref)
2587 {
2588   tree idx = TREE_OPERAND (ref, 1);
2589   tree min, max;
2590 
2591   if (TREE_CODE (idx) != INTEGER_CST)
2592     return false;
2593 
2594   min = array_ref_low_bound (ref);
2595   max = array_ref_up_bound (ref);
2596   if (!min
2597       || !max
2598       || TREE_CODE (min) != INTEGER_CST
2599       || TREE_CODE (max) != INTEGER_CST)
2600     return false;
2601 
2602   if (tree_int_cst_lt (idx, min)
2603       || tree_int_cst_lt (max, idx))
2604     return false;
2605 
2606   return true;
2607 }
2608 
2609 /* Returns true if it is possible to prove that the range of
2610    an array access REF (an ARRAY_RANGE_REF expression) falls
2611    into the array bounds.  */
2612 
2613 static bool
range_in_array_bounds_p(tree ref)2614 range_in_array_bounds_p (tree ref)
2615 {
2616   tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2617   tree range_min, range_max, min, max;
2618 
2619   range_min = TYPE_MIN_VALUE (domain_type);
2620   range_max = TYPE_MAX_VALUE (domain_type);
2621   if (!range_min
2622       || !range_max
2623       || TREE_CODE (range_min) != INTEGER_CST
2624       || TREE_CODE (range_max) != INTEGER_CST)
2625     return false;
2626 
2627   min = array_ref_low_bound (ref);
2628   max = array_ref_up_bound (ref);
2629   if (!min
2630       || !max
2631       || TREE_CODE (min) != INTEGER_CST
2632       || TREE_CODE (max) != INTEGER_CST)
2633     return false;
2634 
2635   if (tree_int_cst_lt (range_min, min)
2636       || tree_int_cst_lt (max, range_max))
2637     return false;
2638 
2639   return true;
2640 }
2641 
2642 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2643    location or floating point arithmetic.  Cf. the rtl version, may_trap_p.
2644    This routine expects only GIMPLE lhs or rhs input.  */
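/* For example, an ARRAY_REF with a constant index provably within the
   array bounds (and a non-trapping base) does not trap, while a MEM_REF
   through an arbitrary pointer without TREE_THIS_NOTRAP does.  */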
2645 
2646 bool
tree_could_trap_p(tree expr)2647 tree_could_trap_p (tree expr)
2648 {
2649   enum tree_code code;
2650   bool fp_operation = false;
2651   bool honor_trapv = false;
2652   tree t, base, div = NULL_TREE;
2653 
2654   if (!expr)
2655     return false;
2656 
2657   /* In COND_EXPR and VEC_COND_EXPR only the condition may trap, but
2658      they won't appear as operands in GIMPLE form, so this is just for the
2659      GENERIC uses where it needs to recurse on the operands and so
2660      *COND_EXPR itself doesn't trap.  */
2661   if (TREE_CODE (expr) == COND_EXPR || TREE_CODE (expr) == VEC_COND_EXPR)
2662     return false;
2663 
2664   code = TREE_CODE (expr);
2665   t = TREE_TYPE (expr);
2666 
2667   if (t)
2668     {
2669       if (COMPARISON_CLASS_P (expr))
2670 	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2671       else
2672 	fp_operation = FLOAT_TYPE_P (t);
2673       honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2674     }
2675 
2676   if (TREE_CODE_CLASS (code) == tcc_binary)
2677     div = TREE_OPERAND (expr, 1);
2678   if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2679     return true;
2680 
2681  restart:
2682   switch (code)
2683     {
2684     case COMPONENT_REF:
2685     case REALPART_EXPR:
2686     case IMAGPART_EXPR:
2687     case BIT_FIELD_REF:
2688     case VIEW_CONVERT_EXPR:
2689     case WITH_SIZE_EXPR:
2690       expr = TREE_OPERAND (expr, 0);
2691       code = TREE_CODE (expr);
2692       goto restart;
2693 
2694     case ARRAY_RANGE_REF:
2695       base = TREE_OPERAND (expr, 0);
2696       if (tree_could_trap_p (base))
2697 	return true;
2698       if (TREE_THIS_NOTRAP (expr))
2699 	return false;
2700       return !range_in_array_bounds_p (expr);
2701 
2702     case ARRAY_REF:
2703       base = TREE_OPERAND (expr, 0);
2704       if (tree_could_trap_p (base))
2705 	return true;
2706       if (TREE_THIS_NOTRAP (expr))
2707 	return false;
2708       return !in_array_bounds_p (expr);
2709 
2710     case TARGET_MEM_REF:
2711     case MEM_REF:
2712       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2713 	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2714 	return true;
2715       if (TREE_THIS_NOTRAP (expr))
2716 	return false;
2717       /* We cannot prove that the access is in-bounds when we have
2718          variable-index TARGET_MEM_REFs.  */
2719       if (code == TARGET_MEM_REF
2720 	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2721 	return true;
2722       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2723 	{
2724 	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2725 	  poly_offset_int off = mem_ref_offset (expr);
2726 	  if (maybe_lt (off, 0))
2727 	    return true;
2728 	  if (TREE_CODE (base) == STRING_CST)
2729 	    return maybe_le (TREE_STRING_LENGTH (base), off);
2730 	  tree size = DECL_SIZE_UNIT (base);
2731 	  if (size == NULL_TREE
2732 	      || !poly_int_tree_p (size)
2733 	      || maybe_le (wi::to_poly_offset (size), off))
2734 	    return true;
2735 	  /* Now we are sure the first byte of the access is inside
2736 	     the object.  */
2737 	  return false;
2738 	}
2739       return true;
2740 
2741     case INDIRECT_REF:
2742       return !TREE_THIS_NOTRAP (expr);
2743 
2744     case ASM_EXPR:
2745       return TREE_THIS_VOLATILE (expr);
2746 
2747     case CALL_EXPR:
2748       /* Internal function calls do not trap.  */
2749       if (CALL_EXPR_FN (expr) == NULL_TREE)
2750 	return false;
2751       t = get_callee_fndecl (expr);
2752       /* Assume that indirect calls and calls to weak functions may trap.  */
2753       if (!t || !DECL_P (t))
2754 	return true;
2755       if (DECL_WEAK (t))
2756 	return tree_could_trap_p (t);
2757       return false;
2758 
2759     case FUNCTION_DECL:
2760       /* Assume that accesses to weak functions may trap, unless we know
2761 	 they are certainly defined in the current TU or in some other
2762 	 LTO partition.  */
2763       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2764 	{
2765 	  cgraph_node *node = cgraph_node::get (expr);
2766 	  if (node)
2767 	    node = node->function_symbol ();
2768 	  return !(node && node->in_other_partition);
2769 	}
2770       return false;
2771 
2772     case VAR_DECL:
2773       /* Assume that accesses to weak vars may trap, unless we know
2774 	 they are certainly defined in the current TU or in some other
2775 	 LTO partition.  */
2776       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2777 	{
2778 	  varpool_node *node = varpool_node::get (expr);
2779 	  if (node)
2780 	    node = node->ultimate_alias_target ();
2781 	  return !(node && node->in_other_partition);
2782 	}
2783       return false;
2784 
2785     default:
2786       return false;
2787     }
2788 }
2789 
2790 /* Return non-NULL if there is an integer operation with trapping overflow
2791    we can rewrite into non-trapping.  Called via walk_tree from
2792    rewrite_to_non_trapping_overflow.  */
2793 
2794 static tree
find_trapping_overflow(tree * tp,int * walk_subtrees,void * data)2795 find_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2796 {
2797   if (EXPR_P (*tp)
2798       && ANY_INTEGRAL_TYPE_P (TREE_TYPE (*tp))
2799       && !operation_no_trapping_overflow (TREE_TYPE (*tp), TREE_CODE (*tp)))
2800     return *tp;
2801   if (IS_TYPE_OR_DECL_P (*tp)
2802       || (TREE_CODE (*tp) == SAVE_EXPR && data == NULL))
2803     *walk_subtrees = 0;
2804   return NULL_TREE;
2805 }
2806 
2807 /* Rewrite selected operations into unsigned arithmetic, so that they
2808    don't trap on overflow.  */
2809 
2810 static tree
replace_trapping_overflow(tree * tp,int * walk_subtrees,void * data)2811 replace_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2812 {
2813   if (find_trapping_overflow (tp, walk_subtrees, data))
2814     {
2815       tree type = TREE_TYPE (*tp);
2816       tree utype = unsigned_type_for (type);
2817       *walk_subtrees = 0;
2818       int len = TREE_OPERAND_LENGTH (*tp);
2819       for (int i = 0; i < len; ++i)
2820 	walk_tree (&TREE_OPERAND (*tp, i), replace_trapping_overflow,
2821 		   data, (hash_set<tree> *) data);
2822 
2823       if (TREE_CODE (*tp) == ABS_EXPR)
2824 	{
2825 	  TREE_SET_CODE (*tp, ABSU_EXPR);
2826 	  TREE_TYPE (*tp) = utype;
2827 	  *tp = fold_convert (type, *tp);
2828 	}
2829       else
2830 	{
2831 	  TREE_TYPE (*tp) = utype;
2832 	  len = TREE_OPERAND_LENGTH (*tp);
2833 	  for (int i = 0; i < len; ++i)
2834 	    TREE_OPERAND (*tp, i)
2835 	      = fold_convert (utype, TREE_OPERAND (*tp, i));
2836 	  *tp = fold_convert (type, *tp);
2837 	}
2838     }
2839   return NULL_TREE;
2840 }
2841 
2842 /* If any subexpression of EXPR can trap due to -ftrapv, rewrite it
2843    using unsigned arithmetic to avoid traps in it.  */
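/* A sketch of the effect (hypothetical expression): with -ftrapv, a
   signed "a + b" within EXPR becomes (int) ((unsigned int) a
   + (unsigned int) b), so the addition wraps instead of trapping.  */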
2844 
2845 tree
rewrite_to_non_trapping_overflow(tree expr)2846 rewrite_to_non_trapping_overflow (tree expr)
2847 {
2848   if (!flag_trapv)
2849     return expr;
2850   hash_set<tree> pset;
2851   if (!walk_tree (&expr, find_trapping_overflow, &pset, &pset))
2852     return expr;
2853   expr = unshare_expr (expr);
2854   pset.empty ();
2855   walk_tree (&expr, replace_trapping_overflow, &pset, &pset);
2856   return expr;
2857 }
2858 
2859 /* Helper for stmt_could_throw_p.  Return true if the assignment STMT
2860    may throw.  */
2861 
2862 static bool
stmt_could_throw_1_p(gassign * stmt)2863 stmt_could_throw_1_p (gassign *stmt)
2864 {
2865   enum tree_code code = gimple_assign_rhs_code (stmt);
2866   bool honor_nans = false;
2867   bool honor_snans = false;
2868   bool fp_operation = false;
2869   bool honor_trapv = false;
2870   tree t;
2871   size_t i;
2872   bool handled, ret;
2873 
2874   if (TREE_CODE_CLASS (code) == tcc_comparison
2875       || TREE_CODE_CLASS (code) == tcc_unary
2876       || TREE_CODE_CLASS (code) == tcc_binary)
2877     {
2878       if (TREE_CODE_CLASS (code) == tcc_comparison)
2879 	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2880       else
2881 	t = TREE_TYPE (gimple_assign_lhs (stmt));
2882       fp_operation = FLOAT_TYPE_P (t);
2883       if (fp_operation)
2884 	{
2885 	  honor_nans = flag_trapping_math && !flag_finite_math_only;
2886 	  honor_snans = flag_signaling_nans != 0;
2887 	}
2888       else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2889 	honor_trapv = true;
2890     }
2891 
2892   /* First check the LHS.  */
2893   if (tree_could_trap_p (gimple_assign_lhs (stmt)))
2894     return true;
2895 
2896   /* Check if the main expression may trap.  */
2897   ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2898 				       honor_nans, honor_snans,
2899 				       gimple_assign_rhs2 (stmt),
2900 				       &handled);
2901   if (handled)
2902     return ret;
2903 
2904   /* If the expression does not trap, see if any of the individual operands may
2905      trap.  */
2906   for (i = 1; i < gimple_num_ops (stmt); i++)
2907     if (tree_could_trap_p (gimple_op (stmt, i)))
2908       return true;
2909 
2910   return false;
2911 }
2912 
2913 
2914 /* Return true if statement STMT within FUN could throw an exception.  */
2915 
2916 bool
stmt_could_throw_p(function * fun,gimple * stmt)2917 stmt_could_throw_p (function *fun, gimple *stmt)
2918 {
2919   if (!flag_exceptions)
2920     return false;
2921 
2922   /* The only statements that can throw an exception are assignments,
2923      conditionals, calls, resx, and asms.  */
2924   switch (gimple_code (stmt))
2925     {
2926     case GIMPLE_RESX:
2927       return true;
2928 
2929     case GIMPLE_CALL:
2930       return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2931 
2932     case GIMPLE_COND:
2933       {
2934 	if (fun && !fun->can_throw_non_call_exceptions)
2935 	  return false;
2936 	gcond *cond = as_a <gcond *> (stmt);
2937 	tree lhs = gimple_cond_lhs (cond);
2938 	return operation_could_trap_p (gimple_cond_code (cond),
2939 				       FLOAT_TYPE_P (TREE_TYPE (lhs)),
2940 				       false, NULL_TREE);
2941       }
2942 
2943     case GIMPLE_ASSIGN:
2944       if ((fun && !fun->can_throw_non_call_exceptions)
2945 	  || gimple_clobber_p (stmt))
2946         return false;
2947       return stmt_could_throw_1_p (as_a <gassign *> (stmt));
2948 
2949     case GIMPLE_ASM:
2950       if (fun && !fun->can_throw_non_call_exceptions)
2951         return false;
2952       return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2953 
2954     default:
2955       return false;
2956     }
2957 }
2958 
2959 /* Return true if STMT in function FUN must be assumed necessary because of
2960    non-call exceptions.  */
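/* For example, with -fnon-call-exceptions and without
   -fdelete-dead-exceptions, a possibly trapping load whose value is
   otherwise unused still has the visible effect of raising the
   exception, so it must be kept.  */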
2961 
2962 bool
stmt_unremovable_because_of_non_call_eh_p(function * fun,gimple * stmt)2963 stmt_unremovable_because_of_non_call_eh_p (function *fun, gimple *stmt)
2964 {
2965   return (fun->can_throw_non_call_exceptions
2966 	  && !fun->can_delete_dead_exceptions
2967 	  && stmt_could_throw_p (fun, stmt));
2968 }
2969 
2970 /* Return true if expression T could throw an exception.  */
2971 
2972 bool
tree_could_throw_p(tree t)2973 tree_could_throw_p (tree t)
2974 {
2975   if (!flag_exceptions)
2976     return false;
2977   if (TREE_CODE (t) == MODIFY_EXPR)
2978     {
2979       if (cfun->can_throw_non_call_exceptions
2980           && tree_could_trap_p (TREE_OPERAND (t, 0)))
2981         return true;
2982       t = TREE_OPERAND (t, 1);
2983     }
2984 
2985   if (TREE_CODE (t) == WITH_SIZE_EXPR)
2986     t = TREE_OPERAND (t, 0);
2987   if (TREE_CODE (t) == CALL_EXPR)
2988     return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2989   if (cfun->can_throw_non_call_exceptions)
2990     return tree_could_trap_p (t);
2991   return false;
2992 }
2993 
2994 /* Return true if STMT can throw an exception that is not caught within its
2995    function FUN.  FUN may be NULL, in which case we answer conservatively
2996    and assume the exception can escape.  */
2997 
2998 bool
stmt_can_throw_external(function * fun,gimple * stmt)2999 stmt_can_throw_external (function *fun, gimple *stmt)
3000 {
3001   int lp_nr;
3002 
3003   if (!stmt_could_throw_p (fun, stmt))
3004     return false;
3005   if (!fun)
3006     return true;
3007 
3008   lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
3009   return lp_nr == 0;
3010 }
3011 
3012 /* Return true if STMT can throw an exception that is caught within its
3013    function FUN.  */
3014 
3015 bool
stmt_can_throw_internal(function * fun,gimple * stmt)3016 stmt_can_throw_internal (function *fun, gimple *stmt)
3017 {
3018   int lp_nr;
3019 
3020   gcc_checking_assert (fun);
3021   if (!stmt_could_throw_p (fun, stmt))
3022     return false;
3023 
3024   lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
3025   return lp_nr > 0;
3026 }
3027 
3028 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
3029    remove any entry it might have from the EH table.  Return true if
3030    any change was made.  */
3031 
3032 bool
maybe_clean_eh_stmt_fn(struct function * ifun,gimple * stmt)3033 maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt)
3034 {
3035   if (stmt_could_throw_p (ifun, stmt))
3036     return false;
3037   return remove_stmt_from_eh_lp_fn (ifun, stmt);
3038 }
3039 
3040 /* Likewise, but always use the current function.  */
3041 
3042 bool
maybe_clean_eh_stmt(gimple * stmt)3043 maybe_clean_eh_stmt (gimple *stmt)
3044 {
3045   return maybe_clean_eh_stmt_fn (cfun, stmt);
3046 }
3047 
3048 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
3049    OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
3050    in the table if it should be in there.  Return TRUE if a replacement was
3051    done that may require an EH edge purge.  */
3052 
3053 bool
maybe_clean_or_replace_eh_stmt(gimple * old_stmt,gimple * new_stmt)3054 maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt)
3055 {
3056   int lp_nr = lookup_stmt_eh_lp (old_stmt);
3057 
3058   if (lp_nr != 0)
3059     {
3060       bool new_stmt_could_throw = stmt_could_throw_p (cfun, new_stmt);
3061 
3062       if (new_stmt == old_stmt && new_stmt_could_throw)
3063 	return false;
3064 
3065       remove_stmt_from_eh_lp (old_stmt);
3066       if (new_stmt_could_throw)
3067 	{
3068 	  add_stmt_to_eh_lp (new_stmt, lp_nr);
3069 	  return false;
3070 	}
3071       else
3072 	return true;
3073     }
3074 
3075   return false;
3076 }
3077 
3078 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
3079    in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
3080    operand is the return value of duplicate_eh_regions.  */
3081 
3082 bool
maybe_duplicate_eh_stmt_fn(struct function * new_fun,gimple * new_stmt,struct function * old_fun,gimple * old_stmt,hash_map<void *,void * > * map,int default_lp_nr)3083 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt,
3084 			    struct function *old_fun, gimple *old_stmt,
3085 			    hash_map<void *, void *> *map,
3086 			    int default_lp_nr)
3087 {
3088   int old_lp_nr, new_lp_nr;
3089 
3090   if (!stmt_could_throw_p (new_fun, new_stmt))
3091     return false;
3092 
3093   old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
3094   if (old_lp_nr == 0)
3095     {
3096       if (default_lp_nr == 0)
3097 	return false;
3098       new_lp_nr = default_lp_nr;
3099     }
3100   else if (old_lp_nr > 0)
3101     {
3102       eh_landing_pad old_lp, new_lp;
3103 
3104       old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
3105       new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
3106       new_lp_nr = new_lp->index;
3107     }
3108   else
3109     {
3110       eh_region old_r, new_r;
3111 
3112       old_r = (*old_fun->eh->region_array)[-old_lp_nr];
3113       new_r = static_cast<eh_region> (*map->get (old_r));
3114       new_lp_nr = -new_r->index;
3115     }
3116 
3117   add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
3118   return true;
3119 }
3120 
3121 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
3122    and thus no remapping is required.  */
3123 
3124 bool
maybe_duplicate_eh_stmt(gimple * new_stmt,gimple * old_stmt)3125 maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt)
3126 {
3127   int lp_nr;
3128 
3129   if (!stmt_could_throw_p (cfun, new_stmt))
3130     return false;
3131 
3132   lp_nr = lookup_stmt_eh_lp (old_stmt);
3133   if (lp_nr == 0)
3134     return false;
3135 
3136   add_stmt_to_eh_lp (new_stmt, lp_nr);
3137   return true;
3138 }
3139 
3140 /* Returns TRUE if ONEH and TWOH are exception handlers (gimple_try_cleanup of
3141    GIMPLE_TRY) that are similar enough to be considered the same.  Currently
3142    this only handles handlers consisting of a single call, as that's the
3143    important case for C++: a destructor call for a particular object showing
3144    up in multiple handlers.  */
3145 
3146 static bool
same_handler_p(gimple_seq oneh,gimple_seq twoh)3147 same_handler_p (gimple_seq oneh, gimple_seq twoh)
3148 {
3149   gimple_stmt_iterator gsi;
3150   gimple *ones, *twos;
3151   unsigned int ai;
3152 
3153   gsi = gsi_start (oneh);
3154   if (!gsi_one_before_end_p (gsi))
3155     return false;
3156   ones = gsi_stmt (gsi);
3157 
3158   gsi = gsi_start (twoh);
3159   if (!gsi_one_before_end_p (gsi))
3160     return false;
3161   twos = gsi_stmt (gsi);
3162 
3163   if (!is_gimple_call (ones)
3164       || !is_gimple_call (twos)
3165       || gimple_call_lhs (ones)
3166       || gimple_call_lhs (twos)
3167       || gimple_call_chain (ones)
3168       || gimple_call_chain (twos)
3169       || !gimple_call_same_target_p (ones, twos)
3170       || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3171     return false;
3172 
3173   for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3174     if (!operand_equal_p (gimple_call_arg (ones, ai),
3175                           gimple_call_arg (twos, ai), 0))
3176       return false;
3177 
3178   return true;
3179 }
3180 
3181 /* Optimize
3182     try { A() } finally { try { ~B() } catch { ~A() } }
3183     try { ... } finally { ~A() }
3184    into
3185     try { A() } catch { ~B() }
3186     try { ~B() ... } finally { ~A() }
3187 
3188    This occurs frequently in C++, where A is a local variable and B is a
3189    temporary used in the initializer for A.  */
3190 
3191 static void
optimize_double_finally(gtry * one,gtry * two)3192 optimize_double_finally (gtry *one, gtry *two)
3193 {
3194   gimple *oneh;
3195   gimple_stmt_iterator gsi;
3196   gimple_seq cleanup;
3197 
3198   cleanup = gimple_try_cleanup (one);
3199   gsi = gsi_start (cleanup);
3200   if (!gsi_one_before_end_p (gsi))
3201     return;
3202 
3203   oneh = gsi_stmt (gsi);
3204   if (gimple_code (oneh) != GIMPLE_TRY
3205       || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3206     return;
3207 
3208   if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3209     {
3210       gimple_seq seq = gimple_try_eval (oneh);
3211 
3212       gimple_try_set_cleanup (one, seq);
3213       gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3214       seq = copy_gimple_seq_and_replace_locals (seq);
3215       gimple_seq_add_seq (&seq, gimple_try_eval (two));
3216       gimple_try_set_eval (two, seq);
3217     }
3218 }
3219 
3220 /* Perform EH refactoring optimizations that are simpler to do when code
3221    flow has been lowered but EH structures haven't.  */
3222 
3223 static void
refactor_eh_r(gimple_seq seq)3224 refactor_eh_r (gimple_seq seq)
3225 {
3226   gimple_stmt_iterator gsi;
3227   gimple *one, *two;
3228 
3229   one = NULL;
3230   two = NULL;
3231   gsi = gsi_start (seq);
3232   while (1)
3233     {
3234       one = two;
3235       if (gsi_end_p (gsi))
3236 	two = NULL;
3237       else
3238 	two = gsi_stmt (gsi);
3239       if (one && two)
3240 	if (gtry *try_one = dyn_cast <gtry *> (one))
3241 	  if (gtry *try_two = dyn_cast <gtry *> (two))
3242 	    if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3243 		&& gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3244 	      optimize_double_finally (try_one, try_two);
3245       if (one)
3246 	switch (gimple_code (one))
3247 	  {
3248 	  case GIMPLE_TRY:
3249 	    refactor_eh_r (gimple_try_eval (one));
3250 	    refactor_eh_r (gimple_try_cleanup (one));
3251 	    break;
3252 	  case GIMPLE_CATCH:
3253 	    refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3254 	    break;
3255 	  case GIMPLE_EH_FILTER:
3256 	    refactor_eh_r (gimple_eh_filter_failure (one));
3257 	    break;
3258 	  case GIMPLE_EH_ELSE:
3259 	    {
3260 	      geh_else *eh_else_stmt = as_a <geh_else *> (one);
3261 	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3262 	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3263 	    }
3264 	    break;
3265 	  default:
3266 	    break;
3267 	  }
3268       if (two)
3269 	gsi_next (&gsi);
3270       else
3271 	break;
3272     }
3273 }
3274 
3275 namespace {
3276 
3277 const pass_data pass_data_refactor_eh =
3278 {
3279   GIMPLE_PASS, /* type */
3280   "ehopt", /* name */
3281   OPTGROUP_NONE, /* optinfo_flags */
3282   TV_TREE_EH, /* tv_id */
3283   PROP_gimple_lcf, /* properties_required */
3284   0, /* properties_provided */
3285   0, /* properties_destroyed */
3286   0, /* todo_flags_start */
3287   0, /* todo_flags_finish */
3288 };
3289 
3290 class pass_refactor_eh : public gimple_opt_pass
3291 {
3292 public:
pass_refactor_eh(gcc::context * ctxt)3293   pass_refactor_eh (gcc::context *ctxt)
3294     : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3295   {}
3296 
3297   /* opt_pass methods: */
gate(function *)3298   virtual bool gate (function *) { return flag_exceptions != 0; }
execute(function *)3299   virtual unsigned int execute (function *)
3300     {
3301       refactor_eh_r (gimple_body (current_function_decl));
3302       return 0;
3303     }
3304 
3305 }; // class pass_refactor_eh
3306 
3307 } // anon namespace
3308 
3309 gimple_opt_pass *
3310 make_pass_refactor_eh (gcc::context *ctxt)
3311 {
3312   return new pass_refactor_eh (ctxt);
3313 }
3314 
3315 /* At the end of gimple optimization, we can lower RESX.  */
3316 
3317 static bool
3318 lower_resx (basic_block bb, gresx *stmt,
3319 	    hash_map<eh_region, tree> *mnt_map)
3320 {
3321   int lp_nr;
3322   eh_region src_r, dst_r;
3323   gimple_stmt_iterator gsi;
3324   gimple *x;
3325   tree fn, src_nr;
3326   bool ret = false;
3327 
3328   lp_nr = lookup_stmt_eh_lp (stmt);
3329   if (lp_nr != 0)
3330     dst_r = get_eh_region_from_lp_number (lp_nr);
3331   else
3332     dst_r = NULL;
3333 
3334   src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3335   gsi = gsi_last_bb (bb);
3336 
3337   if (src_r == NULL)
3338     {
3339       /* We can wind up with no source region when pass_cleanup_eh shows
3340 	 that there are no entries into an eh region and deletes it, but
3341 	 then the block that contains the resx isn't removed.  This can
3342 	 happen without optimization when the switch statement created by
3343 	 lower_try_finally_switch isn't simplified to remove the eh case.
3344 
3345 	 Resolve this by expanding the resx node to an abort.  */
3346 
3347       fn = builtin_decl_implicit (BUILT_IN_TRAP);
3348       x = gimple_build_call (fn, 0);
3349       gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3350 
3351       while (EDGE_COUNT (bb->succs) > 0)
3352 	remove_edge (EDGE_SUCC (bb, 0));
3353     }
3354   else if (dst_r)
3355     {
3356       /* When we have a destination region, we resolve this by copying
3357 	 the excptr and filter values into place, and changing the edge
3358 	 to immediately after the landing pad.  */
3359       edge e;
3360 
3361       if (lp_nr < 0)
3362 	{
3363 	  basic_block new_bb;
3364 	  tree lab;
3365 
3366 	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3367 	     the failure decl into a new block, if needed.  */
3368 	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3369 
3370 	  tree *slot = mnt_map->get (dst_r);
3371 	  if (slot == NULL)
3372 	    {
3373 	      gimple_stmt_iterator gsi2;
3374 
3375 	      new_bb = create_empty_bb (bb);
3376 	      new_bb->count = bb->count;
3377 	      add_bb_to_loop (new_bb, bb->loop_father);
3378 	      lab = gimple_block_label (new_bb);
3379 	      gsi2 = gsi_start_bb (new_bb);
3380 
3381 	      fn = dst_r->u.must_not_throw.failure_decl;
3382 	      x = gimple_build_call (fn, 0);
3383 	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3384 	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3385 
3386 	      mnt_map->put (dst_r, lab);
3387 	    }
3388 	  else
3389 	    {
3390 	      lab = *slot;
3391 	      new_bb = label_to_block (cfun, lab);
3392 	    }
3393 
3394 	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
3395 	  e = make_single_succ_edge (bb, new_bb, EDGE_FALLTHRU);
3396 	}
3397       else
3398 	{
3399 	  edge_iterator ei;
3400 	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3401 
3402 	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3403 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3404 	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
3405 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3406 
3407 	  /* Update the flags for the outgoing edge.  */
3408 	  e = single_succ_edge (bb);
3409 	  gcc_assert (e->flags & EDGE_EH);
3410 	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3411 	  e->probability = profile_probability::always ();
3412 
3413 	  /* If there are no more EH users of the landing pad, delete it.  */
3414 	  FOR_EACH_EDGE (e, ei, e->dest->preds)
3415 	    if (e->flags & EDGE_EH)
3416 	      break;
3417 	  if (e == NULL)
3418 	    {
3419 	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3420 	      remove_eh_landing_pad (lp);
3421 	    }
3422 	}
3423 
3424       ret = true;
3425     }
3426   else
3427     {
3428       tree var;
3429 
3430       /* When we don't have a destination region, this exception escapes
3431 	 up the call chain.  We resolve this by generating a call to the
3432 	 _Unwind_Resume library function.  */
3433 
3434       /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3435 	 with no arguments for C++.  Check for that.  */
3436       if (src_r->use_cxa_end_cleanup)
3437 	{
3438 	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3439 	  x = gimple_build_call (fn, 0);
3440 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3441 	}
3442       else
3443 	{
3444 	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3445 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3446 	  x = gimple_build_call (fn, 1, src_nr);
3447 	  var = create_tmp_var (ptr_type_node);
3448 	  var = make_ssa_name (var, x);
3449 	  gimple_call_set_lhs (x, var);
3450 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3451 
3452 	  /* When exception handling is delegated to a caller function, we
3453 	     have to guarantee that shadow memory variables living on the
3454 	     stack are cleaned before control is given to the parent function.  */
3455 	  if (sanitize_flags_p (SANITIZE_ADDRESS))
3456 	    {
3457 	      tree decl
3458 		= builtin_decl_implicit (BUILT_IN_ASAN_HANDLE_NO_RETURN);
3459 	      gimple *g = gimple_build_call (decl, 0);
3460 	      gimple_set_location (g, gimple_location (stmt));
3461 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3462 	    }
3463 
3464 	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3465 	  x = gimple_build_call (fn, 1, var);
3466 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3467 	}
3468 
3469       gcc_assert (EDGE_COUNT (bb->succs) == 0);
3470     }
3471 
3472   gsi_remove (&gsi, true);
3473 
3474   return ret;
3475 }
3476 
3477 namespace {
3478 
3479 const pass_data pass_data_lower_resx =
3480 {
3481   GIMPLE_PASS, /* type */
3482   "resx", /* name */
3483   OPTGROUP_NONE, /* optinfo_flags */
3484   TV_TREE_EH, /* tv_id */
3485   PROP_gimple_lcf, /* properties_required */
3486   0, /* properties_provided */
3487   0, /* properties_destroyed */
3488   0, /* todo_flags_start */
3489   0, /* todo_flags_finish */
3490 };
3491 
3492 class pass_lower_resx : public gimple_opt_pass
3493 {
3494 public:
3495   pass_lower_resx (gcc::context *ctxt)
3496     : gimple_opt_pass (pass_data_lower_resx, ctxt)
3497   {}
3498 
3499   /* opt_pass methods: */
3500   virtual bool gate (function *) { return flag_exceptions != 0; }
3501   virtual unsigned int execute (function *);
3502 
3503 }; // class pass_lower_resx
3504 
3505 unsigned
3506 pass_lower_resx::execute (function *fun)
3507 {
3508   basic_block bb;
3509   bool dominance_invalidated = false;
3510   bool any_rewritten = false;
3511 
3512   hash_map<eh_region, tree> mnt_map;
3513 
3514   FOR_EACH_BB_FN (bb, fun)
3515     {
3516       gimple *last = last_stmt (bb);
3517       if (last && is_gimple_resx (last))
3518 	{
3519 	  dominance_invalidated |=
3520 	    lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3521 	  any_rewritten = true;
3522 	}
3523     }
3524 
3525   if (dominance_invalidated)
3526     {
3527       free_dominance_info (CDI_DOMINATORS);
3528       free_dominance_info (CDI_POST_DOMINATORS);
3529     }
3530 
3531   return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3532 }
3533 
3534 } // anon namespace
3535 
3536 gimple_opt_pass *
3537 make_pass_lower_resx (gcc::context *ctxt)
3538 {
3539   return new pass_lower_resx (ctxt);
3540 }
3541 
3542 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3543    external throw.  */
3544 
3545 static void
3546 optimize_clobbers (basic_block bb)
3547 {
3548   gimple_stmt_iterator gsi = gsi_last_bb (bb);
3549   bool any_clobbers = false;
3550   bool seen_stack_restore = false;
3551   edge_iterator ei;
3552   edge e;
3553 
3554   /* Only optimize anything if the bb contains at least one clobber,
3555      ends with resx (checked by caller), optionally contains some
3556      debug stmts or labels, and at most one __builtin_stack_restore
3557      call, and has an incoming EH edge.  */
3558   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3559     {
3560       gimple *stmt = gsi_stmt (gsi);
3561       if (is_gimple_debug (stmt))
3562 	continue;
3563       if (gimple_clobber_p (stmt))
3564 	{
3565 	  any_clobbers = true;
3566 	  continue;
3567 	}
3568       if (!seen_stack_restore
3569 	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3570 	{
3571 	  seen_stack_restore = true;
3572 	  continue;
3573 	}
3574       if (gimple_code (stmt) == GIMPLE_LABEL)
3575 	break;
3576       return;
3577     }
3578   if (!any_clobbers)
3579     return;
3580   FOR_EACH_EDGE (e, ei, bb->preds)
3581     if (e->flags & EDGE_EH)
3582       break;
3583   if (e == NULL)
3584     return;
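  /* All conditions hold: the exception leaves the function entirely, so
     the clobbers can no longer be observed and are simply removed.  */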
3585   gsi = gsi_last_bb (bb);
3586   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3587     {
3588       gimple *stmt = gsi_stmt (gsi);
3589       if (!gimple_clobber_p (stmt))
3590 	continue;
3591       unlink_stmt_vdef (stmt);
3592       gsi_remove (&gsi, true);
3593       release_defs (stmt);
3594     }
3595 }
3596 
3597 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3598    internal throw to successor BB.
3599    SUNK, if not NULL, is an array of sequences indexed by basic-block
3600    index to sink to and to pick up sinking opportunities from.
3601    If FOUND_OPPORTUNITY is not NULL then do not perform the optimization
3602    but set *FOUND_OPPORTUNITY to true if an opportunity exists.  */
3603 
3604 static int
3605 sink_clobbers (basic_block bb,
3606 	       gimple_seq *sunk = NULL, bool *found_opportunity = NULL)
3607 {
3608   edge e;
3609   edge_iterator ei;
3610   gimple_stmt_iterator gsi, dgsi;
3611   basic_block succbb;
3612   bool any_clobbers = false;
3613   unsigned todo = 0;
3614 
3615   /* Only optimize if BB has a single EH successor and
3616      all predecessor edges are EH too.  */
3617   if (!single_succ_p (bb)
3618       || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3619     return 0;
3620 
3621   FOR_EACH_EDGE (e, ei, bb->preds)
3622     {
3623       if ((e->flags & EDGE_EH) == 0)
3624 	return 0;
3625     }
3626 
3627   /* And BB contains only CLOBBER stmts before the final
3628      RESX.  */
3629   gsi = gsi_last_bb (bb);
3630   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3631     {
3632       gimple *stmt = gsi_stmt (gsi);
3633       if (is_gimple_debug (stmt))
3634 	continue;
3635       if (gimple_code (stmt) == GIMPLE_LABEL)
3636 	break;
3637       if (!gimple_clobber_p (stmt))
3638 	return 0;
3639       any_clobbers = true;
3640     }
3641   if (!any_clobbers && (!sunk || gimple_seq_empty_p (sunk[bb->index])))
3642     return 0;
3643 
3644   /* If this was a dry run, tell it we found clobbers to sink.  */
3645   if (found_opportunity)
3646     {
3647       *found_opportunity = true;
3648       return 0;
3649     }
3650 
3651   edge succe = single_succ_edge (bb);
3652   succbb = succe->dest;
3653 
3654   /* See if there is a virtual PHI node to take an updated virtual
3655      operand from.  */
3656   gphi *vphi = NULL;
3657   for (gphi_iterator gpi = gsi_start_phis (succbb);
3658        !gsi_end_p (gpi); gsi_next (&gpi))
3659     {
3660       tree res = gimple_phi_result (gpi.phi ());
3661       if (virtual_operand_p (res))
3662 	{
3663 	  vphi = gpi.phi ();
3664 	  break;
3665 	}
3666     }
3667 
3668   gimple *first_sunk = NULL;
3669   gimple *last_sunk = NULL;
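  /* Choose the insertion point: when collecting sequences for a later
     RPO walk (SUNK given and the successor not yet visited), queue the
     clobbers in SUNK; otherwise insert directly after the successor's
     labels.  */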
3670   if (sunk && !(succbb->flags & BB_VISITED))
3671     dgsi = gsi_start (sunk[succbb->index]);
3672   else
3673     dgsi = gsi_after_labels (succbb);
3674   gsi = gsi_last_bb (bb);
3675   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3676     {
3677       gimple *stmt = gsi_stmt (gsi);
3678       tree lhs;
3679       if (is_gimple_debug (stmt))
3680 	continue;
3681       if (gimple_code (stmt) == GIMPLE_LABEL)
3682 	break;
3683       lhs = gimple_assign_lhs (stmt);
3684       /* Unfortunately we don't have dominance info updated at this
3685 	 point, so checking if
3686 	 dominated_by_p (CDI_DOMINATORS, succbb,
3687 			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
3688 	 would be too costly.  Thus, avoid sinking any clobbers that
3689 	 refer to non-(D) SSA_NAMEs.  */
3690       if (TREE_CODE (lhs) == MEM_REF
3691 	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3692 	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3693 	{
3694 	  unlink_stmt_vdef (stmt);
3695 	  gsi_remove (&gsi, true);
3696 	  release_defs (stmt);
3697 	  continue;
3698 	}
3699 
3700       /* As we do not change stmt order when sinking across a
3701          forwarder edge we can keep virtual operands in place.  */
3702       gsi_remove (&gsi, false);
3703       gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3704       if (!first_sunk)
3705 	first_sunk = stmt;
3706       last_sunk = stmt;
3707     }
3708   if (sunk && !gimple_seq_empty_p (sunk[bb->index]))
3709     {
3710       if (!first_sunk)
3711 	first_sunk = gsi_stmt (gsi_last (sunk[bb->index]));
3712       last_sunk = gsi_stmt (gsi_start (sunk[bb->index]));
3713       gsi_insert_seq_before_without_update (&dgsi,
3714 					    sunk[bb->index], GSI_NEW_STMT);
3715       sunk[bb->index] = NULL;
3716     }
3717   if (first_sunk)
3718     {
3719       /* Adjust virtual operands if we sunk across a virtual PHI.  */
3720       if (vphi)
3721 	{
3722 	  imm_use_iterator iter;
3723 	  use_operand_p use_p;
3724 	  gimple *use_stmt;
3725 	  tree phi_def = gimple_phi_result (vphi);
3726 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, phi_def)
3727 	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3728               SET_USE (use_p, gimple_vdef (first_sunk));
3729 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def))
3730 	    {
3731 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (first_sunk)) = 1;
3732 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def) = 0;
3733 	    }
3734 	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe),
3735 		   gimple_vuse (last_sunk));
3736 	  SET_USE (gimple_vuse_op (last_sunk), phi_def);
3737 	}
3738       /* If the successor has multiple predecessors but no virtual PHI node,
3739          arrange for virtual operands to be renamed.  */
3740       else if (!single_pred_p (succbb)
3741 	       && TREE_CODE (gimple_vuse (last_sunk)) == SSA_NAME)
3742 	{
3743 	  mark_virtual_operand_for_renaming (gimple_vuse (last_sunk));
3744 	  todo |= TODO_update_ssa_only_virtuals;
3745 	}
3746     }
3747 
3748   return todo;
3749 }
3750 
3751 /* At the end of inlining, we can lower EH_DISPATCH.  Return true when
3752    we have found some duplicate labels and removed some edges.  */
3753 
3754 static bool
3755 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3756 {
3757   gimple_stmt_iterator gsi;
3758   int region_nr;
3759   eh_region r;
3760   tree filter, fn;
3761   gimple *x;
3762   bool redirected = false;
3763 
3764   region_nr = gimple_eh_dispatch_region (stmt);
3765   r = get_eh_region_from_number (region_nr);
3766 
3767   gsi = gsi_last_bb (src);
3768 
3769   switch (r->type)
3770     {
3771     case ERT_TRY:
3772       {
3773 	auto_vec<tree> labels;
3774 	tree default_label = NULL;
3775 	eh_catch c;
3776 	edge_iterator ei;
3777 	edge e;
3778 	hash_set<tree> seen_values;
3779 
3780 	/* Collect the labels for a switch.  Zero the post_landing_pad
3781 	   field because we'll no longer have anything keeping these labels
3782 	   in existence and the optimizer will be free to merge these
3783 	   blocks at will.  */
3784 	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3785 	  {
3786 	    tree tp_node, flt_node, lab = c->label;
3787 	    bool have_label = false;
3788 
3789 	    c->label = NULL;
3790 	    tp_node = c->type_list;
3791 	    flt_node = c->filter_list;
3792 
3793 	    if (tp_node == NULL)
3794 	      {
3795 	        default_label = lab;
3796 		break;
3797 	      }
3798 	    do
3799 	      {
3800 		/* Filter out duplicate labels that arise when this handler
3801 		   is shadowed by an earlier one.  When no labels are
3802 		   attached to the handler anymore, we remove
3803 		   the corresponding edge and then we delete unreachable
3804 		   blocks at the end of this pass.  */
3805 		if (! seen_values.contains (TREE_VALUE (flt_node)))
3806 		  {
3807 		    tree t = build_case_label (TREE_VALUE (flt_node),
3808 					       NULL, lab);
3809 		    labels.safe_push (t);
3810 		    seen_values.add (TREE_VALUE (flt_node));
3811 		    have_label = true;
3812 		  }
3813 
3814 		tp_node = TREE_CHAIN (tp_node);
3815 		flt_node = TREE_CHAIN (flt_node);
3816 	      }
3817 	    while (tp_node);
3818 	    if (! have_label)
3819 	      {
3820 		remove_edge (find_edge (src, label_to_block (cfun, lab)));
3821 	        redirected = true;
3822 	      }
3823 	  }
3824 
3825 	/* Clean up the edge flags.  */
3826 	FOR_EACH_EDGE (e, ei, src->succs)
3827 	  {
3828 	    if (e->flags & EDGE_FALLTHRU)
3829 	      {
3830 		/* If there was no catch-all, use the fallthru edge.  */
3831 		if (default_label == NULL)
3832 		  default_label = gimple_block_label (e->dest);
3833 		e->flags &= ~EDGE_FALLTHRU;
3834 	      }
3835 	  }
3836 	gcc_assert (default_label != NULL);
3837 
3838 	/* Don't generate a switch if there's only a default case.
3839 	   This is common in the form of try { A; } catch (...) { B; }.  */
3840 	if (!labels.exists ())
3841 	  {
3842 	    e = single_succ_edge (src);
3843 	    e->flags |= EDGE_FALLTHRU;
3844 	  }
3845 	else
3846 	  {
3847 	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3848 	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3849 							 region_nr));
3850 	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3851 	    filter = make_ssa_name (filter, x);
3852 	    gimple_call_set_lhs (x, filter);
3853 	    gimple_set_location (x, gimple_location (stmt));
3854 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3855 
3856 	    /* Turn the default label into a default case.  */
3857 	    default_label = build_case_label (NULL, NULL, default_label);
3858 	    sort_case_labels (labels);
3859 
3860 	    x = gimple_build_switch (filter, default_label, labels);
3861 	    gimple_set_location (x, gimple_location (stmt));
3862 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3863 	  }
3864       }
3865       break;
3866 
3867     case ERT_ALLOWED_EXCEPTIONS:
3868       {
3869 	edge b_e = BRANCH_EDGE (src);
3870 	edge f_e = FALLTHRU_EDGE (src);
3871 
3872 	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3873 	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3874 						     region_nr));
3875 	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3876 	filter = make_ssa_name (filter, x);
3877 	gimple_call_set_lhs (x, filter);
3878 	gimple_set_location (x, gimple_location (stmt));
3879 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3880 
3881 	r->u.allowed.label = NULL;
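	/* Compare the runtime filter value against this region's allowed
	   filter; the branch edge is taken on equality, the former
	   fallthru edge otherwise.  */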
3882 	x = gimple_build_cond (EQ_EXPR, filter,
3883 			       build_int_cst (TREE_TYPE (filter),
3884 					      r->u.allowed.filter),
3885 			       NULL_TREE, NULL_TREE);
3886 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3887 
3888 	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3889         f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3890       }
3891       break;
3892 
3893     default:
3894       gcc_unreachable ();
3895     }
3896 
3897   /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
3898   gsi_remove (&gsi, true);
3899   return redirected;
3900 }
3901 
3902 namespace {
3903 
3904 const pass_data pass_data_lower_eh_dispatch =
3905 {
3906   GIMPLE_PASS, /* type */
3907   "ehdisp", /* name */
3908   OPTGROUP_NONE, /* optinfo_flags */
3909   TV_TREE_EH, /* tv_id */
3910   PROP_gimple_lcf, /* properties_required */
3911   0, /* properties_provided */
3912   0, /* properties_destroyed */
3913   0, /* todo_flags_start */
3914   0, /* todo_flags_finish */
3915 };
3916 
3917 class pass_lower_eh_dispatch : public gimple_opt_pass
3918 {
3919 public:
3920   pass_lower_eh_dispatch (gcc::context *ctxt)
3921     : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3922   {}
3923 
3924   /* opt_pass methods: */
3925   virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3926   virtual unsigned int execute (function *);
3927 
3928 }; // class pass_lower_eh_dispatch
3929 
3930 unsigned
3931 pass_lower_eh_dispatch::execute (function *fun)
3932 {
3933   basic_block bb;
3934   int flags = 0;
3935   bool redirected = false;
3936   bool any_resx_to_process = false;
3937 
3938   assign_filter_values ();
3939 
3940   FOR_EACH_BB_FN (bb, fun)
3941     {
3942       gimple *last = last_stmt (bb);
3943       if (last == NULL)
3944 	continue;
3945       if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3946 	{
3947 	  redirected |= lower_eh_dispatch (bb,
3948 					   as_a <geh_dispatch *> (last));
3949 	  flags |= TODO_update_ssa_only_virtuals;
3950 	}
3951       else if (gimple_code (last) == GIMPLE_RESX)
3952 	{
3953 	  if (stmt_can_throw_external (fun, last))
3954 	    optimize_clobbers (bb);
3955 	  else if (!any_resx_to_process)
3956 	    sink_clobbers (bb, NULL, &any_resx_to_process);
3957 	}
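      /* Clear BB_VISITED; the RPO walk below uses it to mark blocks whose
	 queued sunk sequences have already been inserted.  */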
3958       bb->flags &= ~BB_VISITED;
3959     }
3960   if (redirected)
3961     {
3962       free_dominance_info (CDI_DOMINATORS);
3963       delete_unreachable_blocks ();
3964     }
3965 
3966   if (any_resx_to_process)
3967     {
3968       /* Make sure to catch all secondary sinking opportunities by processing
3969 	 blocks in RPO order and after all CFG modifications from lowering
3970 	 and unreachable block removal.  */
3971       int *rpo = XNEWVEC  (int, n_basic_blocks_for_fn (fun));
3972       int rpo_n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false);
3973       gimple_seq *sunk = XCNEWVEC (gimple_seq, last_basic_block_for_fn (fun));
3974       for (int i = 0; i < rpo_n; ++i)
3975 	{
3976 	  bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3977 	  gimple *last = last_stmt (bb);
3978 	  if (last
3979 	      && gimple_code (last) == GIMPLE_RESX
3980 	      && !stmt_can_throw_external (fun, last))
3981 	    flags |= sink_clobbers (bb, sunk);
3982 	  /* If there were any clobbers sunk into this BB, insert them now.  */
3983 	  if (!gimple_seq_empty_p (sunk[bb->index]))
3984 	    {
3985 	      gimple_stmt_iterator gsi = gsi_after_labels (bb);
3986 	      gsi_insert_seq_before (&gsi, sunk[bb->index], GSI_NEW_STMT);
3987 	      sunk[bb->index] = NULL;
3988 	    }
3989 	  bb->flags |= BB_VISITED;
3990 	}
3991       free (rpo);
3992       free (sunk);
3993     }
3994 
3995   return flags;
3996 }
3997 
3998 } // anon namespace
3999 
4000 gimple_opt_pass *
4001 make_pass_lower_eh_dispatch (gcc::context *ctxt)
4002 {
4003   return new pass_lower_eh_dispatch (ctxt);
4004 }
4005 
4006 /* Walk statements, see what regions and, optionally, landing pads
4007    are really referenced.
4008 
4009    Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
4010    and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
4011 
4012    Passing NULL for LP_REACHABLEP is valid; in this case only reachable
4013    regions are marked.
4014 
4015    The caller is responsible for freeing the returned sbitmaps.  */
4016 
4017 static void
4018 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
4019 {
4020   sbitmap r_reachable, lp_reachable;
4021   basic_block bb;
4022   bool mark_landing_pads = (lp_reachablep != NULL);
4023   gcc_checking_assert (r_reachablep != NULL);
4024 
4025   r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
4026   bitmap_clear (r_reachable);
4027   *r_reachablep = r_reachable;
4028 
4029   if (mark_landing_pads)
4030     {
4031       lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
4032       bitmap_clear (lp_reachable);
4033       *lp_reachablep = lp_reachable;
4034     }
4035   else
4036     lp_reachable = NULL;
4037 
4038   FOR_EACH_BB_FN (bb, cfun)
4039     {
4040       gimple_stmt_iterator gsi;
4041 
4042       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4043 	{
4044 	  gimple *stmt = gsi_stmt (gsi);
4045 
4046 	  if (mark_landing_pads)
4047 	    {
4048 	      int lp_nr = lookup_stmt_eh_lp (stmt);
4049 
4050 	      /* Negative LP numbers are MUST_NOT_THROW regions which
4051 		 are not considered BB enders.  */
4052 	      if (lp_nr < 0)
4053 		bitmap_set_bit (r_reachable, -lp_nr);
4054 
4055 	      /* Positive LP numbers are real landing pads, and BB enders.  */
4056 	      else if (lp_nr > 0)
4057 		{
4058 		  gcc_assert (gsi_one_before_end_p (gsi));
4059 		  eh_region region = get_eh_region_from_lp_number (lp_nr);
4060 		  bitmap_set_bit (r_reachable, region->index);
4061 		  bitmap_set_bit (lp_reachable, lp_nr);
4062 		}
4063 	    }
4064 
4065 	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
4066 	  switch (gimple_code (stmt))
4067 	    {
4068 	    case GIMPLE_RESX:
4069 	      bitmap_set_bit (r_reachable,
4070 			      gimple_resx_region (as_a <gresx *> (stmt)));
4071 	      break;
4072 	    case GIMPLE_EH_DISPATCH:
4073 	      bitmap_set_bit (r_reachable,
4074 			      gimple_eh_dispatch_region (
4075                                 as_a <geh_dispatch *> (stmt)));
4076 	      break;
4077 	    case GIMPLE_CALL:
4078 	      if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
4079 		for (int i = 0; i < 2; ++i)
4080 		  {
4081 		    tree rt = gimple_call_arg (stmt, i);
4082 		    HOST_WIDE_INT ri = tree_to_shwi (rt);
4083 
4084 		    gcc_assert (ri == (int)ri);
4085 		    bitmap_set_bit (r_reachable, ri);
4086 		  }
4087 	      break;
4088 	    default:
4089 	      break;
4090 	    }
4091 	}
4092     }
4093 }
4094 
4095 /* Remove unreachable handlers and unreachable landing pads.  */
4096 
4097 static void
4098 remove_unreachable_handlers (void)
4099 {
4100   sbitmap r_reachable, lp_reachable;
4101   eh_region region;
4102   eh_landing_pad lp;
4103   unsigned i;
4104 
4105   mark_reachable_handlers (&r_reachable, &lp_reachable);
4106 
4107   if (dump_file)
4108     {
4109       fprintf (dump_file, "Before removal of unreachable regions:\n");
4110       dump_eh_tree (dump_file, cfun);
4111       fprintf (dump_file, "Reachable regions: ");
4112       dump_bitmap_file (dump_file, r_reachable);
4113       fprintf (dump_file, "Reachable landing pads: ");
4114       dump_bitmap_file (dump_file, lp_reachable);
4115     }
4116 
4117   if (dump_file)
4118     {
4119       FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4120 	if (region && !bitmap_bit_p (r_reachable, region->index))
4121 	  fprintf (dump_file,
4122 		   "Removing unreachable region %d\n",
4123 		   region->index);
4124     }
4125 
4126   remove_unreachable_eh_regions (r_reachable);
4127 
4128   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4129     if (lp && !bitmap_bit_p (lp_reachable, lp->index))
4130       {
4131 	if (dump_file)
4132 	  fprintf (dump_file,
4133 		   "Removing unreachable landing pad %d\n",
4134 		   lp->index);
4135 	remove_eh_landing_pad (lp);
4136       }
4137 
4138   if (dump_file)
4139     {
4140       fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
4141       dump_eh_tree (dump_file, cfun);
4142       fprintf (dump_file, "\n\n");
4143     }
4144 
4145   sbitmap_free (r_reachable);
4146   sbitmap_free (lp_reachable);
4147 
4148   if (flag_checking)
4149     verify_eh_tree (cfun);
4150 }
4151 
4152 /* Remove unreachable handlers if any landing pads have been removed since
4153    the last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
4154 
4155 void
4156 maybe_remove_unreachable_handlers (void)
4157 {
4158   eh_landing_pad lp;
4159   unsigned i;
4160 
4161   if (cfun->eh == NULL)
4162     return;
4163 
4164   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4165     if (lp
4166 	&& (lp->post_landing_pad == NULL_TREE
4167 	    || label_to_block (cfun, lp->post_landing_pad) == NULL))
4168       {
4169 	remove_unreachable_handlers ();
4170 	return;
4171       }
4172 }
4173 
4174 /* Remove regions that do not have landing pads.  This assumes
4175    that remove_unreachable_handlers has already been run, and
4176    that we've just manipulated the landing pads since then.
4177 
4178    Preserve regions with landing pads and regions that prevent
4179    exceptions from propagating further, even if these regions
4180    are not reachable.  */
4181 
4182 static void
4183 remove_unreachable_handlers_no_lp (void)
4184 {
4185   eh_region region;
4186   sbitmap r_reachable;
4187   unsigned i;
4188 
4189   mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
4190 
4191   FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4192     {
4193       if (! region)
4194 	continue;
4195 
4196       if (region->landing_pads != NULL
4197 	  || region->type == ERT_MUST_NOT_THROW)
4198 	bitmap_set_bit (r_reachable, region->index);
4199 
4200       if (dump_file
4201 	  && !bitmap_bit_p (r_reachable, region->index))
4202 	fprintf (dump_file,
4203 		 "Removing unreachable region %d\n",
4204 		 region->index);
4205     }
4206 
4207   remove_unreachable_eh_regions (r_reachable);
4208 
4209   sbitmap_free (r_reachable);
4210 }
4211 
4212 /* Undo critical edge splitting on an EH landing pad.  Earlier, we
4213    optimistically split all sorts of edges, including EH edges.  The
4214    optimization passes in between may not have needed them; if not,
4215    we should undo the split.
4216 
4217    Recognize this case by having one EH edge incoming to the BB and
4218    one normal edge outgoing; BB should be empty apart from the
4219    post_landing_pad label.
4220 
4221    Note that this is slightly different from the empty handler case
4222    handled by cleanup_empty_eh, in that the handler may still contain
4223    real code but the landing pad has been separated from the
4224    handler.  As such, cleanup_empty_eh relies on this transformation
4225    having been done first.  */
4226 
4227 static bool
4228 unsplit_eh (eh_landing_pad lp)
4229 {
4230   basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4231   gimple_stmt_iterator gsi;
4232   edge e_in, e_out;
4233 
4234   /* Quickly check the edge counts on BB for singularity.  */
4235   if (!single_pred_p (bb) || !single_succ_p (bb))
4236     return false;
4237   e_in = single_pred_edge (bb);
4238   e_out = single_succ_edge (bb);
4239 
4240   /* Input edge must be EH and output edge must be normal.  */
4241   if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4242     return false;
4243 
4244   /* The block must be empty except for the labels and debug insns.  */
4245   gsi = gsi_after_labels (bb);
4246   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4247     gsi_next_nondebug (&gsi);
4248   if (!gsi_end_p (gsi))
4249     return false;
4250 
4251   /* The destination block must not already have a landing pad
4252      for a different region.  */
4253   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4254     {
4255       glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4256       tree lab;
4257       int lp_nr;
4258 
4259       if (!label_stmt)
4260 	break;
4261       lab = gimple_label_label (label_stmt);
4262       lp_nr = EH_LANDING_PAD_NR (lab);
4263       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4264 	return false;
4265     }
4266 
4267   /* The new destination block must not already be a destination of
4268      the source block, lest we merge fallthru and EH edges and get
4269      all sorts of confusion.  */
4270   if (find_edge (e_in->src, e_out->dest))
4271     return false;
4272 
4273   /* ??? We can get degenerate phis due to cfg cleanups.  I would have
4274      thought this should have been cleaned up by a phicprop pass, but
4275      that doesn't appear to handle virtuals.  Propagate by hand.  */
4276   if (!gimple_seq_empty_p (phi_nodes (bb)))
4277     {
4278       for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4279 	{
4280 	  gimple *use_stmt;
4281 	  gphi *phi = gpi.phi ();
4282 	  tree lhs = gimple_phi_result (phi);
4283 	  tree rhs = gimple_phi_arg_def (phi, 0);
4284 	  use_operand_p use_p;
4285 	  imm_use_iterator iter;
4286 
4287 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4288 	    {
4289 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4290 		SET_USE (use_p, rhs);
4291 	    }
4292 
4293 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4294 	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4295 
4296 	  remove_phi_node (&gpi, true);
4297 	}
4298     }
4299 
4300   if (dump_file && (dump_flags & TDF_DETAILS))
4301     fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4302 	     lp->index, e_out->dest->index);
4303 
4304   /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
4305      a successor edge, humor it.  But do the real CFG change with the
4306      predecessor of E_OUT in order to preserve the ordering of arguments
4307      to the PHI nodes in E_OUT->DEST.  */
4308   redirect_eh_edge_1 (e_in, e_out->dest, false);
4309   redirect_edge_pred (e_out, e_in->src);
4310   e_out->flags = e_in->flags;
4311   e_out->probability = e_in->probability;
4312   remove_edge (e_in);
4313 
4314   return true;
4315 }
4316 
4317 /* Examine each landing pad block and see if it matches unsplit_eh.  */
4318 
4319 static bool
4320 unsplit_all_eh (void)
4321 {
4322   bool changed = false;
4323   eh_landing_pad lp;
4324   int i;
4325 
4326   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4327     if (lp)
4328       changed |= unsplit_eh (lp);
4329 
4330   return changed;
4331 }
4332 
4333 /* Wrapper around unsplit_all_eh that makes it usable everywhere.  */
4334 
4335 void
4336 unsplit_eh_edges (void)
4337 {
4338   bool changed;
4339 
4340   /* unsplit_all_eh can die looking up unreachable landing pads.  */
4341   maybe_remove_unreachable_handlers ();
4342 
4343   changed = unsplit_all_eh ();
4344 
4345   /* If EH edges have been unsplit, delete unreachable forwarder blocks.  */
4346   if (changed)
4347     {
4348       free_dominance_info (CDI_DOMINATORS);
4349       free_dominance_info (CDI_POST_DOMINATORS);
4350       delete_unreachable_blocks ();
4351     }
4352 }
4353 
4354 /* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
4355    to OLD_BB to NEW_BB; return true on success, false on failure.
4356 
4357    OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4358    PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4359    Virtual PHIs may be deleted and marked for renaming.  */
4360 
4361 static bool
4362 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4363 			     edge old_bb_out, bool change_region)
4364 {
4365   gphi_iterator ngsi, ogsi;
4366   edge_iterator ei;
4367   edge e;
4368   bitmap ophi_handled;
4369 
4370   /* The destination block must not be a regular successor for any
4371      of the preds of the landing pad.  Thus, avoid turning
4372         <..>
4373 	 |  \ EH
4374 	 |  <..>
4375 	 |  /
4376 	<..>
4377      into
4378         <..>
4379 	|  | EH
4380 	<..>
4381      which CFG verification would choke on.  See PR45172 and PR51089.  */
4382   if (!single_pred_p (new_bb))
4383     FOR_EACH_EDGE (e, ei, old_bb->preds)
4384       if (find_edge (e->src, new_bb))
4385 	return false;
4386 
4387   FOR_EACH_EDGE (e, ei, old_bb->preds)
4388     redirect_edge_var_map_clear (e);
4389 
4390   ophi_handled = BITMAP_ALLOC (NULL);
4391 
4392   /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4393      for the edges we're going to move.  */
4394   for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4395     {
4396       gphi *ophi, *nphi = ngsi.phi ();
4397       tree nresult, nop;
4398 
4399       nresult = gimple_phi_result (nphi);
4400       nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4401 
4402       /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4403 	 the source ssa_name.  */
4404       ophi = NULL;
4405       for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4406 	{
4407 	  ophi = ogsi.phi ();
4408 	  if (gimple_phi_result (ophi) == nop)
4409 	    break;
4410 	  ophi = NULL;
4411 	}
4412 
4413       /* If we did find the corresponding PHI, copy those inputs.  */
4414       if (ophi)
4415 	{
4416 	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
4417 	  if (!has_single_use (nop))
4418 	    {
4419 	      imm_use_iterator imm_iter;
4420 	      use_operand_p use_p;
4421 
4422 	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4423 		{
4424 		  if (!gimple_debug_bind_p (USE_STMT (use_p))
4425 		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4426 			  || gimple_bb (USE_STMT (use_p)) != new_bb))
4427 		    goto fail;
4428 		}
4429 	    }
4430 	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4431 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4432 	    {
4433 	      location_t oloc;
4434 	      tree oop;
4435 
4436 	      if ((e->flags & EDGE_EH) == 0)
4437 		continue;
4438 	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
4439 	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4440 	      redirect_edge_var_map_add (e, nresult, oop, oloc);
4441 	    }
4442 	}
4443       /* If we didn't find the PHI, then whether it's a real variable or
4444 	 a VOP, we know from the fact that OLD_BB is tree_empty_eh_handler_p
4445 	 that the variable is unchanged across the block, so we can simply
4446 	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
4447       else
4448 	{
4449 	  location_t nloc
4450 	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4451 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4452 	    redirect_edge_var_map_add (e, nresult, nop, nloc);
4453 	}
4454     }
4455 
4456   /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
4457      we don't know what values from the other edges into NEW_BB to use.  */
4458   for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4459     {
4460       gphi *ophi = ogsi.phi ();
4461       tree oresult = gimple_phi_result (ophi);
4462       if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4463 	goto fail;
4464     }
4465 
4466   /* Finally, move the edges and update the PHIs.  */
4467   for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4468     if (e->flags & EDGE_EH)
4469       {
4470 	/* ???  CFG manipulation routines do not try to update loop
4471 	   form on edge redirection.  Do so manually here for now.  */
4472 	/* Redirecting a loop entry or latch edge will either create a
4473 	   multiple-entry loop or rotate the loop.  If the loops merge,
4474 	   we may create a loop with multiple latches.
4475 	   None of this is easily fixed, so cancel the affected loop
4476 	   and mark the other loop as possibly having multiple latches.  */
4477 	if (e->dest == e->dest->loop_father->header)
4478 	  {
4479 	    mark_loop_for_removal (e->dest->loop_father);
4480 	    new_bb->loop_father->latch = NULL;
4481 	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4482 	  }
4483 	redirect_eh_edge_1 (e, new_bb, change_region);
4484 	redirect_edge_succ (e, new_bb);
4485 	flush_pending_stmts (e);
4486       }
4487     else
4488       ei_next (&ei);
4489 
4490   BITMAP_FREE (ophi_handled);
4491   return true;
4492 
4493  fail:
4494   FOR_EACH_EDGE (e, ei, old_bb->preds)
4495     redirect_edge_var_map_clear (e);
4496   BITMAP_FREE (ophi_handled);
4497   return false;
4498 }
4499 
4500 /* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
4501    old region to NEW_REGION at BB.  */
4502 
4503 static void
4504 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4505 			  eh_landing_pad lp, eh_region new_region)
4506 {
4507   gimple_stmt_iterator gsi;
4508   eh_landing_pad *pp;
4509 
4510   for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4511     continue;
4512   *pp = lp->next_lp;
4513 
4514   lp->region = new_region;
4515   lp->next_lp = new_region->landing_pads;
4516   new_region->landing_pads = lp;
4517 
4518   /* Delete the RESX that was matched within the empty handler block.  */
4519   gsi = gsi_last_bb (bb);
4520   unlink_stmt_vdef (gsi_stmt (gsi));
4521   gsi_remove (&gsi, true);
4522 
4523   /* Clean up E_OUT for the fallthru.  */
4524   e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4525   e_out->probability = profile_probability::always ();
4526 }
4527 
4528 /* A subroutine of cleanup_empty_eh.  Handle more complex cases of
4529    unsplitting than unsplit_eh was prepared to handle, e.g. when
4530    multiple incoming edges and phis are involved.  */
4531 
4532 static bool
4533 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4534 {
4535   gimple_stmt_iterator gsi;
4536   tree lab;
4537 
4538   /* We really ought not have totally lost everything following
4539      a landing pad label.  Given that BB is empty, there had better
4540      be a successor.  */
4541   gcc_assert (e_out != NULL);
4542 
4543   /* The destination block must not already have a landing pad
4544      for a different region.  */
4545   lab = NULL;
4546   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4547     {
4548       glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4549       int lp_nr;
4550 
4551       if (!stmt)
4552 	break;
4553       lab = gimple_label_label (stmt);
4554       lp_nr = EH_LANDING_PAD_NR (lab);
4555       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4556 	return false;
4557     }
4558 
4559   /* Attempt to move the PHIs into the successor block.  */
4560   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4561     {
4562       if (dump_file && (dump_flags & TDF_DETAILS))
4563 	fprintf (dump_file,
4564 		 "Unsplit EH landing pad %d to block %i "
4565 		 "(via cleanup_empty_eh).\n",
4566 		 lp->index, e_out->dest->index);
4567       return true;
4568     }
4569 
4570   return false;
4571 }
4572 
4573 /* Return true if edge E_FIRST is part of an empty infinite loop
4574    or leads to such a loop through a series of single successor
4575    empty bbs.  */
4576 
4577 static bool
4578 infinite_empty_loop_p (edge e_first)
4579 {
4580   bool inf_loop = false;
4581   edge e;
4582 
4583   if (e_first->dest == e_first->src)
4584     return true;
4585 
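  /* Walk single-successor empty blocks, marking them via the AUX field;
     hitting a marked block means the walk looped back, i.e. an infinite
     loop.  The marks are cleared before returning.  */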
4586   e_first->src->aux = (void *) 1;
4587   for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4588     {
4589       gimple_stmt_iterator gsi;
4590       if (e->dest->aux)
4591 	{
4592 	  inf_loop = true;
4593 	  break;
4594 	}
4595       e->dest->aux = (void *) 1;
4596       gsi = gsi_after_labels (e->dest);
4597       if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4598 	gsi_next_nondebug (&gsi);
4599       if (!gsi_end_p (gsi))
4600 	break;
4601     }
4602   e_first->src->aux = NULL;
4603   for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4604     e->dest->aux = NULL;
4605 
4606   return inf_loop;
4607 }
4608 
4609 /* Examine the block associated with LP to determine if it's an empty
4610    handler for its EH region.  If so, attempt to redirect EH edges to
4611    an outer region.  Return true if the CFG was updated in any way.  This
4612    is similar to jump forwarding, just across EH edges.  */
4613 
4614 static bool
4615 cleanup_empty_eh (eh_landing_pad lp)
4616 {
4617   basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4618   gimple_stmt_iterator gsi;
4619   gimple *resx;
4620   eh_region new_region;
4621   edge_iterator ei;
4622   edge e, e_out;
4623   bool has_non_eh_pred;
4624   bool ret = false;
4625   int new_lp_nr;
4626 
4627   /* There can be zero or one edges out of BB.  This is the quickest test.  */
4628   switch (EDGE_COUNT (bb->succs))
4629     {
4630     case 0:
4631       e_out = NULL;
4632       break;
4633     case 1:
4634       e_out = single_succ_edge (bb);
4635       break;
4636     default:
4637       return false;
4638     }
4639 
4640   gsi = gsi_last_nondebug_bb (bb);
4641   resx = gsi_stmt (gsi);
4642   if (resx && is_gimple_resx (resx))
4643     {
4644       if (stmt_can_throw_external (cfun, resx))
4645 	optimize_clobbers (bb);
4646       else if (sink_clobbers (bb))
4647 	ret = true;
4648     }
4649 
4650   gsi = gsi_after_labels (bb);
4651 
4652   /* Make sure to skip debug statements.  */
4653   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4654     gsi_next_nondebug (&gsi);
4655 
4656   /* If the block is totally empty, look for more unsplitting cases.  */
4657   if (gsi_end_p (gsi))
4658     {
4659       /* Bail out for the degenerate case of an infinite loop.  Also bail
4660 	 out if BB has no successors and is totally empty, which can happen
4661 	 e.g. because of an incorrect noreturn attribute.  */
4662       if (e_out == NULL
4663 	  || infinite_empty_loop_p (e_out))
4664 	return ret;
4665 
4666       return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4667     }
4668 
4669   /* The block should consist only of a single RESX statement, modulo a
4670      preceding call to __builtin_stack_restore if there is no outgoing
4671      edge, since the call can be eliminated in this case.  */
4672   resx = gsi_stmt (gsi);
4673   if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4674     {
4675       gsi_next_nondebug (&gsi);
4676       resx = gsi_stmt (gsi);
4677     }
4678   if (!is_gimple_resx (resx))
4679     return ret;
4680   gcc_assert (gsi_one_nondebug_before_end_p (gsi));
4681 
4682   /* Determine if there are non-EH edges, or resx edges into the handler.  */
4683   has_non_eh_pred = false;
4684   FOR_EACH_EDGE (e, ei, bb->preds)
4685     if (!(e->flags & EDGE_EH))
4686       has_non_eh_pred = true;
4687 
4688   /* Find the handler that's outer of the empty handler by looking at
4689      where the RESX instruction was vectored.  */
4690   new_lp_nr = lookup_stmt_eh_lp (resx);
4691   new_region = get_eh_region_from_lp_number (new_lp_nr);
4692 
4693   /* If there's no destination region within the current function,
4694      redirection is trivial via removing the throwing statements from
4695      the EH region, removing the EH edges, and allowing the block
4696      to go unreachable.  */
4697   if (new_region == NULL)
4698     {
4699       gcc_assert (e_out == NULL);
4700       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4701 	if (e->flags & EDGE_EH)
4702 	  {
4703 	    gimple *stmt = last_stmt (e->src);
4704 	    remove_stmt_from_eh_lp (stmt);
4705 	    remove_edge (e);
4706 	  }
4707 	else
4708 	  ei_next (&ei);
4709       goto succeed;
4710     }
4711 
4712   /* If the destination region is a MUST_NOT_THROW, allow the runtime
4713      to handle the abort and allow the blocks to go unreachable.  */
4714   if (new_region->type == ERT_MUST_NOT_THROW)
4715     {
4716       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4717 	if (e->flags & EDGE_EH)
4718 	  {
4719 	    gimple *stmt = last_stmt (e->src);
4720 	    remove_stmt_from_eh_lp (stmt);
4721 	    add_stmt_to_eh_lp (stmt, new_lp_nr);
4722 	    remove_edge (e);
4723 	  }
4724 	else
4725 	  ei_next (&ei);
4726       goto succeed;
4727     }
4728 
4729   /* Try to redirect the EH edges and merge the PHIs into the destination
4730      landing pad block.  If the merge succeeds, we'll already have redirected
4731      all the EH edges.  The handler itself will go unreachable if there were
4732      no normal edges.  */
4733   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4734     goto succeed;
4735 
4736   /* Finally, if all input edges are EH edges, then we can (potentially)
4737      reduce the number of transfers from the runtime by moving the landing
4738      pad from the original region to the new region.  This is a win when
4739      we remove the last CLEANUP region along a particular exception
4740      propagation path.  Since nothing changes except for the region with
4741      which the landing pad is associated, the PHI nodes do not need to be
4742      adjusted at all.  */
4743   if (!has_non_eh_pred)
4744     {
4745       cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4746       if (dump_file && (dump_flags & TDF_DETAILS))
4747 	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4748 		 lp->index, new_region->index);
4749 
4750       /* ??? The CFG didn't change, but we may have rendered the
4751 	 old EH region unreachable.  Trigger a cleanup there.  */
4752       return true;
4753     }
4754 
4755   return ret;
4756 
4757  succeed:
4758   if (dump_file && (dump_flags & TDF_DETAILS))
4759     fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4760   remove_eh_landing_pad (lp);
4761   return true;
4762 }
4763 
4764 /* Do a post-order traversal of the EH region tree.  Examine each
4765    post_landing_pad block and see if we can eliminate it as empty.  */
4766 
4767 static bool
4768 cleanup_all_empty_eh (void)
4769 {
4770   bool changed = false;
4771   eh_landing_pad lp;
4772   int i;
4773 
4774   /* The post-order traversal may lead to quadraticness in the redirection
4775      of incoming EH edges from inner LPs, so first try to walk the region
4776      tree from inner to outer LPs in order to eliminate these edges.  */
4777   for (i = vec_safe_length (cfun->eh->lp_array) - 1; i >= 1; --i)
4778     {
4779       lp = (*cfun->eh->lp_array)[i];
4780       if (lp)
4781 	changed |= cleanup_empty_eh (lp);
4782     }
4783 
4784   /* Now do the post-order traversal to eliminate outer empty LPs.  */
4785   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4786     if (lp)
4787       changed |= cleanup_empty_eh (lp);
4788 
4789   return changed;
4790 }
4791 
4792 /* Perform cleanups and lowering of exception handling:
4793     1) cleanup regions whose handlers do nothing are optimized out
4794     2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
4795     3) info about regions containing instructions, and about regions
4796        reachable via local EH edges, is collected
4797     4) the EH tree is pruned of regions that are no longer necessary.
4798 
4799    TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4800 	 Unify those that have the same failure decl and locus.
4801 */
4802 
4803 static unsigned int
4804 execute_cleanup_eh_1 (void)
4805 {
4806   /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4807      looking up unreachable landing pads.  */
4808   remove_unreachable_handlers ();
4809 
4810   /* Watch out for the region tree vanishing when all regions are unreachable.  */
4811   if (cfun->eh->region_tree)
4812     {
4813       bool changed = false;
4814 
4815       if (optimize)
4816 	changed |= unsplit_all_eh ();
4817       changed |= cleanup_all_empty_eh ();
4818 
4819       if (changed)
4820 	{
4821 	  free_dominance_info (CDI_DOMINATORS);
4822 	  free_dominance_info (CDI_POST_DOMINATORS);
4823 
4824           /* We delayed all basic block deletion, as we may have performed
4825 	     cleanups on EH edges while non-EH edges were still present.  */
4826 	  delete_unreachable_blocks ();
4827 
4828 	  /* We manipulated the landing pads.  Remove any region that no
4829 	     longer has a landing pad.  */
4830 	  remove_unreachable_handlers_no_lp ();
4831 
4832 	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4833 	}
4834     }
4835 
4836   return 0;
4837 }
4838 
4839 namespace {
4840 
4841 const pass_data pass_data_cleanup_eh =
4842 {
4843   GIMPLE_PASS, /* type */
4844   "ehcleanup", /* name */
4845   OPTGROUP_NONE, /* optinfo_flags */
4846   TV_TREE_EH, /* tv_id */
4847   PROP_gimple_lcf, /* properties_required */
4848   0, /* properties_provided */
4849   0, /* properties_destroyed */
4850   0, /* todo_flags_start */
4851   0, /* todo_flags_finish */
4852 };
4853 
4854 class pass_cleanup_eh : public gimple_opt_pass
4855 {
4856 public:
4857   pass_cleanup_eh (gcc::context *ctxt)
4858     : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4859   {}
4860 
4861   /* opt_pass methods: */
4862   opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4863   virtual bool gate (function *fun)
4864     {
4865       return fun->eh != NULL && fun->eh->region_tree != NULL;
4866     }
4867 
4868   virtual unsigned int execute (function *);
4869 
4870 }; // class pass_cleanup_eh
4871 
4872 unsigned int
4873 pass_cleanup_eh::execute (function *fun)
4874 {
4875   int ret = execute_cleanup_eh_1 ();
4876 
4877   /* If the function no longer needs an EH personality routine
4878      clear it.  This exposes cross-language inlining opportunities
4879      and avoids references to a never defined personality routine.  */
4880   if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4881       && function_needs_eh_personality (fun) != eh_personality_lang)
4882     DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4883 
4884   return ret;
4885 }
4886 
4887 } // anon namespace
4888 
4889 gimple_opt_pass *
4890 make_pass_cleanup_eh (gcc::context *ctxt)
4891 {
4892   return new pass_cleanup_eh (ctxt);
4893 }
4894 
4895 /* Disable warnings about missing quoting in GCC diagnostics for
4896    the verification errors.  Their format strings don't follow GCC
4897    diagnostic conventions but are only used for debugging.  */
4898 #if __GNUC__ >= 10
4899 #  pragma GCC diagnostic push
4900 #  pragma GCC diagnostic ignored "-Wformat-diag"
4901 #endif
4902 
4903 /* Verify that the BB containing STMT as its last statement has precisely
4904    the edges that make_eh_edges would create.  */
4905 
4906 DEBUG_FUNCTION bool
4907 verify_eh_edges (gimple *stmt)
4908 {
4909   basic_block bb = gimple_bb (stmt);
4910   eh_landing_pad lp = NULL;
4911   int lp_nr;
4912   edge_iterator ei;
4913   edge e, eh_edge;
4914 
4915   lp_nr = lookup_stmt_eh_lp (stmt);
4916   if (lp_nr > 0)
4917     lp = get_eh_landing_pad_from_number (lp_nr);
4918 
4919   eh_edge = NULL;
4920   FOR_EACH_EDGE (e, ei, bb->succs)
4921     {
4922       if (e->flags & EDGE_EH)
4923 	{
4924 	  if (eh_edge)
4925 	    {
4926 	      error ("BB %i has multiple EH edges", bb->index);
4927 	      return true;
4928 	    }
4929 	  else
4930 	    eh_edge = e;
4931 	}
4932     }
4933 
4934   if (lp == NULL)
4935     {
4936       if (eh_edge)
4937 	{
4938 	  error ("BB %i cannot throw but has an EH edge", bb->index);
4939 	  return true;
4940 	}
4941       return false;
4942     }
4943 
4944   if (!stmt_could_throw_p (cfun, stmt))
4945     {
4946       error ("BB %i last statement has incorrectly set lp", bb->index);
4947       return true;
4948     }
4949 
4950   if (eh_edge == NULL)
4951     {
4952       error ("BB %i is missing an EH edge", bb->index);
4953       return true;
4954     }
4955 
4956   if (eh_edge->dest != label_to_block (cfun, lp->post_landing_pad))
4957     {
4958       error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4959       return true;
4960     }
4961 
4962   return false;
4963 }
4964 
4965 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */
4966 
4967 DEBUG_FUNCTION bool
4968 verify_eh_dispatch_edge (geh_dispatch *stmt)
4969 {
4970   eh_region r;
4971   eh_catch c;
4972   basic_block src, dst;
4973   bool want_fallthru = true;
4974   edge_iterator ei;
4975   edge e, fall_edge;
4976 
4977   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4978   src = gimple_bb (stmt);
4979 
4980   FOR_EACH_EDGE (e, ei, src->succs)
4981     gcc_assert (e->aux == NULL);
4982 
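  /* Mark every edge the dispatch is expected to have via its AUX field;
     the loop further below clears the marks and reports any remaining,
     unexpected edge.  */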
4983   switch (r->type)
4984     {
4985     case ERT_TRY:
4986       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4987 	{
4988 	  dst = label_to_block (cfun, c->label);
4989 	  e = find_edge (src, dst);
4990 	  if (e == NULL)
4991 	    {
4992 	      error ("BB %i is missing an edge", src->index);
4993 	      return true;
4994 	    }
4995 	  e->aux = (void *)e;
4996 
4997 	  /* A catch-all handler doesn't have a fallthru.  */
4998 	  if (c->type_list == NULL)
4999 	    {
5000 	      want_fallthru = false;
5001 	      break;
5002 	    }
5003 	}
5004       break;
5005 
5006     case ERT_ALLOWED_EXCEPTIONS:
5007       dst = label_to_block (cfun, r->u.allowed.label);
5008       e = find_edge (src, dst);
5009       if (e == NULL)
5010 	{
5011 	  error ("BB %i is missing an edge", src->index);
5012 	  return true;
5013 	}
5014       e->aux = (void *)e;
5015       break;
5016 
5017     default:
5018       gcc_unreachable ();
5019     }
5020 
5021   fall_edge = NULL;
5022   FOR_EACH_EDGE (e, ei, src->succs)
5023     {
5024       if (e->flags & EDGE_FALLTHRU)
5025 	{
5026 	  if (fall_edge != NULL)
5027 	    {
5028 	      error ("BB %i too many fallthru edges", src->index);
5029 	      return true;
5030 	    }
5031 	  fall_edge = e;
5032 	}
5033       else if (e->aux)
5034 	e->aux = NULL;
5035       else
5036 	{
5037 	  error ("BB %i has incorrect edge", src->index);
5038 	  return true;
5039 	}
5040     }
5041   if ((fall_edge != NULL) ^ want_fallthru)
5042     {
5043       error ("BB %i has incorrect fallthru edge", src->index);
5044       return true;
5045     }
5046 
5047   return false;
5048 }
5049 
5050 #if __GNUC__ >= 10
5051 #  pragma GCC diagnostic pop
5052 #endif
5053