1 /* Exception handling semantics and decomposition for trees.
2    Copyright (C) 2003-2021 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10 
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "cgraph.h"
31 #include "diagnostic-core.h"
32 #include "fold-const.h"
33 #include "calls.h"
34 #include "except.h"
35 #include "cfganal.h"
36 #include "cfgcleanup.h"
37 #include "tree-eh.h"
38 #include "gimple-iterator.h"
39 #include "tree-cfg.h"
40 #include "tree-into-ssa.h"
41 #include "tree-ssa.h"
42 #include "tree-inline.h"
43 #include "langhooks.h"
44 #include "cfgloop.h"
45 #include "gimple-low.h"
46 #include "stringpool.h"
47 #include "attribs.h"
48 #include "asan.h"
49 #include "gimplify.h"
50 
51 /* In some instances a tree and a gimple need to be stored in the same table,
52    i.e. in hash tables.  This union makes that possible.  */
53 typedef union {tree *tp; tree t; gimple *g;} treemple;
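
/* For instance (a sketch), the same hash-table key can be filled from
   either world:

     treemple key;
     key.t = label_decl;    when the key is a LABEL_DECL from a GIMPLE_LABEL
     key.g = try_stmt;      when the key is a GIMPLE_TRY statement

   Only one member is meaningful at a time; the caller must know which.  */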
54 
55 /* Misc functions used in this file.  */
56 
57 /* Remember and lookup EH landing pad data for arbitrary statements.
58    Really this means any statement that could_throw_p.  We could
59    stuff this information into the stmt_ann data structure, but:
60 
61    (1) We absolutely rely on this information being kept until
62    we get to rtl.  Once we're done with lowering here, if we lose
63    the information there's no way to recover it!
64 
65    (2) There are many more statements that *cannot* throw as
66    compared to those that can.  We should be saving some amount
67    of space by only allocating memory for those that can throw.  */
68 
69 /* Add statement T in function IFUN to landing pad NUM.  */
70 
71 static void
72 add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
73 {
74   gcc_assert (num != 0);
75 
76   if (!get_eh_throw_stmt_table (ifun))
77     set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));
78 
79   gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
80 }
81 
82 /* Add statement T in the current function (cfun) to EH landing pad NUM.  */
83 
84 void
85 add_stmt_to_eh_lp (gimple *t, int num)
86 {
87   add_stmt_to_eh_lp_fn (cfun, t, num);
88 }
89 
90 /* Add statement T to the single EH landing pad in REGION.  */
91 
92 static void
93 record_stmt_eh_region (eh_region region, gimple *t)
94 {
95   if (region == NULL)
96     return;
97   if (region->type == ERT_MUST_NOT_THROW)
98     add_stmt_to_eh_lp_fn (cfun, t, -region->index);
99   else
100     {
101       eh_landing_pad lp = region->landing_pads;
102       if (lp == NULL)
103 	lp = gen_eh_landing_pad (region);
104       else
105 	gcc_assert (lp->next_lp == NULL);
106       add_stmt_to_eh_lp_fn (cfun, t, lp->index);
107     }
108 }
109 
110 
111 /* Remove statement T in function IFUN from its EH landing pad.  */
112 
113 bool
114 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
115 {
116   if (!get_eh_throw_stmt_table (ifun))
117     return false;
118 
119   if (!get_eh_throw_stmt_table (ifun)->get (t))
120     return false;
121 
122   get_eh_throw_stmt_table (ifun)->remove (t);
123   return true;
124 }
125 
126 
127 /* Remove statement T in the current function (cfun) from its
128    EH landing pad.  */
129 
130 bool
131 remove_stmt_from_eh_lp (gimple *t)
132 {
133   return remove_stmt_from_eh_lp_fn (cfun, t);
134 }
135 
136 /* Determine if statement T is inside an EH region in function IFUN.
137    Positive numbers indicate a landing pad index; negative numbers
138    indicate a MUST_NOT_THROW region index; zero indicates that the
139    statement is not recorded in the region table.  */
140 
141 int
142 lookup_stmt_eh_lp_fn (struct function *ifun, const gimple *t)
143 {
144   if (ifun->eh->throw_stmt_table == NULL)
145     return 0;
146 
147   int *lp_nr = ifun->eh->throw_stmt_table->get (const_cast <gimple *> (t));
148   return lp_nr ? *lp_nr : 0;
149 }
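
/* For example (a sketch of a typical caller):

     int lp_nr = lookup_stmt_eh_lp_fn (cfun, stmt);
     if (lp_nr > 0)
       ...   stmt may throw; its landing pad has index lp_nr
     else if (lp_nr < 0)
       ...   stmt lies in the MUST_NOT_THROW region with index -lp_nr
     else
       ...   stmt is not recorded in the region table  */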
150 
151 /* Likewise, but always use the current function.  */
152 
153 int
154 lookup_stmt_eh_lp (const gimple *t)
155 {
156   /* We can get called from initialized data when -fnon-call-exceptions
157      is on; prevent crash.  */
158   if (!cfun)
159     return 0;
160   return lookup_stmt_eh_lp_fn (cfun, t);
161 }
162 
163 /* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
164    nodes and LABEL_DECL nodes.  We will use this during the second phase to
165    determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */
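
/* For example (a source-level sketch):

	try { ... goto done; ... } finally { cleanup (); }
	done:;

   DONE is recorded outside the GIMPLE_TRY_FINALLY while the goto occurs
   inside it, so the second phase can tell that this goto escapes the
   try body and must run the finally block on the way out.  */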
166 
167 struct finally_tree_node
168 {
169   /* When storing a GIMPLE_TRY, we have to record a gimple.  However
170      when deciding whether a GOTO to a certain LABEL_DECL (which is a
171      tree) leaves the TRY block, it's necessary to record a tree in
172      this field.  Thus a treemple is used.  */
173   treemple child;
174   gtry *parent;
175 };
176 
177 /* Hashtable helpers.  */
178 
179 struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
180 {
181   static inline hashval_t hash (const finally_tree_node *);
182   static inline bool equal (const finally_tree_node *,
183 			    const finally_tree_node *);
184 };
185 
186 inline hashval_t
187 finally_tree_hasher::hash (const finally_tree_node *v)
188 {
189   return (intptr_t)v->child.t >> 4;
190 }
191 
192 inline bool
193 finally_tree_hasher::equal (const finally_tree_node *v,
194 			    const finally_tree_node *c)
195 {
196   return v->child.t == c->child.t;
197 }
198 
199 /* Note that this table is *not* marked GTY.  It is short-lived.  */
200 static hash_table<finally_tree_hasher> *finally_tree;
201 
202 static void
203 record_in_finally_tree (treemple child, gtry *parent)
204 {
205   struct finally_tree_node *n;
206   finally_tree_node **slot;
207 
208   n = XNEW (struct finally_tree_node);
209   n->child = child;
210   n->parent = parent;
211 
212   slot = finally_tree->find_slot (n, INSERT);
213   gcc_assert (!*slot);
214   *slot = n;
215 }
216 
217 static void
218 collect_finally_tree (gimple *stmt, gtry *region);
219 
220 /* Go through the gimple sequence.  Works with collect_finally_tree to
221    record all GIMPLE_LABEL and GIMPLE_TRY statements. */
222 
223 static void
224 collect_finally_tree_1 (gimple_seq seq, gtry *region)
225 {
226   gimple_stmt_iterator gsi;
227 
228   for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
229     collect_finally_tree (gsi_stmt (gsi), region);
230 }
231 
232 static void
233 collect_finally_tree (gimple *stmt, gtry *region)
234 {
235   treemple temp;
236 
237   switch (gimple_code (stmt))
238     {
239     case GIMPLE_LABEL:
240       temp.t = gimple_label_label (as_a <glabel *> (stmt));
241       record_in_finally_tree (temp, region);
242       break;
243 
244     case GIMPLE_TRY:
245       if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
246         {
247           temp.g = stmt;
248           record_in_finally_tree (temp, region);
249           collect_finally_tree_1 (gimple_try_eval (stmt),
250 				  as_a <gtry *> (stmt));
251 	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
252         }
253       else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
254         {
255           collect_finally_tree_1 (gimple_try_eval (stmt), region);
256           collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
257         }
258       break;
259 
260     case GIMPLE_CATCH:
261       collect_finally_tree_1 (gimple_catch_handler (
262 				 as_a <gcatch *> (stmt)),
263 			      region);
264       break;
265 
266     case GIMPLE_EH_FILTER:
267       collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
268       break;
269 
270     case GIMPLE_EH_ELSE:
271       {
272 	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
273 	collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
274 	collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
275       }
276       break;
277 
278     default:
279       /* A type, a decl, or some kind of statement that we're not
280 	 interested in.  Don't walk them.  */
281       break;
282     }
283 }
284 
285 
286 /* Use the finally tree to determine if a jump from START to TARGET
287    would leave the try_finally node that START lives in.  */
288 
289 static bool
290 outside_finally_tree (treemple start, gimple *target)
291 {
292   struct finally_tree_node n, *p;
293 
294   do
295     {
296       n.child = start;
297       p = finally_tree->find (&n);
298       if (!p)
299 	return true;
300       start.g = p->parent;
301     }
302   while (start.g != target);
303 
304   return false;
305 }
306 
307 /* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
308    nodes into a set of gotos, magic labels, and eh regions.
309    The eh region creation is straightforward, but frobbing all the gotos
310    and such into shape isn't.  */
311 
312 /* The sequence into which we record all EH stuff.  This will be
313    placed at the end of the function when we're all done.  */
314 static gimple_seq eh_seq;
315 
316 /* Record whether an EH region contains something that can throw,
317    indexed by EH region number.  */
318 static bitmap eh_region_may_contain_throw_map;
319 
320 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
321    statements that are seen to escape this GIMPLE_TRY_FINALLY node.
322    The idea is to record a gimple statement for everything except for
323    the conditionals, which get their labels recorded. Since labels are
324    of type 'tree', we need this node to store both gimple and tree
325    objects.  REPL_STMT is the sequence used to replace the goto/return
326    statement.  CONT_STMT is used to store the statement that allows
327    the return/goto to jump to the original destination. */
328 
329 struct goto_queue_node
330 {
331   treemple stmt;
332   location_t location;
333   gimple_seq repl_stmt;
334   gimple *cont_stmt;
335   int index;
336   /* This is used when index >= 0 to indicate that stmt is a label (as
337      opposed to a goto stmt).  */
338   int is_label;
339 };
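
/* For instance (a sketch), given

	try { if (cond) goto out; return; } finally { f (); }
	out:;

   both the goto and the return escape the TRY_FINALLY, so each gets a
   goto_queue_node: the goto via the index of label OUT in dest_array
   with is_label set, the return via the GIMPLE_RETURN itself with
   index -1.  */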
340 
341 /* State of the world while lowering.  */
342 
343 struct leh_state
344 {
345   /* What's "current" while constructing the eh region tree.  These
346      correspond to variables of the same name in cfun->eh, which we
347      don't have easy access to.  */
348   eh_region cur_region;
349 
350   /* What's "current" for the purposes of __builtin_eh_pointer.  For
351      a CATCH, this is the associated TRY.  For an EH_FILTER, this is
352      the associated ALLOWED_EXCEPTIONS, etc.  */
353   eh_region ehp_region;
354 
355   /* Processing of TRY_FINALLY requires a bit more state.  This is
356      split out into a separate structure so that we don't have to
357      copy so much when processing other nodes.  */
358   struct leh_tf_state *tf;
359 
360   /* Outer non-cleanup region.  */
361   eh_region outer_non_cleanup;
362 };
363 
364 struct leh_tf_state
365 {
366   /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
367      try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
368      this so that outside_finally_tree can reliably reference the tree used
369      in the collect_finally_tree data structures.  */
370   gtry *try_finally_expr;
371   gtry *top_p;
372 
373   /* While lowering, top_p is usually expanded into multiple statements;
374      the following field stores them.  */
375   gimple_seq top_p_seq;
376 
377   /* The state outside this try_finally node.  */
378   struct leh_state *outer;
379 
380   /* The exception region created for it.  */
381   eh_region region;
382 
383   /* The goto queue.  */
384   struct goto_queue_node *goto_queue;
385   size_t goto_queue_size;
386   size_t goto_queue_active;
387 
388   /* Pointer map to help in searching goto_queue when it is large.  */
389   hash_map<gimple *, goto_queue_node *> *goto_queue_map;
390 
391   /* The set of unique labels seen as entries in the goto queue.  */
392   vec<tree> dest_array;
393 
394   /* A label to be added at the end of the completed transformed
395      sequence.  It will be set if may_fallthru was true *at one time*,
396      though subsequent transformations may have cleared that flag.  */
397   tree fallthru_label;
398 
399   /* True if it is possible to fall out the bottom of the try block.
400      Cleared if the fallthru is converted to a goto.  */
401   bool may_fallthru;
402 
403   /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
404   bool may_return;
405 
406   /* True if the finally block can receive an exception edge.
407      Cleared if the exception case is handled by code duplication.  */
408   bool may_throw;
409 };
410 
411 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);
412 
413 /* Search for STMT in the goto queue.  Return the replacement,
414    or null if the statement isn't in the queue.  */
415 
416 #define LARGE_GOTO_QUEUE 20
417 
418 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
419 
420 static gimple_seq
421 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
422 {
423   unsigned int i;
424 
425   if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
426     {
427       for (i = 0; i < tf->goto_queue_active; i++)
428 	if ( tf->goto_queue[i].stmt.g == stmt.g)
429 	  return tf->goto_queue[i].repl_stmt;
430       return NULL;
431     }
432 
433   /* If we have a large number of entries in the goto_queue, create a
434      pointer map and use that for searching.  */
435 
436   if (!tf->goto_queue_map)
437     {
438       tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
439       for (i = 0; i < tf->goto_queue_active; i++)
440 	{
441 	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
442 						  &tf->goto_queue[i]);
443 	  gcc_assert (!existed);
444 	}
445     }
446 
447   goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
448   if (slot != NULL)
449     return ((*slot)->repl_stmt);
450 
451   return NULL;
452 }
453 
454 /* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
455    lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
456    then we can just splat it in, otherwise we add the new stmts immediately
457    after the GIMPLE_COND and redirect.  */
458 
459 static void
460 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
461 				gimple_stmt_iterator *gsi)
462 {
463   tree label;
464   gimple_seq new_seq;
465   treemple temp;
466   location_t loc = gimple_location (gsi_stmt (*gsi));
467 
468   temp.tp = tp;
469   new_seq = find_goto_replacement (tf, temp);
470   if (!new_seq)
471     return;
472 
473   if (gimple_seq_singleton_p (new_seq)
474       && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
475     {
476       *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
477       return;
478     }
479 
480   label = create_artificial_label (loc);
481   /* Set the new label for the GIMPLE_COND.  */
482   *tp = label;
483 
484   gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
485   gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
486 }
487 
488 /* The real work of replace_goto_queue.  Returns with GSI updated to
489    point to the next statement.  */
490 
491 static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
492 
493 static void
494 replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
495 		      gimple_stmt_iterator *gsi)
496 {
497   gimple_seq seq;
498   treemple temp;
499   temp.g = NULL;
500 
501   switch (gimple_code (stmt))
502     {
503     case GIMPLE_GOTO:
504     case GIMPLE_RETURN:
505       temp.g = stmt;
506       seq = find_goto_replacement (tf, temp);
507       if (seq)
508 	{
509 	  gimple_stmt_iterator i;
510 	  seq = gimple_seq_copy (seq);
511 	  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
512 	    gimple_set_location (gsi_stmt (i), gimple_location (stmt));
513 	  gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
514 	  gsi_remove (gsi, false);
515 	  return;
516 	}
517       break;
518 
519     case GIMPLE_COND:
520       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
521       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
522       break;
523 
524     case GIMPLE_TRY:
525       replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
526       replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
527       break;
528     case GIMPLE_CATCH:
529       replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
530 				      as_a <gcatch *> (stmt)),
531 				    tf);
532       break;
533     case GIMPLE_EH_FILTER:
534       replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
535       break;
536     case GIMPLE_EH_ELSE:
537       {
538 	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
539 	replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
540 				      tf);
541 	replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
542 				      tf);
543       }
544       break;
545 
546     default:
547       /* These won't have gotos in them.  */
548       break;
549     }
550 
551   gsi_next (gsi);
552 }
553 
554 /* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */
555 
556 static void
557 replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
558 {
559   gimple_stmt_iterator gsi = gsi_start (*seq);
560 
561   while (!gsi_end_p (gsi))
562     replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
563 }
564 
565 /* Replace all goto queue members.  */
566 
567 static void
568 replace_goto_queue (struct leh_tf_state *tf)
569 {
570   if (tf->goto_queue_active == 0)
571     return;
572   replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
573   replace_goto_queue_stmt_list (&eh_seq, tf);
574 }
575 
576 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
577    data to be added, IS_LABEL indicates whether NEW_STMT is a label or
578    a gimple return. */
579 
580 static void
581 record_in_goto_queue (struct leh_tf_state *tf,
582                       treemple new_stmt,
583                       int index,
584                       bool is_label,
585 		      location_t location)
586 {
587   size_t active, size;
588   struct goto_queue_node *q;
589 
590   gcc_assert (!tf->goto_queue_map);
591 
592   active = tf->goto_queue_active;
593   size = tf->goto_queue_size;
594   if (active >= size)
595     {
596       size = (size ? size * 2 : 32);
597       tf->goto_queue_size = size;
598       tf->goto_queue
599          = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
600     }
601 
602   q = &tf->goto_queue[active];
603   tf->goto_queue_active = active + 1;
604 
605   memset (q, 0, sizeof (*q));
606   q->stmt = new_stmt;
607   q->index = index;
608   q->location = location;
609   q->is_label = is_label;
610 }
611 
612 /* Record the LABEL label in the goto queue contained in TF.
613    TF is not null.  */
614 
615 static void
616 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
617 			    location_t location)
618 {
619   int index;
620   treemple temp, new_stmt;
621 
622   if (!label)
623     return;
624 
625   /* Computed and non-local gotos do not get processed.  Given
626      their nature we can neither tell whether we've escaped the
627      finally block nor redirect them if we knew.  */
628   if (TREE_CODE (label) != LABEL_DECL)
629     return;
630 
631   /* No need to record gotos that don't leave the try block.  */
632   temp.t = label;
633   if (!outside_finally_tree (temp, tf->try_finally_expr))
634     return;
635 
636   if (! tf->dest_array.exists ())
637     {
638       tf->dest_array.create (10);
639       tf->dest_array.quick_push (label);
640       index = 0;
641     }
642   else
643     {
644       int n = tf->dest_array.length ();
645       for (index = 0; index < n; ++index)
646         if (tf->dest_array[index] == label)
647           break;
648       if (index == n)
649         tf->dest_array.safe_push (label);
650     }
651 
652   /* In the case of a GOTO we want to record the destination label,
653      since with a GIMPLE_COND we have easy access to the then/else
654      labels. */
655   new_stmt = stmt;
656   record_in_goto_queue (tf, new_stmt, index, true, location);
657 }
658 
659 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
660    node, and if so record that fact in the goto queue associated with that
661    try_finally node.  */
662 
663 static void
664 maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
665 {
666   struct leh_tf_state *tf = state->tf;
667   treemple new_stmt;
668 
669   if (!tf)
670     return;
671 
672   switch (gimple_code (stmt))
673     {
674     case GIMPLE_COND:
675       {
676 	gcond *cond_stmt = as_a <gcond *> (stmt);
677 	new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
678 	record_in_goto_queue_label (tf, new_stmt,
679 				    gimple_cond_true_label (cond_stmt),
680 				    EXPR_LOCATION (*new_stmt.tp));
681 	new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
682 	record_in_goto_queue_label (tf, new_stmt,
683 				    gimple_cond_false_label (cond_stmt),
684 				    EXPR_LOCATION (*new_stmt.tp));
685       }
686       break;
687     case GIMPLE_GOTO:
688       new_stmt.g = stmt;
689       record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
690 				  gimple_location (stmt));
691       break;
692 
693     case GIMPLE_RETURN:
694       tf->may_return = true;
695       new_stmt.g = stmt;
696       record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
697       break;
698 
699     default:
700       gcc_unreachable ();
701     }
702 }
703 
704 
705 #if CHECKING_P
706 /* We do not process GIMPLE_SWITCHes for now.  As long as the original source
707    was in fact structured, and we've not yet done jump threading, then none
708    of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this.  */
709 
710 static void
711 verify_norecord_switch_expr (struct leh_state *state,
712 			     gswitch *switch_expr)
713 {
714   struct leh_tf_state *tf = state->tf;
715   size_t i, n;
716 
717   if (!tf)
718     return;
719 
720   n = gimple_switch_num_labels (switch_expr);
721 
722   for (i = 0; i < n; ++i)
723     {
724       treemple temp;
725       tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
726       temp.t = lab;
727       gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
728     }
729 }
730 #else
731 #define verify_norecord_switch_expr(state, switch_expr)
732 #endif
733 
734 /* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
735    non-null, insert it before the new branch.  */
736 
737 static void
738 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
739 {
740   gimple *x;
741 
742   /* In the case of a return, the queue node must be a gimple statement.  */
743   gcc_assert (!q->is_label);
744 
745   /* Note that the return value may have already been computed, e.g.,
746 
747 	int x;
748 	int foo (void)
749 	{
750 	  x = 0;
751 	  try {
752 	    return x;
753 	  } finally {
754 	    x++;
755 	  }
756 	}
757 
758      should return 0, not 1.  We don't have to do anything to make
759      this happen because the return value has been placed in the
760      RESULT_DECL already.  */
761 
762   q->cont_stmt = q->stmt.g;
763 
764   if (mod)
765     gimple_seq_add_seq (&q->repl_stmt, mod);
766 
767   x = gimple_build_goto (finlab);
768   gimple_set_location (x, q->location);
769   gimple_seq_add_stmt (&q->repl_stmt, x);
770 }
771 
772 /* Similar, but easier, for GIMPLE_GOTO.  */
773 
774 static void
775 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
776 		     struct leh_tf_state *tf)
777 {
778   ggoto *x;
779 
780   gcc_assert (q->is_label);
781 
782   q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
783 
784   if (mod)
785     gimple_seq_add_seq (&q->repl_stmt, mod);
786 
787   x = gimple_build_goto (finlab);
788   gimple_set_location (x, q->location);
789   gimple_seq_add_stmt (&q->repl_stmt, x);
790 }
791 
792 /* Emit a standard landing pad sequence into SEQ for REGION.  */
793 
794 static void
795 emit_post_landing_pad (gimple_seq *seq, eh_region region)
796 {
797   eh_landing_pad lp = region->landing_pads;
798   glabel *x;
799 
800   if (lp == NULL)
801     lp = gen_eh_landing_pad (region);
802 
803   lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
804   EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
805 
806   x = gimple_build_label (lp->post_landing_pad);
807   gimple_seq_add_stmt (seq, x);
808 }
809 
810 /* Emit a RESX statement into SEQ for REGION.  */
811 
812 static void
813 emit_resx (gimple_seq *seq, eh_region region)
814 {
815   gresx *x = gimple_build_resx (region->index);
816   gimple_seq_add_stmt (seq, x);
817   if (region->outer)
818     record_stmt_eh_region (region->outer, x);
819 }
820 
821 /* Note that the current EH region may contain a throw, or a
822    call to a function which itself may contain a throw.  */
823 
824 static void
825 note_eh_region_may_contain_throw (eh_region region)
826 {
827   while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
828     {
829       if (region->type == ERT_MUST_NOT_THROW)
830 	break;
831       region = region->outer;
832       if (region == NULL)
833 	break;
834     }
835 }
836 
837 /* Check if REGION has been marked as containing a throw.  If REGION is
838    NULL, this predicate is false.  */
839 
840 static inline bool
841 eh_region_may_contain_throw (eh_region r)
842 {
843   return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
844 }
845 
846 /* We want to transform
847 	try { body; } catch { stuff; }
848    to
849 	normal_sequence:
850 	  body;
851 	  over:
852 	eh_sequence:
853 	  landing_pad:
854 	  stuff;
855 	  goto over;
856 
857    TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
858    should be placed before the second operand, or NULL.  OVER is
859    an existing label that should be put at the exit, or NULL.  */
860 
861 static gimple_seq
862 frob_into_branch_around (gtry *tp, eh_region region, tree over)
863 {
864   gimple *x;
865   gimple_seq cleanup, result;
866   location_t loc = gimple_location (tp);
867 
868   cleanup = gimple_try_cleanup (tp);
869   result = gimple_try_eval (tp);
870 
871   if (region)
872     emit_post_landing_pad (&eh_seq, region);
873 
874   if (gimple_seq_may_fallthru (cleanup))
875     {
876       if (!over)
877 	over = create_artificial_label (loc);
878       x = gimple_build_goto (over);
879       gimple_set_location (x, loc);
880       gimple_seq_add_stmt (&cleanup, x);
881     }
882   gimple_seq_add_seq (&eh_seq, cleanup);
883 
884   if (over)
885     {
886       x = gimple_build_label (over);
887       gimple_seq_add_stmt (&result, x);
888     }
889   return result;
890 }
891 
892 /* A subroutine of lower_try_finally.  Duplicate the statements in SEQ.
893    Make sure to record all new labels found.  */
894 
895 static gimple_seq
896 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
897 			     location_t loc)
898 {
899   gtry *region = NULL;
900   gimple_seq new_seq;
901   gimple_stmt_iterator gsi;
902 
903   new_seq = copy_gimple_seq_and_replace_locals (seq);
904 
905   for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
906     {
907       gimple *stmt = gsi_stmt (gsi);
908       if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
909 	{
910 	  tree block = gimple_block (stmt);
911 	  gimple_set_location (stmt, loc);
912 	  gimple_set_block (stmt, block);
913 	}
914     }
915 
916   if (outer_state->tf)
917     region = outer_state->tf->try_finally_expr;
918   collect_finally_tree_1 (new_seq, region);
919 
920   return new_seq;
921 }
922 
923 /* A subroutine of lower_try_finally.  Create a fallthru label for
924    the given try_finally state.  The only tricky bit here is that
925    we have to make sure to record the label in our outer context.  */
926 
927 static tree
928 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
929 {
930   tree label = tf->fallthru_label;
931   treemple temp;
932 
933   if (!label)
934     {
935       label = create_artificial_label (gimple_location (tf->try_finally_expr));
936       tf->fallthru_label = label;
937       if (tf->outer->tf)
938         {
939           temp.t = label;
940           record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
941         }
942     }
943   return label;
944 }
945 
946 /* A subroutine of lower_try_finally.  If FINALLY consists of a
947    GIMPLE_EH_ELSE node, return it.  */
948 
949 static inline geh_else *
950 get_eh_else (gimple_seq finally)
951 {
952   gimple *x = gimple_seq_first_stmt (finally);
953   if (gimple_code (x) == GIMPLE_EH_ELSE)
954     {
955       gcc_assert (gimple_seq_singleton_p (finally));
956       return as_a <geh_else *> (x);
957     }
958   return NULL;
959 }
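
/* For instance (a sketch), a compiler-generated cleanup of the form

	try { body; } finally { EH_ELSE { normal_path; } { eh_path; } }

   is recognized here; GIMPLE_EH_ELSE is only valid as the sole statement
   of a GIMPLE_TRY_FINALLY cleanup, which the assertion above checks.  */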
960 
961 /* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
962    langhook returns non-null, then the language requires that the exception
963    path out of a try_finally be treated specially.  To wit: the code within
964    the finally block may not itself throw an exception.  We have two choices
965    here. First we can duplicate the finally block and wrap it in a
966    must_not_throw region.  Second, we can generate code like
967 
968 	try {
969 	  finally_block;
970 	} catch {
971 	  if (fintmp == eh_edge)
972 	    protect_cleanup_actions;
973 	}
974 
975    where "fintmp" is the temporary used in the switch statement generation
976    alternative considered below.  For the nonce, we always choose the first
977    option.
978 
979    THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */
980 
981 static void
982 honor_protect_cleanup_actions (struct leh_state *outer_state,
983 			       struct leh_state *this_state,
984 			       struct leh_tf_state *tf)
985 {
986   gimple_seq finally = gimple_try_cleanup (tf->top_p);
987 
988   /* EH_ELSE doesn't come from user code; only compiler generated stuff.
989      It does need to be handled here, so as to separate the (different)
990      EH path from the normal path.  But we should not attempt to wrap
991      it with a must-not-throw node (which indeed gets in the way).  */
992   if (geh_else *eh_else = get_eh_else (finally))
993     {
994       gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
995       finally = gimple_eh_else_e_body (eh_else);
996 
997       /* Let the ELSE see the exception that's being processed, but
998 	 since the cleanup is outside the try block, process it with
999 	 outer_state, otherwise it may be used as a cleanup for
1000 	 itself, and Bad Things (TM) ensue.  */
1001       eh_region save_ehp = outer_state->ehp_region;
1002       outer_state->ehp_region = this_state->cur_region;
1003       lower_eh_constructs_1 (outer_state, &finally);
1004       outer_state->ehp_region = save_ehp;
1005     }
1006   else
1007     {
1008       /* First check for nothing to do.  */
1009       if (lang_hooks.eh_protect_cleanup_actions == NULL)
1010 	return;
1011       tree actions = lang_hooks.eh_protect_cleanup_actions ();
1012       if (actions == NULL)
1013 	return;
1014 
1015       if (this_state)
1016 	finally = lower_try_finally_dup_block (finally, outer_state,
1017 	  gimple_location (tf->try_finally_expr));
1018 
1019       /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1020 	 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1021 	 to be in an enclosing scope, but needs to be implemented at this level
1022 	 to avoid a nesting violation (see wrap_temporary_cleanups in
1023 	 cp/decl.c).  Since it's logically at an outer level, we should call
1024 	 terminate before we get to it, so strip it away before adding the
1025 	 MUST_NOT_THROW filter.  */
1026       gimple_stmt_iterator gsi = gsi_start (finally);
1027       gimple *x = gsi_stmt (gsi);
1028       if (gimple_code (x) == GIMPLE_TRY
1029 	  && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1030 	  && gimple_try_catch_is_cleanup (x))
1031 	{
1032 	  gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1033 	  gsi_remove (&gsi, false);
1034 	}
1035 
1036       /* Wrap the block with protect_cleanup_actions as the action.  */
1037       geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
1038       gtry *try_stmt = gimple_build_try (finally,
1039 					 gimple_seq_alloc_with_stmt (eh_mnt),
1040 					 GIMPLE_TRY_CATCH);
1041       finally = lower_eh_must_not_throw (outer_state, try_stmt);
1042     }
1043 
1044   /* Drop all of this into the exception sequence.  */
1045   emit_post_landing_pad (&eh_seq, tf->region);
1046   gimple_seq_add_seq (&eh_seq, finally);
1047   if (gimple_seq_may_fallthru (finally))
1048     emit_resx (&eh_seq, tf->region);
1049 
1050   /* Having now been handled, EH isn't to be considered with
1051      the rest of the outgoing edges.  */
1052   tf->may_throw = false;
1053 }
1054 
1055 /* A subroutine of lower_try_finally.  We have determined that there is
1056    no fallthru edge out of the finally block.  This means that there is
1057    no outgoing edge corresponding to any incoming edge.  Restructure the
1058    try_finally node for this special case.  */
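
/* For example (a sketch), if the finally block cannot fall through,

	try { ... return; ... } finally { abort (); }

   all queued returns and gotos are simply redirected to a fresh label
   placed in front of the single emitted copy of the finally block; no
   branch out of the finally block is needed.  */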
1059 
1060 static void
1061 lower_try_finally_nofallthru (struct leh_state *state,
1062 			      struct leh_tf_state *tf)
1063 {
1064   tree lab;
1065   gimple *x;
1066   geh_else *eh_else;
1067   gimple_seq finally;
1068   struct goto_queue_node *q, *qe;
1069 
1070   lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1071 
1072   /* We expect that tf->top_p is a GIMPLE_TRY. */
1073   finally = gimple_try_cleanup (tf->top_p);
1074   tf->top_p_seq = gimple_try_eval (tf->top_p);
1075 
1076   x = gimple_build_label (lab);
1077   gimple_seq_add_stmt (&tf->top_p_seq, x);
1078 
1079   q = tf->goto_queue;
1080   qe = q + tf->goto_queue_active;
1081   for (; q < qe; ++q)
1082     if (q->index < 0)
1083       do_return_redirection (q, lab, NULL);
1084     else
1085       do_goto_redirection (q, lab, NULL, tf);
1086 
1087   replace_goto_queue (tf);
1088 
1089   /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
1090   eh_else = get_eh_else (finally);
1091   if (eh_else)
1092     {
1093       finally = gimple_eh_else_n_body (eh_else);
1094       lower_eh_constructs_1 (state, &finally);
1095       gimple_seq_add_seq (&tf->top_p_seq, finally);
1096 
1097       if (tf->may_throw)
1098 	{
1099 	  finally = gimple_eh_else_e_body (eh_else);
1100 	  lower_eh_constructs_1 (state, &finally);
1101 
1102 	  emit_post_landing_pad (&eh_seq, tf->region);
1103 	  gimple_seq_add_seq (&eh_seq, finally);
1104 	}
1105     }
1106   else
1107     {
1108       lower_eh_constructs_1 (state, &finally);
1109       gimple_seq_add_seq (&tf->top_p_seq, finally);
1110 
1111       if (tf->may_throw)
1112 	{
1113 	  emit_post_landing_pad (&eh_seq, tf->region);
1114 
1115 	  x = gimple_build_goto (lab);
1116 	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
1117 	  gimple_seq_add_stmt (&eh_seq, x);
1118 	}
1119     }
1120 }
1121 
1122 /* A subroutine of lower_try_finally.  We have determined that there is
1123    exactly one destination of the finally block.  Restructure the
1124    try_finally node for this special case.  */
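
/* For example (a sketch), if the only way out is via return statements,

	try { ... return x; ... return y; ... } finally { f (); }

   the finally block is emitted once behind a fresh label, every queued
   return is redirected to that label, and the original return is then
   re-emitted after the finally code.  */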
1125 
1126 static void
1127 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1128 {
1129   struct goto_queue_node *q, *qe;
1130   geh_else *eh_else;
1131   glabel *label_stmt;
1132   gimple *x;
1133   gimple_seq finally;
1134   gimple_stmt_iterator gsi;
1135   tree finally_label;
1136   location_t loc = gimple_location (tf->try_finally_expr);
1137 
1138   finally = gimple_try_cleanup (tf->top_p);
1139   tf->top_p_seq = gimple_try_eval (tf->top_p);
1140 
1141   /* Since there's only one destination, and the destination edge can only
1142      either be EH or non-EH, that implies that all of our incoming edges
1143      are of the same type.  Therefore we can lower EH_ELSE immediately.  */
1144   eh_else = get_eh_else (finally);
1145   if (eh_else)
1146     {
1147       if (tf->may_throw)
1148 	finally = gimple_eh_else_e_body (eh_else);
1149       else
1150 	finally = gimple_eh_else_n_body (eh_else);
1151     }
1152 
1153   lower_eh_constructs_1 (state, &finally);
1154 
1155   for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1156     {
1157       gimple *stmt = gsi_stmt (gsi);
1158       if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1159 	{
1160 	  tree block = gimple_block (stmt);
1161 	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1162 	  gimple_set_block (stmt, block);
1163 	}
1164     }
1165 
1166   if (tf->may_throw)
1167     {
1168       /* Only reachable via the exception edge.  Add the given label to
1169          the head of the FINALLY block.  Append a RESX at the end.  */
1170       emit_post_landing_pad (&eh_seq, tf->region);
1171       gimple_seq_add_seq (&eh_seq, finally);
1172       emit_resx (&eh_seq, tf->region);
1173       return;
1174     }
1175 
1176   if (tf->may_fallthru)
1177     {
1178       /* Only reachable via the fallthru edge.  Do nothing but let
1179 	 the two blocks run together; we'll fall out the bottom.  */
1180       gimple_seq_add_seq (&tf->top_p_seq, finally);
1181       return;
1182     }
1183 
1184   finally_label = create_artificial_label (loc);
1185   label_stmt = gimple_build_label (finally_label);
1186   gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);
1187 
1188   gimple_seq_add_seq (&tf->top_p_seq, finally);
1189 
1190   q = tf->goto_queue;
1191   qe = q + tf->goto_queue_active;
1192 
1193   if (tf->may_return)
1194     {
1195       /* Reachable by return expressions only.  Redirect them.  */
1196       for (; q < qe; ++q)
1197 	do_return_redirection (q, finally_label, NULL);
1198       replace_goto_queue (tf);
1199     }
1200   else
1201     {
1202       /* Reachable by goto expressions only.  Redirect them.  */
1203       for (; q < qe; ++q)
1204 	do_goto_redirection (q, finally_label, NULL, tf);
1205       replace_goto_queue (tf);
1206 
1207       if (tf->dest_array[0] == tf->fallthru_label)
1208 	{
1209 	  /* Reachable by goto to fallthru label only.  Redirect it
1210 	     to the new label (already created, sadly), and do not
1211 	     emit the final branch out, or the fallthru label.  */
1212 	  tf->fallthru_label = NULL;
1213 	  return;
1214 	}
1215     }
1216 
1217   /* Place the original return/goto to the original destination
1218      immediately after the finally block. */
1219   x = tf->goto_queue[0].cont_stmt;
1220   gimple_seq_add_stmt (&tf->top_p_seq, x);
1221   maybe_record_in_goto_queue (state, x);
1222 }
1223 
1224 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1225    and outgoing from the finally block.  Implement this by duplicating the
1226    finally block for every destination.  */
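
/* For example (a sketch), with a fallthru exit and one escaping goto,

	try { ... goto out; ... } finally { f (); }

   roughly becomes

	...
	f ();  goto fallthru_label;     copy taken when falling through
	L1: f ();  goto out;            copy taken by the redirected goto

   with the escaping goto inside the try body rewritten to jump to L1.  */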
1227 
1228 static void
1229 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1230 {
1231   gimple_seq finally;
1232   gimple_seq new_stmt;
1233   gimple_seq seq;
1234   gimple *x;
1235   geh_else *eh_else;
1236   tree tmp;
1237   location_t tf_loc = gimple_location (tf->try_finally_expr);
1238 
1239   finally = gimple_try_cleanup (tf->top_p);
1240 
1241   /* Notice EH_ELSE, and simplify some of the remaining code
1242      by considering FINALLY to be the normal return path only.  */
1243   eh_else = get_eh_else (finally);
1244   if (eh_else)
1245     finally = gimple_eh_else_n_body (eh_else);
1246 
1247   tf->top_p_seq = gimple_try_eval (tf->top_p);
1248   new_stmt = NULL;
1249 
1250   if (tf->may_fallthru)
1251     {
1252       seq = lower_try_finally_dup_block (finally, state, tf_loc);
1253       lower_eh_constructs_1 (state, &seq);
1254       gimple_seq_add_seq (&new_stmt, seq);
1255 
1256       tmp = lower_try_finally_fallthru_label (tf);
1257       x = gimple_build_goto (tmp);
1258       gimple_set_location (x, tf_loc);
1259       gimple_seq_add_stmt (&new_stmt, x);
1260     }
1261 
1262   if (tf->may_throw)
1263     {
1264       /* We don't need to copy the EH path of EH_ELSE,
1265 	 since it is only emitted once.  */
1266       if (eh_else)
1267 	seq = gimple_eh_else_e_body (eh_else);
1268       else
1269 	seq = lower_try_finally_dup_block (finally, state, tf_loc);
1270       lower_eh_constructs_1 (state, &seq);
1271 
1272       emit_post_landing_pad (&eh_seq, tf->region);
1273       gimple_seq_add_seq (&eh_seq, seq);
1274       emit_resx (&eh_seq, tf->region);
1275     }
1276 
1277   if (tf->goto_queue)
1278     {
1279       struct goto_queue_node *q, *qe;
1280       int return_index, index;
1281       struct labels_s
1282       {
1283 	struct goto_queue_node *q;
1284 	tree label;
1285       } *labels;
1286 
1287       return_index = tf->dest_array.length ();
1288       labels = XCNEWVEC (struct labels_s, return_index + 1);
1289 
1290       q = tf->goto_queue;
1291       qe = q + tf->goto_queue_active;
1292       for (; q < qe; q++)
1293 	{
1294 	  index = q->index < 0 ? return_index : q->index;
1295 
1296 	  if (!labels[index].q)
1297 	    labels[index].q = q;
1298 	}
1299 
1300       for (index = 0; index < return_index + 1; index++)
1301 	{
1302 	  tree lab;
1303 
1304 	  q = labels[index].q;
1305 	  if (! q)
1306 	    continue;
1307 
1308 	  lab = labels[index].label
1309 	    = create_artificial_label (tf_loc);
1310 
1311 	  if (index == return_index)
1312 	    do_return_redirection (q, lab, NULL);
1313 	  else
1314 	    do_goto_redirection (q, lab, NULL, tf);
1315 
1316 	  x = gimple_build_label (lab);
1317           gimple_seq_add_stmt (&new_stmt, x);
1318 
1319 	  seq = lower_try_finally_dup_block (finally, state, q->location);
1320 	  lower_eh_constructs_1 (state, &seq);
1321           gimple_seq_add_seq (&new_stmt, seq);
1322 
1323           gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1324 	  maybe_record_in_goto_queue (state, q->cont_stmt);
1325 	}
1326 
1327       for (q = tf->goto_queue; q < qe; q++)
1328 	{
1329 	  tree lab;
1330 
1331 	  index = q->index < 0 ? return_index : q->index;
1332 
1333 	  if (labels[index].q == q)
1334 	    continue;
1335 
1336 	  lab = labels[index].label;
1337 
1338 	  if (index == return_index)
1339 	    do_return_redirection (q, lab, NULL);
1340 	  else
1341 	    do_goto_redirection (q, lab, NULL, tf);
1342 	}
1343 
1344       replace_goto_queue (tf);
1345       free (labels);
1346     }
1347 
1348   /* Need to link the new stmts after running replace_goto_queue so that
1349      we do not process the same goto stmts twice.  */
1350   gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1351 }
1352 
1353 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1354    and outgoing from the finally block.  Implement this by instrumenting
1355    each incoming edge and creating a switch statement at the end of the
1356    finally block that branches to the appropriate destination.  */
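
/* For example (a sketch), with a fallthru exit and one escaping goto,

	try { ... goto out; ... } finally { f (); }

   roughly becomes

	... finally_tmp = 0; ... finally_tmp = 1; goto finally_label; ...
	finally_label:
	f ();
	switch (finally_tmp)
	  {
	  case 0: goto fallthru_label;
	  default: goto out;
	  }

   so the finally block is emitted only once, at the cost of the switch
   machinery.  (The exact index values are assigned by the code below.)  */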
1357 
1358 static void
1359 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1360 {
1361   struct goto_queue_node *q, *qe;
1362   tree finally_tmp, finally_label;
1363   int return_index, eh_index, fallthru_index;
1364   int nlabels, ndests, j, last_case_index;
1365   tree last_case;
1366   auto_vec<tree> case_label_vec;
1367   gimple_seq switch_body = NULL;
1368   gimple *x;
1369   geh_else *eh_else;
1370   tree tmp;
1371   gimple *switch_stmt;
1372   gimple_seq finally;
1373   hash_map<tree, gimple *> *cont_map = NULL;
1374   /* The location of the TRY_FINALLY stmt.  */
1375   location_t tf_loc = gimple_location (tf->try_finally_expr);
1376   /* The location of the finally block.  */
1377   location_t finally_loc;
1378 
1379   finally = gimple_try_cleanup (tf->top_p);
1380   eh_else = get_eh_else (finally);
1381 
1382   /* Mash the TRY block to the head of the chain.  */
1383   tf->top_p_seq = gimple_try_eval (tf->top_p);
1384 
1385   /* The location of the finally is either the last stmt in the finally
1386      block or the location of the TRY_FINALLY itself.  */
1387   x = gimple_seq_last_stmt (finally);
1388   finally_loc = x ? gimple_location (x) : tf_loc;
1389 
1390   /* Prepare for switch statement generation.  */
1391   nlabels = tf->dest_array.length ();
1392   return_index = nlabels;
1393   eh_index = return_index + tf->may_return;
1394   fallthru_index = eh_index + (tf->may_throw && !eh_else);
1395   ndests = fallthru_index + tf->may_fallthru;
1396 
1397   finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1398   finally_label = create_artificial_label (finally_loc);
1399 
1400   /* We use vec::quick_push on case_label_vec throughout this function,
1401      since we know the size in advance and allocate precisely as much
1402      space as needed.  */
1403   case_label_vec.create (ndests);
1404   last_case = NULL;
1405   last_case_index = 0;
1406 
1407   /* Begin inserting code for getting to the finally block.  Things
1408      are done in this order to correspond to the sequence the code is
1409      laid out.  */
1410 
1411   if (tf->may_fallthru)
1412     {
1413       x = gimple_build_assign (finally_tmp,
1414 			       build_int_cst (integer_type_node,
1415 					      fallthru_index));
1416       gimple_set_location (x, finally_loc);
1417       gimple_seq_add_stmt (&tf->top_p_seq, x);
1418 
1419       tmp = build_int_cst (integer_type_node, fallthru_index);
1420       last_case = build_case_label (tmp, NULL,
1421 				    create_artificial_label (finally_loc));
1422       case_label_vec.quick_push (last_case);
1423       last_case_index++;
1424 
1425       x = gimple_build_label (CASE_LABEL (last_case));
1426       gimple_seq_add_stmt (&switch_body, x);
1427 
1428       tmp = lower_try_finally_fallthru_label (tf);
1429       x = gimple_build_goto (tmp);
1430       gimple_set_location (x, finally_loc);
1431       gimple_seq_add_stmt (&switch_body, x);
1432     }
1433 
1434   /* For EH_ELSE, emit the exception path (plus resx) now, then
1435      subsequently we only need consider the normal path.  */
1436   if (eh_else)
1437     {
1438       if (tf->may_throw)
1439 	{
1440 	  finally = gimple_eh_else_e_body (eh_else);
1441 	  lower_eh_constructs_1 (state, &finally);
1442 
1443 	  emit_post_landing_pad (&eh_seq, tf->region);
1444 	  gimple_seq_add_seq (&eh_seq, finally);
1445 	  emit_resx (&eh_seq, tf->region);
1446 	}
1447 
1448       finally = gimple_eh_else_n_body (eh_else);
1449     }
1450   else if (tf->may_throw)
1451     {
1452       emit_post_landing_pad (&eh_seq, tf->region);
1453 
1454       x = gimple_build_assign (finally_tmp,
1455 			       build_int_cst (integer_type_node, eh_index));
1456       gimple_seq_add_stmt (&eh_seq, x);
1457 
1458       x = gimple_build_goto (finally_label);
1459       gimple_set_location (x, tf_loc);
1460       gimple_seq_add_stmt (&eh_seq, x);
1461 
1462       tmp = build_int_cst (integer_type_node, eh_index);
1463       last_case = build_case_label (tmp, NULL,
1464 				    create_artificial_label (tf_loc));
1465       case_label_vec.quick_push (last_case);
1466       last_case_index++;
1467 
1468       x = gimple_build_label (CASE_LABEL (last_case));
1469       gimple_seq_add_stmt (&eh_seq, x);
1470       emit_resx (&eh_seq, tf->region);
1471     }
1472 
1473   x = gimple_build_label (finally_label);
1474   gimple_seq_add_stmt (&tf->top_p_seq, x);
1475 
1476   lower_eh_constructs_1 (state, &finally);
1477   gimple_seq_add_seq (&tf->top_p_seq, finally);
1478 
1479   /* Redirect each incoming goto edge.  */
1480   q = tf->goto_queue;
1481   qe = q + tf->goto_queue_active;
1482   j = last_case_index + tf->may_return;
1483   /* Prepare the assignments to finally_tmp that are executed upon the
1484      entrance through a particular edge. */
1485   for (; q < qe; ++q)
1486     {
1487       gimple_seq mod = NULL;
1488       int switch_id;
1489       unsigned int case_index;
1490 
1491       if (q->index < 0)
1492 	{
1493 	  x = gimple_build_assign (finally_tmp,
1494 				   build_int_cst (integer_type_node,
1495 						  return_index));
1496 	  gimple_seq_add_stmt (&mod, x);
1497 	  do_return_redirection (q, finally_label, mod);
1498 	  switch_id = return_index;
1499 	}
1500       else
1501 	{
1502 	  x = gimple_build_assign (finally_tmp,
1503 				   build_int_cst (integer_type_node, q->index));
1504 	  gimple_seq_add_stmt (&mod, x);
1505 	  do_goto_redirection (q, finally_label, mod, tf);
1506 	  switch_id = q->index;
1507 	}
1508 
1509       case_index = j + q->index;
1510       if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1511         {
1512           tree case_lab;
1513 	  tmp = build_int_cst (integer_type_node, switch_id);
1514           case_lab = build_case_label (tmp, NULL,
1515 				       create_artificial_label (tf_loc));
1516           /* We store the cont_stmt in the pointer map, so that we can recover
1517              it in the loop below.  */
1518           if (!cont_map)
1519 	    cont_map = new hash_map<tree, gimple *>;
1520           cont_map->put (case_lab, q->cont_stmt);
1521           case_label_vec.quick_push (case_lab);
1522         }
1523     }
1524   for (j = last_case_index; j < last_case_index + nlabels; j++)
1525     {
1526       gimple *cont_stmt;
1527 
1528       last_case = case_label_vec[j];
1529 
1530       gcc_assert (last_case);
1531       gcc_assert (cont_map);
1532 
1533       cont_stmt = *cont_map->get (last_case);
1534 
1535       x = gimple_build_label (CASE_LABEL (last_case));
1536       gimple_seq_add_stmt (&switch_body, x);
1537       gimple_seq_add_stmt (&switch_body, cont_stmt);
1538       maybe_record_in_goto_queue (state, cont_stmt);
1539     }
1540   if (cont_map)
1541     delete cont_map;
1542 
1543   replace_goto_queue (tf);
1544 
1545   /* Make sure that the last case is the default label, as one is required.
1546      Then sort the labels, which is also required in GIMPLE.  */
1547   CASE_LOW (last_case) = NULL;
1548   tree tem = case_label_vec.pop ();
1549   gcc_assert (tem == last_case);
1550   sort_case_labels (case_label_vec);
1551 
1552   /* Build the switch statement, setting last_case to be the default
1553      label.  */
1554   switch_stmt = gimple_build_switch (finally_tmp, last_case,
1555 				     case_label_vec);
1556   gimple_set_location (switch_stmt, finally_loc);
1557 
1558   /* Need to link SWITCH_STMT after running replace_goto_queue so that
1559      we do not process the same goto stmts twice.  */
1560   gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1561   gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1562 }
1563 
1564 /* Decide whether or not we are going to duplicate the finally block.
1565    There are several considerations.
1566 
1567    Chief among them, we'd like to prevent egregious code growth.  One way to
1568    do this is to estimate the size of the finally block, multiply
1569    that by the number of copies we'd need to make, and compare against
1570    the estimate of the size of the switch machinery we'd have to add.  */
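
/* For instance (a sketch with made-up numbers): a finally block estimated
   at four insns reached from three destinations gives a copy estimate of
   (4 + 1) * 3 = 15 against a switch estimate of 10 + 2 * 3 = 16, so even
   when optimizing for size the finally block would still be copied.  */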
1571 
1572 static bool
1573 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1574 {
1575   int f_estimate, sw_estimate;
1576   geh_else *eh_else;
1577 
1578   /* If there's an EH_ELSE involved, the exception path is separate
1579      and really doesn't come into play for this computation.  */
1580   eh_else = get_eh_else (finally);
1581   if (eh_else)
1582     {
1583       ndests -= may_throw;
1584       finally = gimple_eh_else_n_body (eh_else);
1585     }
1586 
1587   if (!optimize)
1588     {
1589       gimple_stmt_iterator gsi;
1590 
1591       if (ndests == 1)
1592         return true;
1593 
1594       for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1595 	{
1596 	  /* Duplicate __builtin_stack_restore in the hope of eliminating it
1597 	     on the EH paths and, consequently, useless cleanups.  */
1598 	  gimple *stmt = gsi_stmt (gsi);
1599 	  if (!is_gimple_debug (stmt)
1600 	      && !gimple_clobber_p (stmt)
1601 	      && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1602 	    return false;
1603 	}
1604       return true;
1605     }
1606 
1607   /* Finally estimate N times, plus N gotos.  */
1608   f_estimate = estimate_num_insns_seq (finally, &eni_size_weights);
1609   f_estimate = (f_estimate + 1) * ndests;
1610 
1611   /* Switch statement (cost 10), N variable assignments, N gotos.  */
1612   sw_estimate = 10 + 2 * ndests;
1613 
1614   /* Optimize for size clearly wants our best guess.  */
1615   if (optimize_function_for_size_p (cfun))
1616     return f_estimate < sw_estimate;
1617 
1618   /* ??? These numbers are completely made up so far.  */
1619   if (optimize > 1)
1620     return f_estimate < 100 || f_estimate < sw_estimate * 2;
1621   else
1622     return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1623 }
1624 
1625 /* STATE's current region is the enclosing region for a possible cleanup
1626    region, or the region itself.  Returns TRUE if such a cleanup region
1627    would be unreachable.
1628 
1629    Cleanup regions within a must-not-throw region aren't actually reachable
1630    even if there are throwing stmts within them, because the personality
1631    routine will call terminate before unwinding.  */
1632 
1633 static bool
1634 cleanup_is_dead_in (leh_state *state)
1635 {
1636   if (flag_checking)
1637     {
1638       eh_region reg = state->cur_region;
1639       while (reg && reg->type == ERT_CLEANUP)
1640 	reg = reg->outer;
1641 
1642       gcc_assert (reg == state->outer_non_cleanup);
1643     }
1644 
1645   eh_region reg = state->outer_non_cleanup;
1646   return (reg && reg->type == ERT_MUST_NOT_THROW);
1647 }
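
/* Hand-written illustration, not taken from the sources: for C++ like

     void f () noexcept { std::string s = make (); use (s); }

   the EH cleanup that would destroy S sits inside the must-not-throw
   region created for the noexcept function, so if USE threw, the
   personality routine would call std::terminate before unwinding ever
   reached that cleanup; the cleanup region is dead and is not built.  */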
1648 
1649 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY nodes
1650    to a sequence of labels and blocks, plus the exception region trees
1651    that record all the magic.  This is complicated by the need to
1652    arrange for the FINALLY block to be executed on all exits.  */
1653 
1654 static gimple_seq
1655 lower_try_finally (struct leh_state *state, gtry *tp)
1656 {
1657   struct leh_tf_state this_tf;
1658   struct leh_state this_state;
1659   int ndests;
1660   gimple_seq old_eh_seq;
1661 
1662   /* Process the try block.  */
1663 
1664   memset (&this_tf, 0, sizeof (this_tf));
1665   this_tf.try_finally_expr = tp;
1666   this_tf.top_p = tp;
1667   this_tf.outer = state;
1668   if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state))
1669     {
1670       this_tf.region = gen_eh_region_cleanup (state->cur_region);
1671       this_state.cur_region = this_tf.region;
1672     }
1673   else
1674     {
1675       this_tf.region = NULL;
1676       this_state.cur_region = state->cur_region;
1677     }
1678 
1679   this_state.outer_non_cleanup = state->outer_non_cleanup;
1680   this_state.ehp_region = state->ehp_region;
1681   this_state.tf = &this_tf;
1682 
1683   old_eh_seq = eh_seq;
1684   eh_seq = NULL;
1685 
1686   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1687 
1688   /* Determine if the try block is escaped through the bottom.  */
1689   this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1690 
1691   /* Determine if any exceptions are possible within the try block.  */
1692   if (this_tf.region)
1693     this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1694   if (this_tf.may_throw)
1695     honor_protect_cleanup_actions (state, &this_state, &this_tf);
1696 
1697   /* Determine how many edges (still) reach the finally block.  Or rather,
1698      how many destinations are reached by the finally block.  Use this to
1699      determine how we process the finally block itself.  */
1700 
1701   ndests = this_tf.dest_array.length ();
1702   ndests += this_tf.may_fallthru;
1703   ndests += this_tf.may_return;
1704   ndests += this_tf.may_throw;
1705 
1706   /* If the FINALLY block is not reachable, dike it out.  */
1707   if (ndests == 0)
1708     {
1709       gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1710       gimple_try_set_cleanup (tp, NULL);
1711     }
1712   /* If the finally block doesn't fall through, then any destination
1713      we might try to impose there isn't reached either.  There may be
1714      some minor amount of cleanup and redirection still needed.  */
1715   else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1716     lower_try_finally_nofallthru (state, &this_tf);
1717 
1718   /* We can easily special-case redirection to a single destination.  */
1719   else if (ndests == 1)
1720     lower_try_finally_onedest (state, &this_tf);
1721   else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1722 				    gimple_try_cleanup (tp)))
1723     lower_try_finally_copy (state, &this_tf);
1724   else
1725     lower_try_finally_switch (state, &this_tf);
1726 
1727   /* If someone requested we add a label at the end of the transformed
1728      block, do so.  */
1729   if (this_tf.fallthru_label)
1730     {
1731       /* This must be reached only if ndests == 0. */
1732       gimple *x = gimple_build_label (this_tf.fallthru_label);
1733       gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1734     }
1735 
1736   this_tf.dest_array.release ();
1737   free (this_tf.goto_queue);
1738   if (this_tf.goto_queue_map)
1739     delete this_tf.goto_queue_map;
1740 
1741   /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1742      If there was no old eh_seq, then the append is trivially already done.  */
1743   if (old_eh_seq)
1744     {
1745       if (eh_seq == NULL)
1746 	eh_seq = old_eh_seq;
1747       else
1748 	{
1749 	  gimple_seq new_eh_seq = eh_seq;
1750 	  eh_seq = old_eh_seq;
1751 	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1752 	}
1753     }
1754 
1755   return this_tf.top_p_seq;
1756 }
1757 
1758 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
1759    list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1760    exception region trees that record all the magic.  */
1761 
1762 static gimple_seq
1763 lower_catch (struct leh_state *state, gtry *tp)
1764 {
1765   eh_region try_region = NULL;
1766   struct leh_state this_state = *state;
1767   gimple_stmt_iterator gsi;
1768   tree out_label;
1769   gimple_seq new_seq, cleanup;
1770   gimple *x;
1771   geh_dispatch *eh_dispatch;
1772   location_t try_catch_loc = gimple_location (tp);
1773   location_t catch_loc = UNKNOWN_LOCATION;
1774 
1775   if (flag_exceptions)
1776     {
1777       try_region = gen_eh_region_try (state->cur_region);
1778       this_state.cur_region = try_region;
1779       this_state.outer_non_cleanup = this_state.cur_region;
1780     }
1781 
1782   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1783 
1784   if (!eh_region_may_contain_throw (try_region))
1785     return gimple_try_eval (tp);
1786 
1787   new_seq = NULL;
1788   eh_dispatch = gimple_build_eh_dispatch (try_region->index);
1789   gimple_seq_add_stmt (&new_seq, eh_dispatch);
1790   emit_resx (&new_seq, try_region);
1791 
1792   this_state.cur_region = state->cur_region;
1793   this_state.outer_non_cleanup = state->outer_non_cleanup;
1794   this_state.ehp_region = try_region;
1795 
1796   /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1797      itself, so that e.g. for coverage purposes the nested cleanups don't
1798      appear before the cleanup body.  See PR64634 for details.  */
1799   gimple_seq old_eh_seq = eh_seq;
1800   eh_seq = NULL;
1801 
1802   out_label = NULL;
1803   cleanup = gimple_try_cleanup (tp);
1804   for (gsi = gsi_start (cleanup);
1805        !gsi_end_p (gsi);
1806        gsi_next (&gsi))
1807     {
1808       eh_catch c;
1809       gcatch *catch_stmt;
1810       gimple_seq handler;
1811 
1812       catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1813       if (catch_loc == UNKNOWN_LOCATION)
1814 	catch_loc = gimple_location (catch_stmt);
1815       c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1816 
1817       handler = gimple_catch_handler (catch_stmt);
1818       lower_eh_constructs_1 (&this_state, &handler);
1819 
1820       c->label = create_artificial_label (UNKNOWN_LOCATION);
1821       x = gimple_build_label (c->label);
1822       gimple_seq_add_stmt (&new_seq, x);
1823 
1824       gimple_seq_add_seq (&new_seq, handler);
1825 
1826       if (gimple_seq_may_fallthru (new_seq))
1827 	{
1828 	  if (!out_label)
1829 	    out_label = create_artificial_label (try_catch_loc);
1830 
1831 	  x = gimple_build_goto (out_label);
1832 	  gimple_seq_add_stmt (&new_seq, x);
1833 	}
1834       if (!c->type_list)
1835 	break;
1836     }
1837 
1838   /* Try to set a location on the dispatching construct to avoid inheriting
1839      the location of the previous statement.  */
1840   gimple_set_location (eh_dispatch, catch_loc);
1841 
1842   gimple_try_set_cleanup (tp, new_seq);
1843 
1844   gimple_seq new_eh_seq = eh_seq;
1845   eh_seq = old_eh_seq;
1846   gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1847   gimple_seq_add_seq (&eh_seq, new_eh_seq);
1848   return ret_seq;
1849 }
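
/* Rough sketch of the result, for orientation only: after lower_catch
   the cleanup sequence of the GIMPLE_TRY looks approximately like

     eh_dispatch <try-region>;
     resx <try-region>;
     <L1>: <handler-1 ...>  goto <out>;
     <L2>: <handler-2 ...>  goto <out>;

   and frob_into_branch_around then flattens the try into straight-line
   labels and gotos that branch around this dispatch code.  */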
1850 
1851 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
1852    GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1853    region trees that record all the magic.  */
1854 
1855 static gimple_seq
1856 lower_eh_filter (struct leh_state *state, gtry *tp)
1857 {
1858   struct leh_state this_state = *state;
1859   eh_region this_region = NULL;
1860   gimple *inner, *x;
1861   gimple_seq new_seq;
1862 
1863   inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1864 
1865   if (flag_exceptions)
1866     {
1867       this_region = gen_eh_region_allowed (state->cur_region,
1868 				           gimple_eh_filter_types (inner));
1869       this_state.cur_region = this_region;
1870       this_state.outer_non_cleanup = this_state.cur_region;
1871     }
1872 
1873   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1874 
1875   if (!eh_region_may_contain_throw (this_region))
1876     return gimple_try_eval (tp);
1877 
1878   this_state.cur_region = state->cur_region;
1879   this_state.ehp_region = this_region;
1880 
1881   new_seq = NULL;
1882   x = gimple_build_eh_dispatch (this_region->index);
1883   gimple_set_location (x, gimple_location (tp));
1884   gimple_seq_add_stmt (&new_seq, x);
1885   emit_resx (&new_seq, this_region);
1886 
1887   this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1888   x = gimple_build_label (this_region->u.allowed.label);
1889   gimple_seq_add_stmt (&new_seq, x);
1890 
1891   lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1892   gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1893 
1894   gimple_try_set_cleanup (tp, new_seq);
1895 
1896   return frob_into_branch_around (tp, this_region, NULL);
1897 }
1898 
1899 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
1900    a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1901    plus the exception region trees that record all the magic.  */
1902 
1903 static gimple_seq
1904 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1905 {
1906   struct leh_state this_state = *state;
1907 
1908   if (flag_exceptions)
1909     {
1910       gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1911       eh_region this_region;
1912 
1913       this_region = gen_eh_region_must_not_throw (state->cur_region);
1914       this_region->u.must_not_throw.failure_decl
1915 	= gimple_eh_must_not_throw_fndecl (
1916 	    as_a <geh_mnt *> (inner));
1917       this_region->u.must_not_throw.failure_loc
1918 	= LOCATION_LOCUS (gimple_location (tp));
1919 
1920       /* In order to get mangling applied to this decl, we must mark it
1921 	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
1922 	 needs to happen.  */
1923       TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1924 
1925       this_state.cur_region = this_region;
1926       this_state.outer_non_cleanup = this_state.cur_region;
1927     }
1928 
1929   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1930 
1931   return gimple_try_eval (tp);
1932 }
1933 
1934 /* Implement a cleanup expression.  This is similar to try-finally,
1935    except that we only execute the cleanup block for exception edges.  */
1936 
1937 static gimple_seq
1938 lower_cleanup (struct leh_state *state, gtry *tp)
1939 {
1940   struct leh_state this_state = *state;
1941   eh_region this_region = NULL;
1942   struct leh_tf_state fake_tf;
1943   gimple_seq result;
1944   bool cleanup_dead = cleanup_is_dead_in (state);
1945 
1946   if (flag_exceptions && !cleanup_dead)
1947     {
1948       this_region = gen_eh_region_cleanup (state->cur_region);
1949       this_state.cur_region = this_region;
1950       this_state.outer_non_cleanup = state->outer_non_cleanup;
1951     }
1952 
1953   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1954 
1955   if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1956     return gimple_try_eval (tp);
1957 
1958   /* Build enough of a try-finally state so that we can reuse
1959      honor_protect_cleanup_actions.  */
1960   memset (&fake_tf, 0, sizeof (fake_tf));
1961   fake_tf.top_p = fake_tf.try_finally_expr = tp;
1962   fake_tf.outer = state;
1963   fake_tf.region = this_region;
1964   fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1965   fake_tf.may_throw = true;
1966 
1967   honor_protect_cleanup_actions (state, NULL, &fake_tf);
1968 
1969   if (fake_tf.may_throw)
1970     {
1971       /* In this case honor_protect_cleanup_actions had nothing to do,
1972 	 and we should process this normally.  */
1973       lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1974       result = frob_into_branch_around (tp, this_region,
1975                                         fake_tf.fallthru_label);
1976     }
1977   else
1978     {
1979       /* In this case honor_protect_cleanup_actions did nearly all of
1980 	 the work.  All we have left is to append the fallthru_label.  */
1981 
1982       result = gimple_try_eval (tp);
1983       if (fake_tf.fallthru_label)
1984 	{
1985 	  gimple *x = gimple_build_label (fake_tf.fallthru_label);
1986 	  gimple_seq_add_stmt (&result, x);
1987 	}
1988     }
1989   return result;
1990 }
1991 
1992 /* Main loop for lowering eh constructs. Also moves gsi to the next
1993    statement. */
1994 
1995 static void
1996 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1997 {
1998   gimple_seq replace;
1999   gimple *x;
2000   gimple *stmt = gsi_stmt (*gsi);
2001 
2002   switch (gimple_code (stmt))
2003     {
2004     case GIMPLE_CALL:
2005       {
2006 	tree fndecl = gimple_call_fndecl (stmt);
2007 	tree rhs, lhs;
2008 
2009 	if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
2010 	  switch (DECL_FUNCTION_CODE (fndecl))
2011 	    {
2012 	    case BUILT_IN_EH_POINTER:
2013 	      /* The front end may have generated a call to
2014 		 __builtin_eh_pointer (0) within a catch region.  Replace
2015 		 this zero argument with the current catch region number.  */
2016 	      if (state->ehp_region)
2017 		{
2018 		  tree nr = build_int_cst (integer_type_node,
2019 					   state->ehp_region->index);
2020 		  gimple_call_set_arg (stmt, 0, nr);
2021 		}
2022 	      else
2023 		{
2024 		  /* The user has done something silly.  Remove it.  */
2025 		  rhs = null_pointer_node;
2026 		  goto do_replace;
2027 		}
2028 	      break;
2029 
2030 	    case BUILT_IN_EH_FILTER:
2031 	      /* ??? This should never appear, but since it's a builtin it
2032 		 is accessible to abuse by users.  Just remove it and
2033 		 replace the use with the arbitrary value zero.  */
2034 	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2035 	    do_replace:
2036 	      lhs = gimple_call_lhs (stmt);
2037 	      x = gimple_build_assign (lhs, rhs);
2038 	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
2039 	      /* FALLTHRU */
2040 
2041 	    case BUILT_IN_EH_COPY_VALUES:
2042 	      /* Likewise this should not appear.  Remove it.  */
2043 	      gsi_remove (gsi, true);
2044 	      return;
2045 
2046 	    default:
2047 	      break;
2048 	    }
2049       }
2050       /* FALLTHRU */
2051 
2052     case GIMPLE_ASSIGN:
2053       /* If the stmt can throw, use a new temporary for the assignment
2054          to a LHS.  This makes sure the old value of the LHS is
2055 	 available on the EH edge.  Only do so for statements that
2056 	 potentially fall through (no noreturn calls e.g.), otherwise
2057 	 this new assignment might create fake fallthru regions.  */
2058       if (stmt_could_throw_p (cfun, stmt)
2059 	  && gimple_has_lhs (stmt)
2060 	  && gimple_stmt_may_fallthru (stmt)
2061 	  && !tree_could_throw_p (gimple_get_lhs (stmt))
2062 	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2063 	{
2064 	  tree lhs = gimple_get_lhs (stmt);
2065 	  tree tmp = create_tmp_var (TREE_TYPE (lhs));
2066 	  gimple *s = gimple_build_assign (lhs, tmp);
2067 	  gimple_set_location (s, gimple_location (stmt));
2068 	  gimple_set_block (s, gimple_block (stmt));
2069 	  gimple_set_lhs (stmt, tmp);
2070 	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
2071 	}
2072       /* Look for things that can throw exceptions, and record them.  */
2073       if (state->cur_region && stmt_could_throw_p (cfun, stmt))
2074 	{
2075 	  record_stmt_eh_region (state->cur_region, stmt);
2076 	  note_eh_region_may_contain_throw (state->cur_region);
2077 	}
2078       break;
2079 
2080     case GIMPLE_COND:
2081     case GIMPLE_GOTO:
2082     case GIMPLE_RETURN:
2083       maybe_record_in_goto_queue (state, stmt);
2084       break;
2085 
2086     case GIMPLE_SWITCH:
2087       verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2088       break;
2089 
2090     case GIMPLE_TRY:
2091       {
2092 	gtry *try_stmt = as_a <gtry *> (stmt);
2093 	if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2094 	  replace = lower_try_finally (state, try_stmt);
2095 	else
2096 	  {
2097 	    x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2098 	    if (!x)
2099 	      {
2100 		replace = gimple_try_eval (try_stmt);
2101 		lower_eh_constructs_1 (state, &replace);
2102 	      }
2103 	    else
2104 	      switch (gimple_code (x))
2105 		{
2106 		case GIMPLE_CATCH:
2107 		  replace = lower_catch (state, try_stmt);
2108 		  break;
2109 		case GIMPLE_EH_FILTER:
2110 		  replace = lower_eh_filter (state, try_stmt);
2111 		  break;
2112 		case GIMPLE_EH_MUST_NOT_THROW:
2113 		  replace = lower_eh_must_not_throw (state, try_stmt);
2114 		  break;
2115 		case GIMPLE_EH_ELSE:
2116 		  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
2117 		  gcc_unreachable ();
2118 		default:
2119 		  replace = lower_cleanup (state, try_stmt);
2120 		  break;
2121 		}
2122 	  }
2123       }
2124 
2125       /* Remove the old stmt and insert the transformed sequence
2126 	 instead. */
2127       gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2128       gsi_remove (gsi, true);
2129 
2130       /* Return since we don't want gsi_next () */
2131       return;
2132 
2133     case GIMPLE_EH_ELSE:
2134       /* We should be eliminating this in lower_try_finally et al.  */
2135       gcc_unreachable ();
2136 
2137     default:
2138       /* A type, a decl, or some kind of statement that we're not
2139 	 interested in.  Don't walk them.  */
2140       break;
2141     }
2142 
2143   gsi_next (gsi);
2144 }
2145 
2146 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2147 
2148 static void
2149 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2150 {
2151   gimple_stmt_iterator gsi;
2152   for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2153     lower_eh_constructs_2 (state, &gsi);
2154 }
2155 
2156 namespace {
2157 
2158 const pass_data pass_data_lower_eh =
2159 {
2160   GIMPLE_PASS, /* type */
2161   "eh", /* name */
2162   OPTGROUP_NONE, /* optinfo_flags */
2163   TV_TREE_EH, /* tv_id */
2164   PROP_gimple_lcf, /* properties_required */
2165   PROP_gimple_leh, /* properties_provided */
2166   0, /* properties_destroyed */
2167   0, /* todo_flags_start */
2168   0, /* todo_flags_finish */
2169 };
2170 
2171 class pass_lower_eh : public gimple_opt_pass
2172 {
2173 public:
2174   pass_lower_eh (gcc::context *ctxt)
2175     : gimple_opt_pass (pass_data_lower_eh, ctxt)
2176   {}
2177 
2178   /* opt_pass methods: */
2179   virtual unsigned int execute (function *);
2180 
2181 }; // class pass_lower_eh
2182 
2183 unsigned int
2184 pass_lower_eh::execute (function *fun)
2185 {
2186   struct leh_state null_state;
2187   gimple_seq bodyp;
2188 
2189   bodyp = gimple_body (current_function_decl);
2190   if (bodyp == NULL)
2191     return 0;
2192 
2193   finally_tree = new hash_table<finally_tree_hasher> (31);
2194   eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2195   memset (&null_state, 0, sizeof (null_state));
2196 
2197   collect_finally_tree_1 (bodyp, NULL);
2198   lower_eh_constructs_1 (&null_state, &bodyp);
2199   gimple_set_body (current_function_decl, bodyp);
2200 
2201   /* We assume there's a return statement, or something, at the end of
2202      the function, and thus plopping the EH sequence afterward won't
2203      change anything.  */
2204   gcc_assert (!gimple_seq_may_fallthru (bodyp));
2205   gimple_seq_add_seq (&bodyp, eh_seq);
2206 
2207   /* We assume that since BODYP already existed, adding EH_SEQ to it
2208      didn't change its value, and we don't have to re-set the function.  */
2209   gcc_assert (bodyp == gimple_body (current_function_decl));
2210 
2211   delete finally_tree;
2212   finally_tree = NULL;
2213   BITMAP_FREE (eh_region_may_contain_throw_map);
2214   eh_seq = NULL;
2215 
2216   /* If this function needs a language specific EH personality routine
2217      and the frontend didn't already set one, do so now.  */
2218   if (function_needs_eh_personality (fun) == eh_personality_lang
2219       && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2220     DECL_FUNCTION_PERSONALITY (current_function_decl)
2221       = lang_hooks.eh_personality ();
2222 
2223   return 0;
2224 }
2225 
2226 } // anon namespace
2227 
2228 gimple_opt_pass *
2229 make_pass_lower_eh (gcc::context *ctxt)
2230 {
2231   return new pass_lower_eh (ctxt);
2232 }
2233 
2234 /* Create the multiple edges from an EH_DISPATCH statement to all of
2235    the possible handlers for its EH region.  Return true if there's
2236    no fallthru edge; false if there is.  */
2237 
2238 bool
2239 make_eh_dispatch_edges (geh_dispatch *stmt)
2240 {
2241   eh_region r;
2242   eh_catch c;
2243   basic_block src, dst;
2244 
2245   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2246   src = gimple_bb (stmt);
2247 
2248   switch (r->type)
2249     {
2250     case ERT_TRY:
2251       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2252 	{
2253 	  dst = label_to_block (cfun, c->label);
2254 	  make_edge (src, dst, 0);
2255 
2256 	  /* A catch-all handler doesn't have a fallthru.  */
2257 	  if (c->type_list == NULL)
2258 	    return false;
2259 	}
2260       break;
2261 
2262     case ERT_ALLOWED_EXCEPTIONS:
2263       dst = label_to_block (cfun, r->u.allowed.label);
2264       make_edge (src, dst, 0);
2265       break;
2266 
2267     default:
2268       gcc_unreachable ();
2269     }
2270 
2271   return true;
2272 }
2273 
2274 /* Create the single EH edge from STMT to its nearest landing pad,
2275    if there is such a landing pad within the current function.  */
2276 
2277 void
2278 make_eh_edges (gimple *stmt)
2279 {
2280   basic_block src, dst;
2281   eh_landing_pad lp;
2282   int lp_nr;
2283 
2284   lp_nr = lookup_stmt_eh_lp (stmt);
2285   if (lp_nr <= 0)
2286     return;
2287 
2288   lp = get_eh_landing_pad_from_number (lp_nr);
2289   gcc_assert (lp != NULL);
2290 
2291   src = gimple_bb (stmt);
2292   dst = label_to_block (cfun, lp->post_landing_pad);
2293   make_edge (src, dst, EDGE_EH);
2294 }
2295 
2296 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2297    do not actually perform the final edge redirection.
2298 
2299    CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2300    we intend to change the destination EH region as well; this means
2301    EH_LANDING_PAD_NR must already be set on the destination block label.
2302    If false, we're being called from generic cfg manipulation code and we
2303    should preserve our place within the region tree.  */
2304 
2305 static void
2306 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2307 {
2308   eh_landing_pad old_lp, new_lp;
2309   basic_block old_bb;
2310   gimple *throw_stmt;
2311   int old_lp_nr, new_lp_nr;
2312   tree old_label, new_label;
2313   edge_iterator ei;
2314   edge e;
2315 
2316   old_bb = edge_in->dest;
2317   old_label = gimple_block_label (old_bb);
2318   old_lp_nr = EH_LANDING_PAD_NR (old_label);
2319   gcc_assert (old_lp_nr > 0);
2320   old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2321 
2322   throw_stmt = last_stmt (edge_in->src);
2323   gcc_checking_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2324 
2325   new_label = gimple_block_label (new_bb);
2326 
2327   /* Look for an existing region that might be using NEW_BB already.  */
2328   new_lp_nr = EH_LANDING_PAD_NR (new_label);
2329   if (new_lp_nr)
2330     {
2331       new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2332       gcc_assert (new_lp);
2333 
2334       /* Unless CHANGE_REGION is true, the new and old landing pad
2335 	 had better be associated with the same EH region.  */
2336       gcc_assert (change_region || new_lp->region == old_lp->region);
2337     }
2338   else
2339     {
2340       new_lp = NULL;
2341       gcc_assert (!change_region);
2342     }
2343 
2344   /* Notice when we redirect the last EH edge away from OLD_BB.  */
2345   FOR_EACH_EDGE (e, ei, old_bb->preds)
2346     if (e != edge_in && (e->flags & EDGE_EH))
2347       break;
2348 
2349   if (new_lp)
2350     {
2351       /* NEW_LP already exists.  If there are still edges into OLD_LP,
2352 	 there's nothing to do with the EH tree.  If there are no more
2353 	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2354 	 If CHANGE_REGION is true, then our caller is expecting to remove
2355 	 the landing pad.  */
2356       if (e == NULL && !change_region)
2357 	remove_eh_landing_pad (old_lp);
2358     }
2359   else
2360     {
2361       /* No correct landing pad exists.  If there are no more edges
2362 	 into OLD_LP, then we can simply re-use the existing landing pad.
2363 	 Otherwise, we have to create a new landing pad.  */
2364       if (e == NULL)
2365 	{
2366 	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2367 	  new_lp = old_lp;
2368 	}
2369       else
2370 	new_lp = gen_eh_landing_pad (old_lp->region);
2371       new_lp->post_landing_pad = new_label;
2372       EH_LANDING_PAD_NR (new_label) = new_lp->index;
2373     }
2374 
2375   /* Maybe move the throwing statement to the new region.  */
2376   if (old_lp != new_lp)
2377     {
2378       remove_stmt_from_eh_lp (throw_stmt);
2379       add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2380     }
2381 }
2382 
2383 /* Redirect EH edge EDGE_IN to NEW_BB.  */
2384 
2385 edge
2386 redirect_eh_edge (edge edge_in, basic_block new_bb)
2387 {
2388   redirect_eh_edge_1 (edge_in, new_bb, false);
2389   return ssa_redirect_edge (edge_in, new_bb);
2390 }
2391 
2392 /* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
2393    labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2394    The actual edge update will happen in the caller.  */
2395 
2396 void
2397 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2398 {
2399   tree new_lab = gimple_block_label (new_bb);
2400   bool any_changed = false;
2401   basic_block old_bb;
2402   eh_region r;
2403   eh_catch c;
2404 
2405   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2406   switch (r->type)
2407     {
2408     case ERT_TRY:
2409       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2410 	{
2411 	  old_bb = label_to_block (cfun, c->label);
2412 	  if (old_bb == e->dest)
2413 	    {
2414 	      c->label = new_lab;
2415 	      any_changed = true;
2416 	    }
2417 	}
2418       break;
2419 
2420     case ERT_ALLOWED_EXCEPTIONS:
2421       old_bb = label_to_block (cfun, r->u.allowed.label);
2422       gcc_assert (old_bb == e->dest);
2423       r->u.allowed.label = new_lab;
2424       any_changed = true;
2425       break;
2426 
2427     default:
2428       gcc_unreachable ();
2429     }
2430 
2431   gcc_assert (any_changed);
2432 }
2433 
2434 /* Helper function for operation_could_trap_p and stmt_could_throw_p.  */
2435 
2436 bool
2437 operation_could_trap_helper_p (enum tree_code op,
2438 			       bool fp_operation,
2439 			       bool honor_trapv,
2440 			       bool honor_nans,
2441 			       bool honor_snans,
2442 			       tree divisor,
2443 			       bool *handled)
2444 {
2445   *handled = true;
2446   switch (op)
2447     {
2448     case TRUNC_DIV_EXPR:
2449     case CEIL_DIV_EXPR:
2450     case FLOOR_DIV_EXPR:
2451     case ROUND_DIV_EXPR:
2452     case EXACT_DIV_EXPR:
2453     case CEIL_MOD_EXPR:
2454     case FLOOR_MOD_EXPR:
2455     case ROUND_MOD_EXPR:
2456     case TRUNC_MOD_EXPR:
2457     case RDIV_EXPR:
2458       if (honor_snans)
2459 	return true;
2460       if (fp_operation)
2461 	return flag_trapping_math;
2462       if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2463         return true;
2464       return false;
2465 
2466     case LT_EXPR:
2467     case LE_EXPR:
2468     case GT_EXPR:
2469     case GE_EXPR:
2470     case LTGT_EXPR:
2471       /* Some floating point comparisons may trap.  */
2472       return honor_nans;
2473 
2474     case EQ_EXPR:
2475     case NE_EXPR:
2476     case UNORDERED_EXPR:
2477     case ORDERED_EXPR:
2478     case UNLT_EXPR:
2479     case UNLE_EXPR:
2480     case UNGT_EXPR:
2481     case UNGE_EXPR:
2482     case UNEQ_EXPR:
2483       return honor_snans;
2484 
2485     case NEGATE_EXPR:
2486     case ABS_EXPR:
2487     case CONJ_EXPR:
2488       /* These operations don't trap with floating point.  */
2489       if (honor_trapv)
2490 	return true;
2491       return false;
2492 
2493     case ABSU_EXPR:
2494       /* ABSU_EXPR never traps.  */
2495       return false;
2496 
2497     case PLUS_EXPR:
2498     case MINUS_EXPR:
2499     case MULT_EXPR:
2500       /* Any floating arithmetic may trap.  */
2501       if (fp_operation && flag_trapping_math)
2502 	return true;
2503       if (honor_trapv)
2504 	return true;
2505       return false;
2506 
2507     case COMPLEX_EXPR:
2508     case CONSTRUCTOR:
2509       /* Constructing an object cannot trap.  */
2510       return false;
2511 
2512     case COND_EXPR:
2513     case VEC_COND_EXPR:
2514       /* Whether *COND_EXPR can trap depends on whether the
2515 	 first argument can trap, so signal it as not handled.
2516 	 Whether lhs is floating or not doesn't matter.  */
2517       *handled = false;
2518       return false;
2519 
2520     default:
2521       /* Any floating arithmetic may trap.  */
2522       if (fp_operation && flag_trapping_math)
2523 	return true;
2524 
2525       *handled = false;
2526       return false;
2527     }
2528 }
2529 
2530 /* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
2531    on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
2532    type operands that may trap.  If OP is a division operator, DIVISOR contains
2533    the value of the divisor.  */
2534 
2535 bool
2536 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2537 			tree divisor)
2538 {
2539   bool honor_nans = (fp_operation && flag_trapping_math
2540 		     && !flag_finite_math_only);
2541   bool honor_snans = fp_operation && flag_signaling_nans != 0;
2542   bool handled;
2543 
2544   /* This function cannot tell whether or not COND_EXPR and VEC_COND_EXPR could
2545      trap, because that depends on the respective condition op.  */
2546   gcc_assert (op != COND_EXPR && op != VEC_COND_EXPR);
2547 
2548   if (TREE_CODE_CLASS (op) != tcc_comparison
2549       && TREE_CODE_CLASS (op) != tcc_unary
2550       && TREE_CODE_CLASS (op) != tcc_binary)
2551     return false;
2552 
2553   return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2554 					honor_nans, honor_snans, divisor,
2555 					&handled);
2556 }
2557 
2558 
2559 /* Returns true if it is possible to prove that the index of
2560    an array access REF (an ARRAY_REF expression) falls into the
2561    array bounds.  */
2562 
2563 static bool
2564 in_array_bounds_p (tree ref)
2565 {
2566   tree idx = TREE_OPERAND (ref, 1);
2567   tree min, max;
2568 
2569   if (TREE_CODE (idx) != INTEGER_CST)
2570     return false;
2571 
2572   min = array_ref_low_bound (ref);
2573   max = array_ref_up_bound (ref);
2574   if (!min
2575       || !max
2576       || TREE_CODE (min) != INTEGER_CST
2577       || TREE_CODE (max) != INTEGER_CST)
2578     return false;
2579 
2580   if (tree_int_cst_lt (idx, min)
2581       || tree_int_cst_lt (max, idx))
2582     return false;
2583 
2584   return true;
2585 }
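
/* For instance, given `int a[4]', the reference a[2] has a constant
   index within [0, 3] and this returns true, while a[i] with a
   non-constant index, or an array with an unknown upper bound, returns
   false, which only means "not provably in bounds", not "out of bounds".  */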
2586 
2587 /* Returns true if it is possible to prove that the range of
2588    an array access REF (an ARRAY_RANGE_REF expression) falls
2589    into the array bounds.  */
2590 
2591 static bool
2592 range_in_array_bounds_p (tree ref)
2593 {
2594   tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2595   tree range_min, range_max, min, max;
2596 
2597   range_min = TYPE_MIN_VALUE (domain_type);
2598   range_max = TYPE_MAX_VALUE (domain_type);
2599   if (!range_min
2600       || !range_max
2601       || TREE_CODE (range_min) != INTEGER_CST
2602       || TREE_CODE (range_max) != INTEGER_CST)
2603     return false;
2604 
2605   min = array_ref_low_bound (ref);
2606   max = array_ref_up_bound (ref);
2607   if (!min
2608       || !max
2609       || TREE_CODE (min) != INTEGER_CST
2610       || TREE_CODE (max) != INTEGER_CST)
2611     return false;
2612 
2613   if (tree_int_cst_lt (range_min, min)
2614       || tree_int_cst_lt (max, range_max))
2615     return false;
2616 
2617   return true;
2618 }
2619 
2620 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2621    location or floating point arithmetic.  Cf. the rtl version, may_trap_p.
2622    This routine expects only GIMPLE lhs or rhs input.  */
2623 
2624 bool
2625 tree_could_trap_p (tree expr)
2626 {
2627   enum tree_code code;
2628   bool fp_operation = false;
2629   bool honor_trapv = false;
2630   tree t, base, div = NULL_TREE;
2631 
2632   if (!expr)
2633     return false;
2634 
2635   /* In COND_EXPR and VEC_COND_EXPR only the condition may trap, but
2636      they won't appear as operands in GIMPLE form, so this is just for the
2637      GENERIC uses where it needs to recurse on the operands and so
2638      *COND_EXPR itself doesn't trap.  */
2639   if (TREE_CODE (expr) == COND_EXPR || TREE_CODE (expr) == VEC_COND_EXPR)
2640     return false;
2641 
2642   code = TREE_CODE (expr);
2643   t = TREE_TYPE (expr);
2644 
2645   if (t)
2646     {
2647       if (COMPARISON_CLASS_P (expr))
2648 	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2649       else
2650 	fp_operation = FLOAT_TYPE_P (t);
2651       honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2652     }
2653 
2654   if (TREE_CODE_CLASS (code) == tcc_binary)
2655     div = TREE_OPERAND (expr, 1);
2656   if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2657     return true;
2658 
2659  restart:
2660   switch (code)
2661     {
2662     case COMPONENT_REF:
2663     case REALPART_EXPR:
2664     case IMAGPART_EXPR:
2665     case BIT_FIELD_REF:
2666     case VIEW_CONVERT_EXPR:
2667     case WITH_SIZE_EXPR:
2668       expr = TREE_OPERAND (expr, 0);
2669       code = TREE_CODE (expr);
2670       goto restart;
2671 
2672     case ARRAY_RANGE_REF:
2673       base = TREE_OPERAND (expr, 0);
2674       if (tree_could_trap_p (base))
2675 	return true;
2676       if (TREE_THIS_NOTRAP (expr))
2677 	return false;
2678       return !range_in_array_bounds_p (expr);
2679 
2680     case ARRAY_REF:
2681       base = TREE_OPERAND (expr, 0);
2682       if (tree_could_trap_p (base))
2683 	return true;
2684       if (TREE_THIS_NOTRAP (expr))
2685 	return false;
2686       return !in_array_bounds_p (expr);
2687 
2688     case TARGET_MEM_REF:
2689     case MEM_REF:
2690       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2691 	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2692 	return true;
2693       if (TREE_THIS_NOTRAP (expr))
2694 	return false;
2695       /* We cannot prove that the access is in-bounds when we have
2696          variable-index TARGET_MEM_REFs.  */
2697       if (code == TARGET_MEM_REF
2698 	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2699 	return true;
2700       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2701 	{
2702 	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2703 	  poly_offset_int off = mem_ref_offset (expr);
2704 	  if (maybe_lt (off, 0))
2705 	    return true;
2706 	  if (TREE_CODE (base) == STRING_CST)
2707 	    return maybe_le (TREE_STRING_LENGTH (base), off);
2708 	  tree size = DECL_SIZE_UNIT (base);
2709 	  if (size == NULL_TREE
2710 	      || !poly_int_tree_p (size)
2711 	      || maybe_le (wi::to_poly_offset (size), off))
2712 	    return true;
2713 	  /* Now we are sure the first byte of the access is inside
2714 	     the object.  */
2715 	  return false;
2716 	}
2717       return true;
2718 
2719     case INDIRECT_REF:
2720       return !TREE_THIS_NOTRAP (expr);
2721 
2722     case ASM_EXPR:
2723       return TREE_THIS_VOLATILE (expr);
2724 
2725     case CALL_EXPR:
2726       /* Internal function calls do not trap.  */
2727       if (CALL_EXPR_FN (expr) == NULL_TREE)
2728 	return false;
2729       t = get_callee_fndecl (expr);
2730       /* Assume that indirect calls and calls to weak functions may trap.  */
2731       if (!t || !DECL_P (t))
2732 	return true;
2733       if (DECL_WEAK (t))
2734 	return tree_could_trap_p (t);
2735       return false;
2736 
2737     case FUNCTION_DECL:
2738       /* Assume that accesses to weak functions may trap, unless we know
2739 	 they are certainly defined in current TU or in some other
2740 	 LTO partition.  */
2741       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2742 	{
2743 	  cgraph_node *node = cgraph_node::get (expr);
2744 	  if (node)
2745 	    node = node->function_symbol ();
2746 	  return !(node && node->in_other_partition);
2747 	}
2748       return false;
2749 
2750     case VAR_DECL:
2751       /* Assume that accesses to weak vars may trap, unless we know
2752 	 they are certainly defined in current TU or in some other
2753 	 LTO partition.  */
2754       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2755 	{
2756 	  varpool_node *node = varpool_node::get (expr);
2757 	  if (node)
2758 	    node = node->ultimate_alias_target ();
2759 	  return !(node && node->in_other_partition);
2760 	}
2761       return false;
2762 
2763     default:
2764       return false;
2765     }
2766 }
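
/* Two quick illustrations: dereferencing an arbitrary pointer, *p, is
   considered trapping (a MEM_REF whose operand is not an ADDR_EXPR),
   while s.a[2] for a local `struct { int a[4]; } s' is not, since the
   base cannot trap and the constant index is provably in bounds.  */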
2767 
2768 /* Return non-NULL if there is an integer operation with trapping overflow
2769    we can rewrite into non-trapping.  Called via walk_tree from
2770    rewrite_to_non_trapping_overflow.  */
2771 
2772 static tree
2773 find_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2774 {
2775   if (EXPR_P (*tp)
2776       && ANY_INTEGRAL_TYPE_P (TREE_TYPE (*tp))
2777       && !operation_no_trapping_overflow (TREE_TYPE (*tp), TREE_CODE (*tp)))
2778     return *tp;
2779   if (IS_TYPE_OR_DECL_P (*tp)
2780       || (TREE_CODE (*tp) == SAVE_EXPR && data == NULL))
2781     *walk_subtrees = 0;
2782   return NULL_TREE;
2783 }
2784 
2785 /* Rewrite selected operations into unsigned arithmetics, so that they
2786    don't trap on overflow.  */
2787 
2788 static tree
2789 replace_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2790 {
2791   if (find_trapping_overflow (tp, walk_subtrees, data))
2792     {
2793       tree type = TREE_TYPE (*tp);
2794       tree utype = unsigned_type_for (type);
2795       *walk_subtrees = 0;
2796       int len = TREE_OPERAND_LENGTH (*tp);
2797       for (int i = 0; i < len; ++i)
2798 	walk_tree (&TREE_OPERAND (*tp, i), replace_trapping_overflow,
2799 		   data, (hash_set<tree> *) data);
2800 
2801       if (TREE_CODE (*tp) == ABS_EXPR)
2802 	{
2803 	  TREE_SET_CODE (*tp, ABSU_EXPR);
2804 	  TREE_TYPE (*tp) = utype;
2805 	  *tp = fold_convert (type, *tp);
2806 	}
2807       else
2808 	{
2809 	  TREE_TYPE (*tp) = utype;
2810 	  len = TREE_OPERAND_LENGTH (*tp);
2811 	  for (int i = 0; i < len; ++i)
2812 	    TREE_OPERAND (*tp, i)
2813 	      = fold_convert (utype, TREE_OPERAND (*tp, i));
2814 	  *tp = fold_convert (type, *tp);
2815 	}
2816     }
2817   return NULL_TREE;
2818 }
2819 
2820 /* If any subexpression of EXPR can trap due to -ftrapv, rewrite it
2821    using unsigned arithmetics to avoid traps in it.  */
2822 
2823 tree
2824 rewrite_to_non_trapping_overflow (tree expr)
2825 {
2826   if (!flag_trapv)
2827     return expr;
2828   hash_set<tree> pset;
2829   if (!walk_tree (&expr, find_trapping_overflow, &pset, &pset))
2830     return expr;
2831   expr = unshare_expr (expr);
2832   pset.empty ();
2833   walk_tree (&expr, replace_trapping_overflow, &pset, &pset);
2834   return expr;
2835 }
2836 
2837 /* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
2838    an assignment) may throw.  */
2839 
2840 static bool
2841 stmt_could_throw_1_p (gassign *stmt)
2842 {
2843   enum tree_code code = gimple_assign_rhs_code (stmt);
2844   bool honor_nans = false;
2845   bool honor_snans = false;
2846   bool fp_operation = false;
2847   bool honor_trapv = false;
2848   tree t;
2849   size_t i;
2850   bool handled, ret;
2851 
2852   if (TREE_CODE_CLASS (code) == tcc_comparison
2853       || TREE_CODE_CLASS (code) == tcc_unary
2854       || TREE_CODE_CLASS (code) == tcc_binary)
2855     {
2856       if (TREE_CODE_CLASS (code) == tcc_comparison)
2857 	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2858       else
2859 	t = gimple_expr_type (stmt);
2860       fp_operation = FLOAT_TYPE_P (t);
2861       if (fp_operation)
2862 	{
2863 	  honor_nans = flag_trapping_math && !flag_finite_math_only;
2864 	  honor_snans = flag_signaling_nans != 0;
2865 	}
2866       else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2867 	honor_trapv = true;
2868     }
2869 
2870   /* First check the LHS.  */
2871   if (tree_could_trap_p (gimple_assign_lhs (stmt)))
2872     return true;
2873 
2874   /* Check if the main expression may trap.  */
2875   ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2876 				       honor_nans, honor_snans,
2877 				       gimple_assign_rhs2 (stmt),
2878 				       &handled);
2879   if (handled)
2880     return ret;
2881 
2882   /* If the expression does not trap, see if any of the individual operands may
2883      trap.  */
2884   for (i = 1; i < gimple_num_ops (stmt); i++)
2885     if (tree_could_trap_p (gimple_op (stmt, i)))
2886       return true;
2887 
2888   return false;
2889 }
2890 
2891 
2892 /* Return true if statement STMT within FUN could throw an exception.  */
2893 
2894 bool
2895 stmt_could_throw_p (function *fun, gimple *stmt)
2896 {
2897   if (!flag_exceptions)
2898     return false;
2899 
2900   /* The only statements that can throw an exception are assignments,
2901      conditionals, calls, resx, and asms.  */
2902   switch (gimple_code (stmt))
2903     {
2904     case GIMPLE_RESX:
2905       return true;
2906 
2907     case GIMPLE_CALL:
2908       return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2909 
2910     case GIMPLE_COND:
2911       {
2912 	if (fun && !fun->can_throw_non_call_exceptions)
2913 	  return false;
2914 	gcond *cond = as_a <gcond *> (stmt);
2915 	tree lhs = gimple_cond_lhs (cond);
2916 	return operation_could_trap_p (gimple_cond_code (cond),
2917 				       FLOAT_TYPE_P (TREE_TYPE (lhs)),
2918 				       false, NULL_TREE);
2919       }
2920 
2921     case GIMPLE_ASSIGN:
2922       if ((fun && !fun->can_throw_non_call_exceptions)
2923 	  || gimple_clobber_p (stmt))
2924         return false;
2925       return stmt_could_throw_1_p (as_a <gassign *> (stmt));
2926 
2927     case GIMPLE_ASM:
2928       if (fun && !fun->can_throw_non_call_exceptions)
2929         return false;
2930       return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2931 
2932     default:
2933       return false;
2934     }
2935 }
2936 
2937 /* Return true if STMT in function FUN must be assumed necessary because of
2938    non-call exceptions.  */
2939 
2940 bool
2941 stmt_unremovable_because_of_non_call_eh_p (function *fun, gimple *stmt)
2942 {
2943   return (fun->can_throw_non_call_exceptions
2944 	  && !fun->can_delete_dead_exceptions
2945 	  && stmt_could_throw_p (fun, stmt));
2946 }
2947 
2948 /* Return true if expression T could throw an exception.  */
2949 
2950 bool
2951 tree_could_throw_p (tree t)
2952 {
2953   if (!flag_exceptions)
2954     return false;
2955   if (TREE_CODE (t) == MODIFY_EXPR)
2956     {
2957       if (cfun->can_throw_non_call_exceptions
2958           && tree_could_trap_p (TREE_OPERAND (t, 0)))
2959         return true;
2960       t = TREE_OPERAND (t, 1);
2961     }
2962 
2963   if (TREE_CODE (t) == WITH_SIZE_EXPR)
2964     t = TREE_OPERAND (t, 0);
2965   if (TREE_CODE (t) == CALL_EXPR)
2966     return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2967   if (cfun->can_throw_non_call_exceptions)
2968     return tree_could_trap_p (t);
2969   return false;
2970 }
2971 
2972 /* Return true if STMT can throw an exception that is not caught within its
2973    function FUN.  FUN can be NULL, in which case the answer is extra
2974    conservative.  */
2975 
2976 bool
2977 stmt_can_throw_external (function *fun, gimple *stmt)
2978 {
2979   int lp_nr;
2980 
2981   if (!stmt_could_throw_p (fun, stmt))
2982     return false;
2983   if (!fun)
2984     return true;
2985 
2986   lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
2987   return lp_nr == 0;
2988 }
2989 
2990 /* Return true if STMT can throw an exception that is caught within its
2991    function FUN.  */
2992 
2993 bool
2994 stmt_can_throw_internal (function *fun, gimple *stmt)
2995 {
2996   int lp_nr;
2997 
2998   gcc_checking_assert (fun);
2999   if (!stmt_could_throw_p (fun, stmt))
3000     return false;
3001 
3002   lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
3003   return lp_nr > 0;
3004 }
3005 
3006 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
3007    remove any entry it might have from the EH table.  Return true if
3008    any change was made.  */
3009 
3010 bool
3011 maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt)
3012 {
3013   if (stmt_could_throw_p (ifun, stmt))
3014     return false;
3015   return remove_stmt_from_eh_lp_fn (ifun, stmt);
3016 }
3017 
3018 /* Likewise, but always use the current function.  */
3019 
3020 bool
3021 maybe_clean_eh_stmt (gimple *stmt)
3022 {
3023   return maybe_clean_eh_stmt_fn (cfun, stmt);
3024 }
3025 
3026 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
3027    OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
3028    in the table if it should be in there.  Return TRUE if a replacement was
3029    done that may require an EH edge purge.  */
3030 
3031 bool
3032 maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt)
3033 {
3034   int lp_nr = lookup_stmt_eh_lp (old_stmt);
3035 
3036   if (lp_nr != 0)
3037     {
3038       bool new_stmt_could_throw = stmt_could_throw_p (cfun, new_stmt);
3039 
3040       if (new_stmt == old_stmt && new_stmt_could_throw)
3041 	return false;
3042 
3043       remove_stmt_from_eh_lp (old_stmt);
3044       if (new_stmt_could_throw)
3045 	{
3046 	  add_stmt_to_eh_lp (new_stmt, lp_nr);
3047 	  return false;
3048 	}
3049       else
3050 	return true;
3051     }
3052 
3053   return false;
3054 }
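
/* Informally: when folding replaces a throwing call with a statement that
   cannot throw, the old EH table entry is removed and this returns true,
   telling the caller that now-dead EH edges out of the block may need to
   be purged (e.g. with gimple_purge_dead_eh_edges).  */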
3055 
3056 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
3057    in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
3058    operand is the return value of duplicate_eh_regions.  */
3059 
3060 bool
3061 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt,
3062 			    struct function *old_fun, gimple *old_stmt,
3063 			    hash_map<void *, void *> *map,
3064 			    int default_lp_nr)
3065 {
3066   int old_lp_nr, new_lp_nr;
3067 
3068   if (!stmt_could_throw_p (new_fun, new_stmt))
3069     return false;
3070 
3071   old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
3072   if (old_lp_nr == 0)
3073     {
3074       if (default_lp_nr == 0)
3075 	return false;
3076       new_lp_nr = default_lp_nr;
3077     }
3078   else if (old_lp_nr > 0)
3079     {
3080       eh_landing_pad old_lp, new_lp;
3081 
3082       old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
3083       new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
3084       new_lp_nr = new_lp->index;
3085     }
3086   else
3087     {
3088       eh_region old_r, new_r;
3089 
3090       old_r = (*old_fun->eh->region_array)[-old_lp_nr];
3091       new_r = static_cast<eh_region> (*map->get (old_r));
3092       new_lp_nr = -new_r->index;
3093     }
3094 
3095   add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
3096   return true;
3097 }
3098 
3099 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
3100    and thus no remapping is required.  */
3101 
3102 bool
3103 maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt)
3104 {
3105   int lp_nr;
3106 
3107   if (!stmt_could_throw_p (cfun, new_stmt))
3108     return false;
3109 
3110   lp_nr = lookup_stmt_eh_lp (old_stmt);
3111   if (lp_nr == 0)
3112     return false;
3113 
3114   add_stmt_to_eh_lp (new_stmt, lp_nr);
3115   return true;
3116 }
3117 
3118 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
3119    GIMPLE_TRY) that are similar enough to be considered the same.  Currently
3120    this only handles handlers consisting of a single call, as that's the
3121    important case for C++: a destructor call for a particular object showing
3122    up in multiple handlers.  */
3123 
3124 static bool
3125 same_handler_p (gimple_seq oneh, gimple_seq twoh)
3126 {
3127   gimple_stmt_iterator gsi;
3128   gimple *ones, *twos;
3129   unsigned int ai;
3130 
3131   gsi = gsi_start (oneh);
3132   if (!gsi_one_before_end_p (gsi))
3133     return false;
3134   ones = gsi_stmt (gsi);
3135 
3136   gsi = gsi_start (twoh);
3137   if (!gsi_one_before_end_p (gsi))
3138     return false;
3139   twos = gsi_stmt (gsi);
3140 
3141   if (!is_gimple_call (ones)
3142       || !is_gimple_call (twos)
3143       || gimple_call_lhs (ones)
3144       || gimple_call_lhs (twos)
3145       || gimple_call_chain (ones)
3146       || gimple_call_chain (twos)
3147       || !gimple_call_same_target_p (ones, twos)
3148       || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3149     return false;
3150 
3151   for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3152     if (!operand_equal_p (gimple_call_arg (ones, ai),
3153                           gimple_call_arg (twos, ai), 0))
3154       return false;
3155 
3156   return true;
3157 }
3158 
3159 /* Optimize
3160     try { A() } finally { try { ~B() } catch { ~A() } }
3161     try { ... } finally { ~A() }
3162    into
3163     try { A() } catch { ~B() }
3164     try { ~B() ... } finally { ~A() }
3165 
3166    This occurs frequently in C++, where A is a local variable and B is a
3167    temporary used in the initializer for A.  */
3168 
3169 static void
3170 optimize_double_finally (gtry *one, gtry *two)
3171 {
3172   gimple *oneh;
3173   gimple_stmt_iterator gsi;
3174   gimple_seq cleanup;
3175 
3176   cleanup = gimple_try_cleanup (one);
3177   gsi = gsi_start (cleanup);
3178   if (!gsi_one_before_end_p (gsi))
3179     return;
3180 
3181   oneh = gsi_stmt (gsi);
3182   if (gimple_code (oneh) != GIMPLE_TRY
3183       || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3184     return;
3185 
3186   if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3187     {
3188       gimple_seq seq = gimple_try_eval (oneh);
3189 
3190       gimple_try_set_cleanup (one, seq);
3191       gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3192       seq = copy_gimple_seq_and_replace_locals (seq);
3193       gimple_seq_add_seq (&seq, gimple_try_eval (two));
3194       gimple_try_set_eval (two, seq);
3195     }
3196 }
3197 
3198 /* Perform EH refactoring optimizations that are simpler to do when code
3199    flow has been lowered but EH structures haven't.  */
3200 
3201 static void
3202 refactor_eh_r (gimple_seq seq)
3203 {
3204   gimple_stmt_iterator gsi;
3205   gimple *one, *two;
3206 
3207   one = NULL;
3208   two = NULL;
3209   gsi = gsi_start (seq);
3210   while (1)
3211     {
3212       one = two;
3213       if (gsi_end_p (gsi))
3214 	two = NULL;
3215       else
3216 	two = gsi_stmt (gsi);
3217       if (one && two)
3218 	if (gtry *try_one = dyn_cast <gtry *> (one))
3219 	  if (gtry *try_two = dyn_cast <gtry *> (two))
3220 	    if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3221 		&& gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3222 	      optimize_double_finally (try_one, try_two);
3223       if (one)
3224 	switch (gimple_code (one))
3225 	  {
3226 	  case GIMPLE_TRY:
3227 	    refactor_eh_r (gimple_try_eval (one));
3228 	    refactor_eh_r (gimple_try_cleanup (one));
3229 	    break;
3230 	  case GIMPLE_CATCH:
3231 	    refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3232 	    break;
3233 	  case GIMPLE_EH_FILTER:
3234 	    refactor_eh_r (gimple_eh_filter_failure (one));
3235 	    break;
3236 	  case GIMPLE_EH_ELSE:
3237 	    {
3238 	      geh_else *eh_else_stmt = as_a <geh_else *> (one);
3239 	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3240 	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3241 	    }
3242 	    break;
3243 	  default:
3244 	    break;
3245 	  }
3246       if (two)
3247 	gsi_next (&gsi);
3248       else
3249 	break;
3250     }
3251 }
3252 
3253 namespace {
3254 
3255 const pass_data pass_data_refactor_eh =
3256 {
3257   GIMPLE_PASS, /* type */
3258   "ehopt", /* name */
3259   OPTGROUP_NONE, /* optinfo_flags */
3260   TV_TREE_EH, /* tv_id */
3261   PROP_gimple_lcf, /* properties_required */
3262   0, /* properties_provided */
3263   0, /* properties_destroyed */
3264   0, /* todo_flags_start */
3265   0, /* todo_flags_finish */
3266 };
3267 
3268 class pass_refactor_eh : public gimple_opt_pass
3269 {
3270 public:
3271   pass_refactor_eh (gcc::context *ctxt)
3272     : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3273   {}
3274 
3275   /* opt_pass methods: */
3276   virtual bool gate (function *) { return flag_exceptions != 0; }
3277   virtual unsigned int execute (function *)
3278     {
3279       refactor_eh_r (gimple_body (current_function_decl));
3280       return 0;
3281     }
3282 
3283 }; // class pass_refactor_eh
3284 
3285 } // anon namespace
3286 
3287 gimple_opt_pass *
3288 make_pass_refactor_eh (gcc::context *ctxt)
3289 {
3290   return new pass_refactor_eh (ctxt);
3291 }
3292 
3293 /* At the end of gimple optimization, we can lower RESX.  */
3294 
3295 static bool
3296 lower_resx (basic_block bb, gresx *stmt,
3297 	    hash_map<eh_region, tree> *mnt_map)
3298 {
3299   int lp_nr;
3300   eh_region src_r, dst_r;
3301   gimple_stmt_iterator gsi;
3302   gimple *x;
3303   tree fn, src_nr;
3304   bool ret = false;
3305 
3306   lp_nr = lookup_stmt_eh_lp (stmt);
3307   if (lp_nr != 0)
3308     dst_r = get_eh_region_from_lp_number (lp_nr);
3309   else
3310     dst_r = NULL;
3311 
3312   src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3313   gsi = gsi_last_bb (bb);
3314 
3315   if (src_r == NULL)
3316     {
3317       /* We can wind up with no source region when pass_cleanup_eh shows
3318 	 that there are no entries into an eh region and deletes it, but
3319 	 then the block that contains the resx isn't removed.  This can
3320 	 happen without optimization when the switch statement created by
3321 	 lower_try_finally_switch isn't simplified to remove the eh case.
3322 
3323 	 Resolve this by expanding the resx node to an abort.  */
3324 
3325       fn = builtin_decl_implicit (BUILT_IN_TRAP);
3326       x = gimple_build_call (fn, 0);
3327       gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3328 
3329       while (EDGE_COUNT (bb->succs) > 0)
3330 	remove_edge (EDGE_SUCC (bb, 0));
3331     }
3332   else if (dst_r)
3333     {
3334       /* When we have a destination region, we resolve this by copying
3335 	 the excptr and filter values into place, and changing the edge
3336 	 to immediately after the landing pad.  */
3337       edge e;
3338 
3339       if (lp_nr < 0)
3340 	{
3341 	  basic_block new_bb;
3342 	  tree lab;
3343 
3344 	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3345 	     the failure decl into a new block, if needed.  */
3346 	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3347 
3348 	  tree *slot = mnt_map->get (dst_r);
3349 	  if (slot == NULL)
3350 	    {
3351 	      gimple_stmt_iterator gsi2;
3352 
3353 	      new_bb = create_empty_bb (bb);
3354 	      new_bb->count = bb->count;
3355 	      add_bb_to_loop (new_bb, bb->loop_father);
3356 	      lab = gimple_block_label (new_bb);
3357 	      gsi2 = gsi_start_bb (new_bb);
3358 
3359 	      fn = dst_r->u.must_not_throw.failure_decl;
3360 	      x = gimple_build_call (fn, 0);
3361 	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3362 	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3363 
3364 	      mnt_map->put (dst_r, lab);
3365 	    }
3366 	  else
3367 	    {
3368 	      lab = *slot;
3369 	      new_bb = label_to_block (cfun, lab);
3370 	    }
3371 
3372 	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
3373 	  e = make_single_succ_edge (bb, new_bb, EDGE_FALLTHRU);
3374 	}
3375       else
3376 	{
3377 	  edge_iterator ei;
3378 	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3379 
3380 	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3381 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3382 	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
3383 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3384 
3385 	  /* Update the flags for the outgoing edge.  */
3386 	  e = single_succ_edge (bb);
3387 	  gcc_assert (e->flags & EDGE_EH);
3388 	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3389 	  e->probability = profile_probability::always ();
3390 
3391 	  /* If there are no more EH users of the landing pad, delete it.  */
3392 	  FOR_EACH_EDGE (e, ei, e->dest->preds)
3393 	    if (e->flags & EDGE_EH)
3394 	      break;
3395 	  if (e == NULL)
3396 	    {
3397 	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3398 	      remove_eh_landing_pad (lp);
3399 	    }
3400 	}
3401 
3402       ret = true;
3403     }
3404   else
3405     {
3406       tree var;
3407 
3408       /* When we don't have a destination region, this exception escapes
3409 	 up the call chain.  We resolve this by generating a call to the
3410 	 _Unwind_Resume library function.  */
3411 
3412       /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3413 	 with no arguments for C++.  Check for that.  */
3414       if (src_r->use_cxa_end_cleanup)
3415 	{
3416 	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3417 	  x = gimple_build_call (fn, 0);
3418 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3419 	}
3420       else
3421 	{
3422 	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3423 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3424 	  x = gimple_build_call (fn, 1, src_nr);
3425 	  var = create_tmp_var (ptr_type_node);
3426 	  var = make_ssa_name (var, x);
3427 	  gimple_call_set_lhs (x, var);
3428 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3429 
3430 	  /* When exception handling is delegated to a caller function, we
3431 	     have to guarantee that shadow memory variables living on the
3432 	     stack are cleaned before control is given to the parent function.  */
3433 	  if (sanitize_flags_p (SANITIZE_ADDRESS))
3434 	    {
3435 	      tree decl
3436 		= builtin_decl_implicit (BUILT_IN_ASAN_HANDLE_NO_RETURN);
3437 	      gimple *g = gimple_build_call (decl, 0);
3438 	      gimple_set_location (g, gimple_location (stmt));
3439 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3440 	    }
3441 
3442 	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3443 	  x = gimple_build_call (fn, 1, var);
3444 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3445 	}
3446 
3447       gcc_assert (EDGE_COUNT (bb->succs) == 0);
3448     }
3449 
3450   gsi_remove (&gsi, true);
3451 
3452   return ret;
3453 }
3454 
3455 namespace {
3456 
3457 const pass_data pass_data_lower_resx =
3458 {
3459   GIMPLE_PASS, /* type */
3460   "resx", /* name */
3461   OPTGROUP_NONE, /* optinfo_flags */
3462   TV_TREE_EH, /* tv_id */
3463   PROP_gimple_lcf, /* properties_required */
3464   0, /* properties_provided */
3465   0, /* properties_destroyed */
3466   0, /* todo_flags_start */
3467   0, /* todo_flags_finish */
3468 };
3469 
3470 class pass_lower_resx : public gimple_opt_pass
3471 {
3472 public:
3473   pass_lower_resx (gcc::context *ctxt)
3474     : gimple_opt_pass (pass_data_lower_resx, ctxt)
3475   {}
3476 
3477   /* opt_pass methods: */
3478   virtual bool gate (function *) { return flag_exceptions != 0; }
3479   virtual unsigned int execute (function *);
3480 
3481 }; // class pass_lower_resx
3482 
3483 unsigned
3484 pass_lower_resx::execute (function *fun)
3485 {
3486   basic_block bb;
3487   bool dominance_invalidated = false;
3488   bool any_rewritten = false;
3489 
3490   hash_map<eh_region, tree> mnt_map;
3491 
3492   FOR_EACH_BB_FN (bb, fun)
3493     {
3494       gimple *last = last_stmt (bb);
3495       if (last && is_gimple_resx (last))
3496 	{
3497 	  dominance_invalidated |=
3498 	    lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3499 	  any_rewritten = true;
3500 	}
3501     }
3502 
3503   if (dominance_invalidated)
3504     {
3505       free_dominance_info (CDI_DOMINATORS);
3506       free_dominance_info (CDI_POST_DOMINATORS);
3507     }
3508 
3509   return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3510 }
3511 
3512 } // anon namespace
3513 
3514 gimple_opt_pass *
3515 make_pass_lower_resx (gcc::context *ctxt)
3516 {
3517   return new pass_lower_resx (ctxt);
3518 }
3519 
3520 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3521    external throw.  */
3522 
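/* A minimal illustrative example (ours, not a compiler dump):

     <bb N> (reached only via EH edges):
       a = {v} {CLOBBER};
       b = {v} {CLOBBER};
       resx R;		// R resumes in a caller, i.e. throws externally

   Nothing after an external rethrow can look at A or B, so the clobber
   statements are simply deleted.  */
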
3523 static void
3524 optimize_clobbers (basic_block bb)
3525 {
3526   gimple_stmt_iterator gsi = gsi_last_bb (bb);
3527   bool any_clobbers = false;
3528   bool seen_stack_restore = false;
3529   edge_iterator ei;
3530   edge e;
3531 
3532   /* Only optimize anything if the bb contains at least one clobber,
3533      ends with a resx (checked by the caller), optionally contains some
3534      debug stmts or labels and at most one __builtin_stack_restore
3535      call, and has an incoming EH edge.  */
3536   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3537     {
3538       gimple *stmt = gsi_stmt (gsi);
3539       if (is_gimple_debug (stmt))
3540 	continue;
3541       if (gimple_clobber_p (stmt))
3542 	{
3543 	  any_clobbers = true;
3544 	  continue;
3545 	}
3546       if (!seen_stack_restore
3547 	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3548 	{
3549 	  seen_stack_restore = true;
3550 	  continue;
3551 	}
3552       if (gimple_code (stmt) == GIMPLE_LABEL)
3553 	break;
3554       return;
3555     }
3556   if (!any_clobbers)
3557     return;
3558   FOR_EACH_EDGE (e, ei, bb->preds)
3559     if (e->flags & EDGE_EH)
3560       break;
3561   if (e == NULL)
3562     return;
3563   gsi = gsi_last_bb (bb);
3564   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3565     {
3566       gimple *stmt = gsi_stmt (gsi);
3567       if (!gimple_clobber_p (stmt))
3568 	continue;
3569       unlink_stmt_vdef (stmt);
3570       gsi_remove (&gsi, true);
3571       release_defs (stmt);
3572     }
3573 }
3574 
3575 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3576    internal throw to successor BB.
3577    SUNK, if not NULL, is an array of sequences indexed by basic-block
3578    index to sink to and to pick up sinking opportunities from.
3579    If FOUND_OPPORTUNITY is not NULL then do not perform the optimization
3580    but set *FOUND_OPPORTUNITY to true.  */
3581 
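/* A hedged sketch of the transformation (illustrative GIMPLE only).
   Before:
     <bb N>:  a = {v} {CLOBBER};  resx R;
     <bb M>:  (EH successor of bb N)  ...
   After:
     <bb N>:  resx R;
     <bb M>:  a = {v} {CLOBBER};  ...
   The clobbers move across the EH edge into the landing-pad block;
   virtual operands are fixed up through the successor's virtual PHI when
   one exists, or marked for renaming otherwise.  */
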
3582 static int
3583 sink_clobbers (basic_block bb,
3584 	       gimple_seq *sunk = NULL, bool *found_opportunity = NULL)
3585 {
3586   edge e;
3587   edge_iterator ei;
3588   gimple_stmt_iterator gsi, dgsi;
3589   basic_block succbb;
3590   bool any_clobbers = false;
3591   unsigned todo = 0;
3592 
3593   /* Only optimize if BB has a single EH successor and
3594      all predecessor edges are EH too.  */
3595   if (!single_succ_p (bb)
3596       || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3597     return 0;
3598 
3599   FOR_EACH_EDGE (e, ei, bb->preds)
3600     {
3601       if ((e->flags & EDGE_EH) == 0)
3602 	return 0;
3603     }
3604 
3605   /* And BB contains only CLOBBER stmts before the final
3606      RESX.  */
3607   gsi = gsi_last_bb (bb);
3608   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3609     {
3610       gimple *stmt = gsi_stmt (gsi);
3611       if (is_gimple_debug (stmt))
3612 	continue;
3613       if (gimple_code (stmt) == GIMPLE_LABEL)
3614 	break;
3615       if (!gimple_clobber_p (stmt))
3616 	return 0;
3617       any_clobbers = true;
3618     }
3619   if (!any_clobbers && (!sunk || gimple_seq_empty_p (sunk[bb->index])))
3620     return 0;
3621 
3622   /* If this was a dry run, tell it we found clobbers to sink.  */
3623   if (found_opportunity)
3624     {
3625       *found_opportunity = true;
3626       return 0;
3627     }
3628 
3629   edge succe = single_succ_edge (bb);
3630   succbb = succe->dest;
3631 
3632   /* See if there is a virtual PHI node to take an updated virtual
3633      operand from.  */
3634   gphi *vphi = NULL;
3635   for (gphi_iterator gpi = gsi_start_phis (succbb);
3636        !gsi_end_p (gpi); gsi_next (&gpi))
3637     {
3638       tree res = gimple_phi_result (gpi.phi ());
3639       if (virtual_operand_p (res))
3640 	{
3641 	  vphi = gpi.phi ();
3642 	  break;
3643 	}
3644     }
3645 
3646   gimple *first_sunk = NULL;
3647   gimple *last_sunk = NULL;
3648   if (sunk && !(succbb->flags & BB_VISITED))
3649     dgsi = gsi_start (sunk[succbb->index]);
3650   else
3651     dgsi = gsi_after_labels (succbb);
3652   gsi = gsi_last_bb (bb);
3653   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3654     {
3655       gimple *stmt = gsi_stmt (gsi);
3656       tree lhs;
3657       if (is_gimple_debug (stmt))
3658 	continue;
3659       if (gimple_code (stmt) == GIMPLE_LABEL)
3660 	break;
3661       lhs = gimple_assign_lhs (stmt);
3662       /* Unfortunately we don't have dominance info updated at this
3663 	 point, so checking whether
3664 	 dominated_by_p (CDI_DOMINATORS, succbb,
3665 			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
3666 	 holds would be too costly.  Thus, avoid sinking any clobbers that
3667 	 refer to non-(D) SSA_NAMEs.  */
3668       if (TREE_CODE (lhs) == MEM_REF
3669 	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3670 	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3671 	{
3672 	  unlink_stmt_vdef (stmt);
3673 	  gsi_remove (&gsi, true);
3674 	  release_defs (stmt);
3675 	  continue;
3676 	}
3677 
3678       /* As we do not change stmt order when sinking across a
3679          forwarder edge we can keep virtual operands in place.  */
3680       gsi_remove (&gsi, false);
3681       gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3682       if (!first_sunk)
3683 	first_sunk = stmt;
3684       last_sunk = stmt;
3685     }
3686   if (sunk && !gimple_seq_empty_p (sunk[bb->index]))
3687     {
3688       if (!first_sunk)
3689 	first_sunk = gsi_stmt (gsi_last (sunk[bb->index]));
3690       last_sunk = gsi_stmt (gsi_start (sunk[bb->index]));
3691       gsi_insert_seq_before_without_update (&dgsi,
3692 					    sunk[bb->index], GSI_NEW_STMT);
3693       sunk[bb->index] = NULL;
3694     }
3695   if (first_sunk)
3696     {
3697       /* Adjust virtual operands if we sunk across a virtual PHI.  */
3698       if (vphi)
3699 	{
3700 	  imm_use_iterator iter;
3701 	  use_operand_p use_p;
3702 	  gimple *use_stmt;
3703 	  tree phi_def = gimple_phi_result (vphi);
3704 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, phi_def)
3705 	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3706               SET_USE (use_p, gimple_vdef (first_sunk));
3707 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def))
3708 	    {
3709 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (first_sunk)) = 1;
3710 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def) = 0;
3711 	    }
3712 	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe),
3713 		   gimple_vuse (last_sunk));
3714 	  SET_USE (gimple_vuse_op (last_sunk), phi_def);
3715 	}
3716       /* If there isn't a single predecessor and there is no virtual PHI
3717          node, arrange for the virtual operands to be renamed.  */
3718       else if (!single_pred_p (succbb)
3719 	       && TREE_CODE (gimple_vuse (last_sunk)) == SSA_NAME)
3720 	{
3721 	  mark_virtual_operand_for_renaming (gimple_vuse (last_sunk));
3722 	  todo |= TODO_update_ssa_only_virtuals;
3723 	}
3724     }
3725 
3726   return todo;
3727 }
3728 
3729 /* At the end of inlining, we can lower EH_DISPATCH.  Return true when
3730    we have found some duplicate labels and removed some edges.  */
3731 
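/* A hedged illustration (the GIMPLE shown is ours, not a compiler dump).
   For an ERT_TRY region with two typed handlers,

     eh_dispatch <R>

   is lowered to roughly

     filter_N = __builtin_eh_filter (<R>);
     switch (filter_N) <default: <fallthru-or-catch-all>,
			case 1: <handler-A>,
			case 2: <handler-B>>

   while an ERT_ALLOWED_EXCEPTIONS region becomes a single comparison of
   the filter value followed by a conditional branch.  */
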
3732 static bool
3733 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3734 {
3735   gimple_stmt_iterator gsi;
3736   int region_nr;
3737   eh_region r;
3738   tree filter, fn;
3739   gimple *x;
3740   bool redirected = false;
3741 
3742   region_nr = gimple_eh_dispatch_region (stmt);
3743   r = get_eh_region_from_number (region_nr);
3744 
3745   gsi = gsi_last_bb (src);
3746 
3747   switch (r->type)
3748     {
3749     case ERT_TRY:
3750       {
3751 	auto_vec<tree> labels;
3752 	tree default_label = NULL;
3753 	eh_catch c;
3754 	edge_iterator ei;
3755 	edge e;
3756 	hash_set<tree> seen_values;
3757 
3758 	/* Collect the labels for a switch.  Clear each handler's label
3759 	   field because we'll no longer have anything keeping these labels
3760 	   in existence and the optimizer will be free to merge these
3761 	   blocks at will.  */
3762 	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3763 	  {
3764 	    tree tp_node, flt_node, lab = c->label;
3765 	    bool have_label = false;
3766 
3767 	    c->label = NULL;
3768 	    tp_node = c->type_list;
3769 	    flt_node = c->filter_list;
3770 
3771 	    if (tp_node == NULL)
3772 	      {
3773 	        default_label = lab;
3774 		break;
3775 	      }
3776 	    do
3777 	      {
3778 		/* Filter out duplicate labels that arise when this handler
3779 		   is shadowed by an earlier one.  When no labels are
3780 		   attached to the handler anymore, we remove
3781 		   the corresponding edge and then we delete unreachable
3782 		   blocks at the end of this pass.  */
3783 		if (! seen_values.contains (TREE_VALUE (flt_node)))
3784 		  {
3785 		    tree t = build_case_label (TREE_VALUE (flt_node),
3786 					       NULL, lab);
3787 		    labels.safe_push (t);
3788 		    seen_values.add (TREE_VALUE (flt_node));
3789 		    have_label = true;
3790 		  }
3791 
3792 		tp_node = TREE_CHAIN (tp_node);
3793 		flt_node = TREE_CHAIN (flt_node);
3794 	      }
3795 	    while (tp_node);
3796 	    if (! have_label)
3797 	      {
3798 		remove_edge (find_edge (src, label_to_block (cfun, lab)));
3799 	        redirected = true;
3800 	      }
3801 	  }
3802 
3803 	/* Clean up the edge flags.  */
3804 	FOR_EACH_EDGE (e, ei, src->succs)
3805 	  {
3806 	    if (e->flags & EDGE_FALLTHRU)
3807 	      {
3808 		/* If there was no catch-all, use the fallthru edge.  */
3809 		if (default_label == NULL)
3810 		  default_label = gimple_block_label (e->dest);
3811 		e->flags &= ~EDGE_FALLTHRU;
3812 	      }
3813 	  }
3814 	gcc_assert (default_label != NULL);
3815 
3816 	/* Don't generate a switch if there's only a default case.
3817 	   This is common in the form of try { A; } catch (...) { B; }.  */
3818 	if (!labels.exists ())
3819 	  {
3820 	    e = single_succ_edge (src);
3821 	    e->flags |= EDGE_FALLTHRU;
3822 	  }
3823 	else
3824 	  {
3825 	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3826 	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3827 							 region_nr));
3828 	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3829 	    filter = make_ssa_name (filter, x);
3830 	    gimple_call_set_lhs (x, filter);
3831 	    gimple_set_location (x, gimple_location (stmt));
3832 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3833 
3834 	    /* Turn the default label into a default case.  */
3835 	    default_label = build_case_label (NULL, NULL, default_label);
3836 	    sort_case_labels (labels);
3837 
3838 	    x = gimple_build_switch (filter, default_label, labels);
3839 	    gimple_set_location (x, gimple_location (stmt));
3840 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3841 	  }
3842       }
3843       break;
3844 
3845     case ERT_ALLOWED_EXCEPTIONS:
3846       {
3847 	edge b_e = BRANCH_EDGE (src);
3848 	edge f_e = FALLTHRU_EDGE (src);
3849 
3850 	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3851 	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3852 						     region_nr));
3853 	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3854 	filter = make_ssa_name (filter, x);
3855 	gimple_call_set_lhs (x, filter);
3856 	gimple_set_location (x, gimple_location (stmt));
3857 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3858 
3859 	r->u.allowed.label = NULL;
3860 	x = gimple_build_cond (EQ_EXPR, filter,
3861 			       build_int_cst (TREE_TYPE (filter),
3862 					      r->u.allowed.filter),
3863 			       NULL_TREE, NULL_TREE);
3864 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3865 
3866 	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3867         f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3868       }
3869       break;
3870 
3871     default:
3872       gcc_unreachable ();
3873     }
3874 
3875   /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
3876   gsi_remove (&gsi, true);
3877   return redirected;
3878 }
3879 
3880 namespace {
3881 
3882 const pass_data pass_data_lower_eh_dispatch =
3883 {
3884   GIMPLE_PASS, /* type */
3885   "ehdisp", /* name */
3886   OPTGROUP_NONE, /* optinfo_flags */
3887   TV_TREE_EH, /* tv_id */
3888   PROP_gimple_lcf, /* properties_required */
3889   0, /* properties_provided */
3890   0, /* properties_destroyed */
3891   0, /* todo_flags_start */
3892   0, /* todo_flags_finish */
3893 };
3894 
3895 class pass_lower_eh_dispatch : public gimple_opt_pass
3896 {
3897 public:
3898   pass_lower_eh_dispatch (gcc::context *ctxt)
3899     : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3900   {}
3901 
3902   /* opt_pass methods: */
3903   virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3904   virtual unsigned int execute (function *);
3905 
3906 }; // class pass_lower_eh_dispatch
3907 
3908 unsigned
3909 pass_lower_eh_dispatch::execute (function *fun)
3910 {
3911   basic_block bb;
3912   int flags = 0;
3913   bool redirected = false;
3914   bool any_resx_to_process = false;
3915 
3916   assign_filter_values ();
3917 
3918   FOR_EACH_BB_FN (bb, fun)
3919     {
3920       gimple *last = last_stmt (bb);
3921       if (last == NULL)
3922 	continue;
3923       if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3924 	{
3925 	  redirected |= lower_eh_dispatch (bb,
3926 					   as_a <geh_dispatch *> (last));
3927 	  flags |= TODO_update_ssa_only_virtuals;
3928 	}
3929       else if (gimple_code (last) == GIMPLE_RESX)
3930 	{
3931 	  if (stmt_can_throw_external (fun, last))
3932 	    optimize_clobbers (bb);
3933 	  else if (!any_resx_to_process)
3934 	    sink_clobbers (bb, NULL, &any_resx_to_process);
3935 	}
3936       bb->flags &= ~BB_VISITED;
3937     }
3938   if (redirected)
3939     {
3940       free_dominance_info (CDI_DOMINATORS);
3941       delete_unreachable_blocks ();
3942     }
3943 
3944   if (any_resx_to_process)
3945     {
3946       /* Make sure to catch all secondary sinking opportunities by processing
3947 	 blocks in RPO order and after all CFG modifications from lowering
3948 	 and unreachable block removal.  */
3949       int *rpo = XNEWVEC  (int, n_basic_blocks_for_fn (fun));
3950       int rpo_n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false);
3951       gimple_seq *sunk = XCNEWVEC (gimple_seq, last_basic_block_for_fn (fun));
3952       for (int i = 0; i < rpo_n; ++i)
3953 	{
3954 	  bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3955 	  gimple *last = last_stmt (bb);
3956 	  if (last
3957 	      && gimple_code (last) == GIMPLE_RESX
3958 	      && !stmt_can_throw_external (fun, last))
3959 	    flags |= sink_clobbers (bb, sunk);
3960 	  /* If there were any clobbers sunk into this BB, insert them now.  */
3961 	  if (!gimple_seq_empty_p (sunk[bb->index]))
3962 	    {
3963 	      gimple_stmt_iterator gsi = gsi_after_labels (bb);
3964 	      gsi_insert_seq_before (&gsi, sunk[bb->index], GSI_NEW_STMT);
3965 	      sunk[bb->index] = NULL;
3966 	    }
3967 	  bb->flags |= BB_VISITED;
3968 	}
3969       free (rpo);
3970       free (sunk);
3971     }
3972 
3973   return flags;
3974 }
3975 
3976 } // anon namespace
3977 
3978 gimple_opt_pass *
3979 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3980 {
3981   return new pass_lower_eh_dispatch (ctxt);
3982 }
3983 
3984 /* Walk statements, see what regions and, optionally, landing pads
3985    are really referenced.
3986 
3987    Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3988    and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3989 
3990    Passing NULL for LP_REACHABLEP is valid; in that case only reachable
3991    regions are marked.
3992 
3993    The caller is responsible for freeing the returned sbitmaps.  */
3994 
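/* A typical use, modelled on remove_unreachable_handlers_no_lp below
   (this fragment is illustrative only):

     sbitmap r_reachable;
     mark_reachable_handlers (&r_reachable, NULL);
     ... augment R_REACHABLE with any regions that must be kept ...
     remove_unreachable_eh_regions (r_reachable);
     sbitmap_free (r_reachable);  */
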
3995 static void
3996 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3997 {
3998   sbitmap r_reachable, lp_reachable;
3999   basic_block bb;
4000   bool mark_landing_pads = (lp_reachablep != NULL);
4001   gcc_checking_assert (r_reachablep != NULL);
4002 
4003   r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
4004   bitmap_clear (r_reachable);
4005   *r_reachablep = r_reachable;
4006 
4007   if (mark_landing_pads)
4008     {
4009       lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
4010       bitmap_clear (lp_reachable);
4011       *lp_reachablep = lp_reachable;
4012     }
4013   else
4014     lp_reachable = NULL;
4015 
4016   FOR_EACH_BB_FN (bb, cfun)
4017     {
4018       gimple_stmt_iterator gsi;
4019 
4020       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4021 	{
4022 	  gimple *stmt = gsi_stmt (gsi);
4023 
4024 	  if (mark_landing_pads)
4025 	    {
4026 	      int lp_nr = lookup_stmt_eh_lp (stmt);
4027 
4028 	      /* Negative LP numbers are MUST_NOT_THROW regions which
4029 		 are not considered BB enders.  */
4030 	      if (lp_nr < 0)
4031 		bitmap_set_bit (r_reachable, -lp_nr);
4032 
4033 	      /* Positive LP numbers are real landing pads, and BB enders.  */
4034 	      else if (lp_nr > 0)
4035 		{
4036 		  gcc_assert (gsi_one_before_end_p (gsi));
4037 		  eh_region region = get_eh_region_from_lp_number (lp_nr);
4038 		  bitmap_set_bit (r_reachable, region->index);
4039 		  bitmap_set_bit (lp_reachable, lp_nr);
4040 		}
4041 	    }
4042 
4043 	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
4044 	  switch (gimple_code (stmt))
4045 	    {
4046 	    case GIMPLE_RESX:
4047 	      bitmap_set_bit (r_reachable,
4048 			      gimple_resx_region (as_a <gresx *> (stmt)));
4049 	      break;
4050 	    case GIMPLE_EH_DISPATCH:
4051 	      bitmap_set_bit (r_reachable,
4052 			      gimple_eh_dispatch_region (
4053                                 as_a <geh_dispatch *> (stmt)));
4054 	      break;
4055 	    case GIMPLE_CALL:
4056 	      if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
4057 		for (int i = 0; i < 2; ++i)
4058 		  {
4059 		    tree rt = gimple_call_arg (stmt, i);
4060 		    HOST_WIDE_INT ri = tree_to_shwi (rt);
4061 
4062 		    gcc_assert (ri == (int)ri);
4063 		    bitmap_set_bit (r_reachable, ri);
4064 		  }
4065 	      break;
4066 	    default:
4067 	      break;
4068 	    }
4069 	}
4070     }
4071 }
4072 
4073 /* Remove unreachable handlers and unreachable landing pads.  */
4074 
4075 static void
4076 remove_unreachable_handlers (void)
4077 {
4078   sbitmap r_reachable, lp_reachable;
4079   eh_region region;
4080   eh_landing_pad lp;
4081   unsigned i;
4082 
4083   mark_reachable_handlers (&r_reachable, &lp_reachable);
4084 
4085   if (dump_file)
4086     {
4087       fprintf (dump_file, "Before removal of unreachable regions:\n");
4088       dump_eh_tree (dump_file, cfun);
4089       fprintf (dump_file, "Reachable regions: ");
4090       dump_bitmap_file (dump_file, r_reachable);
4091       fprintf (dump_file, "Reachable landing pads: ");
4092       dump_bitmap_file (dump_file, lp_reachable);
4093     }
4094 
4095   if (dump_file)
4096     {
4097       FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4098 	if (region && !bitmap_bit_p (r_reachable, region->index))
4099 	  fprintf (dump_file,
4100 		   "Removing unreachable region %d\n",
4101 		   region->index);
4102     }
4103 
4104   remove_unreachable_eh_regions (r_reachable);
4105 
4106   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4107     if (lp && !bitmap_bit_p (lp_reachable, lp->index))
4108       {
4109 	if (dump_file)
4110 	  fprintf (dump_file,
4111 		   "Removing unreachable landing pad %d\n",
4112 		   lp->index);
4113 	remove_eh_landing_pad (lp);
4114       }
4115 
4116   if (dump_file)
4117     {
4118       fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
4119       dump_eh_tree (dump_file, cfun);
4120       fprintf (dump_file, "\n\n");
4121     }
4122 
4123   sbitmap_free (r_reachable);
4124   sbitmap_free (lp_reachable);
4125 
4126   if (flag_checking)
4127     verify_eh_tree (cfun);
4128 }
4129 
4130 /* Remove unreachable handlers if any landing pads have been removed after
4131    the last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
4132 
4133 void
4134 maybe_remove_unreachable_handlers (void)
4135 {
4136   eh_landing_pad lp;
4137   unsigned i;
4138 
4139   if (cfun->eh == NULL)
4140     return;
4141 
4142   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4143     if (lp
4144 	&& (lp->post_landing_pad == NULL_TREE
4145 	    || label_to_block (cfun, lp->post_landing_pad) == NULL))
4146       {
4147 	remove_unreachable_handlers ();
4148 	return;
4149       }
4150 }
4151 
4152 /* Remove regions that do not have landing pads.  This assumes
4153    that remove_unreachable_handlers has already been run, and
4154    that we've just manipulated the landing pads since then.
4155 
4156    Preserve regions with landing pads and regions that prevent
4157    exceptions from propagating further, even if these regions
4158    are not reachable.  */
4159 
4160 static void
4161 remove_unreachable_handlers_no_lp (void)
4162 {
4163   eh_region region;
4164   sbitmap r_reachable;
4165   unsigned i;
4166 
4167   mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
4168 
4169   FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4170     {
4171       if (! region)
4172 	continue;
4173 
4174       if (region->landing_pads != NULL
4175 	  || region->type == ERT_MUST_NOT_THROW)
4176 	bitmap_set_bit (r_reachable, region->index);
4177 
4178       if (dump_file
4179 	  && !bitmap_bit_p (r_reachable, region->index))
4180 	fprintf (dump_file,
4181 		 "Removing unreachable region %d\n",
4182 		 region->index);
4183     }
4184 
4185   remove_unreachable_eh_regions (r_reachable);
4186 
4187   sbitmap_free (r_reachable);
4188 }
4189 
4190 /* Undo critical edge splitting on an EH landing pad.  Earlier, we
4191    optimistically split all sorts of edges, including EH edges.  The
4192    optimization passes in between may not have needed them; if not,
4193    we should undo the split.
4194 
4195    Recognize this case by having one EH edge incoming to the BB and
4196    one normal edge outgoing; BB should be empty apart from the
4197    post_landing_pad label.
4198 
4199    Note that this is slightly different from the empty handler case
4200    handled by cleanup_empty_eh, in that the actual handler may yet
4201    have actual code but the landing pad has been separated from the
4202    handler.  As such, cleanup_empty_eh relies on this transformation
4203    having been done first.  */
4204 
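/* A hedged sketch of the shape being undone (illustrative only).
   Before:
     throwing stmt  --EH-->  <bb LP>: post_landing_pad label only
     <bb LP>  --normal-->  <bb D>: the real handler code
   After:
     throwing stmt  --EH-->  <bb D>: post_landing_pad label + handler code
   i.e. the empty forwarder block created by critical edge splitting is
   bypassed and then removed as unreachable.  */
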
4205 static bool
4206 unsplit_eh (eh_landing_pad lp)
4207 {
4208   basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4209   gimple_stmt_iterator gsi;
4210   edge e_in, e_out;
4211 
4212   /* Quickly check the edge counts on BB for singularity.  */
4213   if (!single_pred_p (bb) || !single_succ_p (bb))
4214     return false;
4215   e_in = single_pred_edge (bb);
4216   e_out = single_succ_edge (bb);
4217 
4218   /* Input edge must be EH and output edge must be normal.  */
4219   if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4220     return false;
4221 
4222   /* The block must be empty except for the labels and debug insns.  */
4223   gsi = gsi_after_labels (bb);
4224   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4225     gsi_next_nondebug (&gsi);
4226   if (!gsi_end_p (gsi))
4227     return false;
4228 
4229   /* The destination block must not already have a landing pad
4230      for a different region.  */
4231   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4232     {
4233       glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4234       tree lab;
4235       int lp_nr;
4236 
4237       if (!label_stmt)
4238 	break;
4239       lab = gimple_label_label (label_stmt);
4240       lp_nr = EH_LANDING_PAD_NR (lab);
4241       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4242 	return false;
4243     }
4244 
4245   /* The new destination block must not already be a destination of
4246      the source block, lest we merge fallthru and EH edges and cause
4247      all sorts of confusion.  */
4248   if (find_edge (e_in->src, e_out->dest))
4249     return false;
4250 
4251   /* ??? We can get degenerate phis due to cfg cleanups.  I would have
4252      thought this should have been cleaned up by a phicprop pass, but
4253      that doesn't appear to handle virtuals.  Propagate by hand.  */
4254   if (!gimple_seq_empty_p (phi_nodes (bb)))
4255     {
4256       for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4257 	{
4258 	  gimple *use_stmt;
4259 	  gphi *phi = gpi.phi ();
4260 	  tree lhs = gimple_phi_result (phi);
4261 	  tree rhs = gimple_phi_arg_def (phi, 0);
4262 	  use_operand_p use_p;
4263 	  imm_use_iterator iter;
4264 
4265 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4266 	    {
4267 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4268 		SET_USE (use_p, rhs);
4269 	    }
4270 
4271 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4272 	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4273 
4274 	  remove_phi_node (&gpi, true);
4275 	}
4276     }
4277 
4278   if (dump_file && (dump_flags & TDF_DETAILS))
4279     fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4280 	     lp->index, e_out->dest->index);
4281 
4282   /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
4283      a successor edge, humor it.  But do the real CFG change with the
4284      predecessor of E_OUT in order to preserve the ordering of arguments
4285      to the PHI nodes in E_OUT->DEST.  */
4286   redirect_eh_edge_1 (e_in, e_out->dest, false);
4287   redirect_edge_pred (e_out, e_in->src);
4288   e_out->flags = e_in->flags;
4289   e_out->probability = e_in->probability;
4290   remove_edge (e_in);
4291 
4292   return true;
4293 }
4294 
4295 /* Examine each landing pad block and see if it matches unsplit_eh.  */
4296 
4297 static bool
4298 unsplit_all_eh (void)
4299 {
4300   bool changed = false;
4301   eh_landing_pad lp;
4302   int i;
4303 
4304   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4305     if (lp)
4306       changed |= unsplit_eh (lp);
4307 
4308   return changed;
4309 }
4310 
4311 /* Wrapper around unsplit_all_eh that makes it usable everywhere.  */
4312 
4313 void
4314 unsplit_eh_edges (void)
4315 {
4316   bool changed;
4317 
4318   /* unsplit_all_eh can die looking up unreachable landing pads.  */
4319   maybe_remove_unreachable_handlers ();
4320 
4321   changed = unsplit_all_eh ();
4322 
4323   /* If EH edges have been unsplit, delete unreachable forwarder blocks.  */
4324   if (changed)
4325     {
4326       free_dominance_info (CDI_DOMINATORS);
4327       free_dominance_info (CDI_POST_DOMINATORS);
4328       delete_unreachable_blocks ();
4329     }
4330 }
4331 
4332 /* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
4333    to OLD_BB to NEW_BB; return true on success, false on failure.
4334 
4335    OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4336    PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4337    Virtual PHIs may be deleted and marked for renaming.  */
4338 
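/* A hedged example of the PHI merging performed here (all names are
   ours): suppose OLD_BB has  x_3 = PHI <x_1(E1), x_2(E2)>  on its
   incoming EH edges and NEW_BB has  y_4 = PHI <x_3(OLD_BB_OUT), ...>.
   After E1 and E2 are redirected to NEW_BB, its PHI reads the per-edge
   values directly:  y_4 = PHI <x_1(E1), x_2(E2), ...>.  The per-edge
   values are staged with redirect_edge_var_map_add and committed by
   flush_pending_stmts as each edge is moved.  */
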
4339 static bool
4340 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4341 			     edge old_bb_out, bool change_region)
4342 {
4343   gphi_iterator ngsi, ogsi;
4344   edge_iterator ei;
4345   edge e;
4346   bitmap ophi_handled;
4347 
4348   /* The destination block must not be a regular successor for any
4349      of the preds of the landing pad.  Thus, avoid turning
4350         <..>
4351 	 |  \ EH
4352 	 |  <..>
4353 	 |  /
4354 	<..>
4355      into
4356         <..>
4357 	|  | EH
4358 	<..>
4359      which CFG verification would choke on.  See PR45172 and PR51089.  */
4360   if (!single_pred_p (new_bb))
4361     FOR_EACH_EDGE (e, ei, old_bb->preds)
4362       if (find_edge (e->src, new_bb))
4363 	return false;
4364 
4365   FOR_EACH_EDGE (e, ei, old_bb->preds)
4366     redirect_edge_var_map_clear (e);
4367 
4368   ophi_handled = BITMAP_ALLOC (NULL);
4369 
4370   /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4371      for the edges we're going to move.  */
4372   for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4373     {
4374       gphi *ophi, *nphi = ngsi.phi ();
4375       tree nresult, nop;
4376 
4377       nresult = gimple_phi_result (nphi);
4378       nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4379 
4380       /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4381 	 the source ssa_name.  */
4382       ophi = NULL;
4383       for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4384 	{
4385 	  ophi = ogsi.phi ();
4386 	  if (gimple_phi_result (ophi) == nop)
4387 	    break;
4388 	  ophi = NULL;
4389 	}
4390 
4391       /* If we did find the corresponding PHI, copy those inputs.  */
4392       if (ophi)
4393 	{
4394 	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
4395 	  if (!has_single_use (nop))
4396 	    {
4397 	      imm_use_iterator imm_iter;
4398 	      use_operand_p use_p;
4399 
4400 	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4401 		{
4402 		  if (!gimple_debug_bind_p (USE_STMT (use_p))
4403 		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4404 			  || gimple_bb (USE_STMT (use_p)) != new_bb))
4405 		    goto fail;
4406 		}
4407 	    }
4408 	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4409 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4410 	    {
4411 	      location_t oloc;
4412 	      tree oop;
4413 
4414 	      if ((e->flags & EDGE_EH) == 0)
4415 		continue;
4416 	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
4417 	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4418 	      redirect_edge_var_map_add (e, nresult, oop, oloc);
4419 	    }
4420 	}
4421       /* If we didn't find the PHI, then whether it's a real variable or
4422 	 a VOP, we know from the fact that OLD_BB is tree_empty_eh_handler_p
4423 	 that the variable is unchanged from input to the block and we can
4424 	 simply re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
4425       else
4426 	{
4427 	  location_t nloc
4428 	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4429 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4430 	    redirect_edge_var_map_add (e, nresult, nop, nloc);
4431 	}
4432     }
4433 
4434   /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
4435      we don't know what values from the other edges into NEW_BB to use.  */
4436   for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4437     {
4438       gphi *ophi = ogsi.phi ();
4439       tree oresult = gimple_phi_result (ophi);
4440       if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4441 	goto fail;
4442     }
4443 
4444   /* Finally, move the edges and update the PHIs.  */
4445   for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4446     if (e->flags & EDGE_EH)
4447       {
4448 	/* ???  CFG manipulation routines do not try to update loop
4449 	   form on edge redirection.  Do so manually here for now.  */
4450 	/* If we redirect a loop entry or latch edge, that will either create
4451 	   a multiple-entry loop or rotate the loop.  If the loops merge,
4452 	   we may have created a loop with multiple latches.
4453 	   None of this is easily fixed, so cancel the affected loop
4454 	   and mark the other loop as possibly having multiple latches.  */
4455 	if (e->dest == e->dest->loop_father->header)
4456 	  {
4457 	    mark_loop_for_removal (e->dest->loop_father);
4458 	    new_bb->loop_father->latch = NULL;
4459 	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4460 	  }
4461 	redirect_eh_edge_1 (e, new_bb, change_region);
4462 	redirect_edge_succ (e, new_bb);
4463 	flush_pending_stmts (e);
4464       }
4465     else
4466       ei_next (&ei);
4467 
4468   BITMAP_FREE (ophi_handled);
4469   return true;
4470 
4471  fail:
4472   FOR_EACH_EDGE (e, ei, old_bb->preds)
4473     redirect_edge_var_map_clear (e);
4474   BITMAP_FREE (ophi_handled);
4475   return false;
4476 }
4477 
4478 /* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
4479    old region to NEW_REGION at BB.  */
4480 
4481 static void
4482 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4483 			  eh_landing_pad lp, eh_region new_region)
4484 {
4485   gimple_stmt_iterator gsi;
4486   eh_landing_pad *pp;
4487 
4488   for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4489     continue;
4490   *pp = lp->next_lp;
4491 
4492   lp->region = new_region;
4493   lp->next_lp = new_region->landing_pads;
4494   new_region->landing_pads = lp;
4495 
4496   /* Delete the RESX that was matched within the empty handler block.  */
4497   gsi = gsi_last_bb (bb);
4498   unlink_stmt_vdef (gsi_stmt (gsi));
4499   gsi_remove (&gsi, true);
4500 
4501   /* Clean up E_OUT for the fallthru.  */
4502   e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4503   e_out->probability = profile_probability::always ();
4504 }
4505 
4506 /* A subroutine of cleanup_empty_eh.  Handle more complex cases of
4507    unsplitting than unsplit_eh was prepared to handle, e.g. when
4508    multiple incoming edges and phis are involved.  */
4509 
4510 static bool
4511 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4512 {
4513   gimple_stmt_iterator gsi;
4514   tree lab;
4515 
4516   /* We really ought not have totally lost everything following
4517      a landing pad label.  Given that BB is empty, there had better
4518      be a successor.  */
4519   gcc_assert (e_out != NULL);
4520 
4521   /* The destination block must not already have a landing pad
4522      for a different region.  */
4523   lab = NULL;
4524   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4525     {
4526       glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4527       int lp_nr;
4528 
4529       if (!stmt)
4530 	break;
4531       lab = gimple_label_label (stmt);
4532       lp_nr = EH_LANDING_PAD_NR (lab);
4533       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4534 	return false;
4535     }
4536 
4537   /* Attempt to move the PHIs into the successor block.  */
4538   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4539     {
4540       if (dump_file && (dump_flags & TDF_DETAILS))
4541 	fprintf (dump_file,
4542 		 "Unsplit EH landing pad %d to block %i "
4543 		 "(via cleanup_empty_eh).\n",
4544 		 lp->index, e_out->dest->index);
4545       return true;
4546     }
4547 
4548   return false;
4549 }
4550 
4551 /* Return true if edge E_FIRST is part of an empty infinite loop
4552    or leads to such a loop through a series of single successor
4553    empty bbs.  */
4554 
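/* A minimal illustrative case (ours): if the only way out of a handler is

     <bb 8>: goto <bb 9>;
     <bb 9>: goto <bb 8>;

   with both blocks containing nothing but labels and debug statements,
   the edge leads into an empty infinite loop and cleanup_empty_eh must
   not treat the handler as removable.  */
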
4555 static bool
4556 infinite_empty_loop_p (edge e_first)
4557 {
4558   bool inf_loop = false;
4559   edge e;
4560 
4561   if (e_first->dest == e_first->src)
4562     return true;
4563 
4564   e_first->src->aux = (void *) 1;
4565   for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4566     {
4567       gimple_stmt_iterator gsi;
4568       if (e->dest->aux)
4569 	{
4570 	  inf_loop = true;
4571 	  break;
4572 	}
4573       e->dest->aux = (void *) 1;
4574       gsi = gsi_after_labels (e->dest);
4575       if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4576 	gsi_next_nondebug (&gsi);
4577       if (!gsi_end_p (gsi))
4578 	break;
4579     }
4580   e_first->src->aux = NULL;
4581   for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4582     e->dest->aux = NULL;
4583 
4584   return inf_loop;
4585 }
4586 
4587 /* Examine the block associated with LP to determine if it's an empty
4588    handler for its EH region.  If so, attempt to redirect EH edges to
4589    an outer region.  Return true if the CFG was updated in any way.  This
4590    is similar to jump forwarding, just across EH edges.  */
4591 
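/* A hedged illustration (ours): a cleanup region whose handler ended up
   doing nothing, e.g. after its destructor call was optimized away,
   leaves a landing-pad block whose only real statement is a resx into
   the outer region.  Depending on what that outer region is, the cases
   below delete the landing pad outright, redirect the incoming EH edges
   to the outer landing pad (merging PHIs), or simply move the landing
   pad to the outer region when every predecessor edge is an EH edge.  */
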
4592 static bool
4593 cleanup_empty_eh (eh_landing_pad lp)
4594 {
4595   basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4596   gimple_stmt_iterator gsi;
4597   gimple *resx;
4598   eh_region new_region;
4599   edge_iterator ei;
4600   edge e, e_out;
4601   bool has_non_eh_pred;
4602   bool ret = false;
4603   int new_lp_nr;
4604 
4605   /* There can be zero or one edges out of BB.  This is the quickest test.  */
4606   switch (EDGE_COUNT (bb->succs))
4607     {
4608     case 0:
4609       e_out = NULL;
4610       break;
4611     case 1:
4612       e_out = single_succ_edge (bb);
4613       break;
4614     default:
4615       return false;
4616     }
4617 
4618   gsi = gsi_last_nondebug_bb (bb);
4619   resx = gsi_stmt (gsi);
4620   if (resx && is_gimple_resx (resx))
4621     {
4622       if (stmt_can_throw_external (cfun, resx))
4623 	optimize_clobbers (bb);
4624       else if (sink_clobbers (bb))
4625 	ret = true;
4626     }
4627 
4628   gsi = gsi_after_labels (bb);
4629 
4630   /* Make sure to skip debug statements.  */
4631   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4632     gsi_next_nondebug (&gsi);
4633 
4634   /* If the block is totally empty, look for more unsplitting cases.  */
4635   if (gsi_end_p (gsi))
4636     {
4637       /* For the degenerate case of an infinite loop, bail out.
4638 	 If bb has no successors and is totally empty, which can happen e.g.
4639 	 because of an incorrect noreturn attribute, bail out too.  */
4640       if (e_out == NULL
4641 	  || infinite_empty_loop_p (e_out))
4642 	return ret;
4643 
4644       return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4645     }
4646 
4647   /* The block should consist only of a single RESX statement, modulo a
4648      preceding call to __builtin_stack_restore if there is no outgoing
4649      edge, since the call can be eliminated in this case.  */
4650   resx = gsi_stmt (gsi);
4651   if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4652     {
4653       gsi_next_nondebug (&gsi);
4654       resx = gsi_stmt (gsi);
4655     }
4656   if (!is_gimple_resx (resx))
4657     return ret;
4658   gcc_assert (gsi_one_nondebug_before_end_p (gsi));
4659 
4660   /* Determine if there are non-EH edges, or resx edges into the handler.  */
4661   has_non_eh_pred = false;
4662   FOR_EACH_EDGE (e, ei, bb->preds)
4663     if (!(e->flags & EDGE_EH))
4664       has_non_eh_pred = true;
4665 
4666   /* Find the handler that's outer of the empty handler by looking at
4667      where the RESX instruction was vectored.  */
4668   new_lp_nr = lookup_stmt_eh_lp (resx);
4669   new_region = get_eh_region_from_lp_number (new_lp_nr);
4670 
4671   /* If there's no destination region within the current function,
4672      redirection is trivial via removing the throwing statements from
4673      the EH region, removing the EH edges, and allowing the block
4674      to go unreachable.  */
4675   if (new_region == NULL)
4676     {
4677       gcc_assert (e_out == NULL);
4678       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4679 	if (e->flags & EDGE_EH)
4680 	  {
4681 	    gimple *stmt = last_stmt (e->src);
4682 	    remove_stmt_from_eh_lp (stmt);
4683 	    remove_edge (e);
4684 	  }
4685 	else
4686 	  ei_next (&ei);
4687       goto succeed;
4688     }
4689 
4690   /* If the destination region is a MUST_NOT_THROW, allow the runtime
4691      to handle the abort and allow the blocks to go unreachable.  */
4692   if (new_region->type == ERT_MUST_NOT_THROW)
4693     {
4694       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4695 	if (e->flags & EDGE_EH)
4696 	  {
4697 	    gimple *stmt = last_stmt (e->src);
4698 	    remove_stmt_from_eh_lp (stmt);
4699 	    add_stmt_to_eh_lp (stmt, new_lp_nr);
4700 	    remove_edge (e);
4701 	  }
4702 	else
4703 	  ei_next (&ei);
4704       goto succeed;
4705     }
4706 
4707   /* Try to redirect the EH edges and merge the PHIs into the destination
4708      landing pad block.  If the merge succeeds, we'll already have redirected
4709      all the EH edges.  The handler itself will go unreachable if there were
4710      no normal edges.  */
4711   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4712     goto succeed;
4713 
4714   /* Finally, if all input edges are EH edges, then we can (potentially)
4715      reduce the number of transfers from the runtime by moving the landing
4716      pad from the original region to the new region.  This is a win when
4717      we remove the last CLEANUP region along a particular exception
4718      propagation path.  Since nothing changes except for the region with
4719      which the landing pad is associated, the PHI nodes do not need to be
4720      adjusted at all.  */
4721   if (!has_non_eh_pred)
4722     {
4723       cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4724       if (dump_file && (dump_flags & TDF_DETAILS))
4725 	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4726 		 lp->index, new_region->index);
4727 
4728       /* ??? The CFG didn't change, but we may have rendered the
4729 	 old EH region unreachable.  Trigger a cleanup there.  */
4730       return true;
4731     }
4732 
4733   return ret;
4734 
4735  succeed:
4736   if (dump_file && (dump_flags & TDF_DETAILS))
4737     fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4738   remove_eh_landing_pad (lp);
4739   return true;
4740 }
4741 
4742 /* Do a post-order traversal of the EH region tree.  Examine each
4743    post_landing_pad block and see if we can eliminate it as empty.  */
4744 
4745 static bool
4746 cleanup_all_empty_eh (void)
4747 {
4748   bool changed = false;
4749   eh_landing_pad lp;
4750   int i;
4751 
4752   /* The post-order traversal may lead to quadratic behavior in the redirection
4753      of incoming EH edges from inner LPs, so first try to walk the region
4754      tree from inner to outer LPs in order to eliminate these edges.  */
4755   for (i = vec_safe_length (cfun->eh->lp_array) - 1; i >= 1; --i)
4756     {
4757       lp = (*cfun->eh->lp_array)[i];
4758       if (lp)
4759 	changed |= cleanup_empty_eh (lp);
4760     }
4761 
4762   /* Now do the post-order traversal to eliminate outer empty LPs.  */
4763   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4764     if (lp)
4765       changed |= cleanup_empty_eh (lp);
4766 
4767   return changed;
4768 }
4769 
4770 /* Perform cleanups and lowering of exception handling:
4771     1) Cleanup regions whose handlers do nothing are optimized out.
4772     2) MUST_NOT_THROW regions that became dead because of 1) are optimized out.
4773     3) Info about regions that contain instructions, and about regions
4774        reachable via local EH edges, is collected.
4775     4) The EH tree is pruned of regions that are no longer necessary.
4776 
4777    TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4778 	 Unify those that have the same failure decl and locus.
4779 */
4780 
4781 static unsigned int
4782 execute_cleanup_eh_1 (void)
4783 {
4784   /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4785      looking up unreachable landing pads.  */
4786   remove_unreachable_handlers ();
4787 
4788   /* Watch out for the region tree vanishing because all regions were unreachable.  */
4789   if (cfun->eh->region_tree)
4790     {
4791       bool changed = false;
4792 
4793       if (optimize)
4794 	changed |= unsplit_all_eh ();
4795       changed |= cleanup_all_empty_eh ();
4796 
4797       if (changed)
4798 	{
4799 	  free_dominance_info (CDI_DOMINATORS);
4800 	  free_dominance_info (CDI_POST_DOMINATORS);
4801 
4802           /* We delayed all basic block deletion, as we may have performed
4803 	     cleanups on EH edges while non-EH edges were still present.  */
4804 	  delete_unreachable_blocks ();
4805 
4806 	  /* We manipulated the landing pads.  Remove any region that no
4807 	     longer has a landing pad.  */
4808 	  remove_unreachable_handlers_no_lp ();
4809 
4810 	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4811 	}
4812     }
4813 
4814   return 0;
4815 }
4816 
4817 namespace {
4818 
4819 const pass_data pass_data_cleanup_eh =
4820 {
4821   GIMPLE_PASS, /* type */
4822   "ehcleanup", /* name */
4823   OPTGROUP_NONE, /* optinfo_flags */
4824   TV_TREE_EH, /* tv_id */
4825   PROP_gimple_lcf, /* properties_required */
4826   0, /* properties_provided */
4827   0, /* properties_destroyed */
4828   0, /* todo_flags_start */
4829   0, /* todo_flags_finish */
4830 };
4831 
4832 class pass_cleanup_eh : public gimple_opt_pass
4833 {
4834 public:
4835   pass_cleanup_eh (gcc::context *ctxt)
4836     : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4837   {}
4838 
4839   /* opt_pass methods: */
4840   opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4841   virtual bool gate (function *fun)
4842     {
4843       return fun->eh != NULL && fun->eh->region_tree != NULL;
4844     }
4845 
4846   virtual unsigned int execute (function *);
4847 
4848 }; // class pass_cleanup_eh
4849 
4850 unsigned int
4851 pass_cleanup_eh::execute (function *fun)
4852 {
4853   int ret = execute_cleanup_eh_1 ();
4854 
4855   /* If the function no longer needs an EH personality routine
4856      clear it.  This exposes cross-language inlining opportunities
4857      and avoids references to a never defined personality routine.  */
4858   if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4859       && function_needs_eh_personality (fun) != eh_personality_lang)
4860     DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4861 
4862   return ret;
4863 }
4864 
4865 } // anon namespace
4866 
4867 gimple_opt_pass *
4868 make_pass_cleanup_eh (gcc::context *ctxt)
4869 {
4870   return new pass_cleanup_eh (ctxt);
4871 }
4872 
4873 /* Disable warnings about missing quoting in GCC diagnostics for
4874    the verification errors.  Their format strings don't follow GCC
4875    diagnostic conventions but are only used for debugging.  */
4876 #if __GNUC__ >= 10
4877 #  pragma GCC diagnostic push
4878 #  pragma GCC diagnostic ignored "-Wformat-diag"
4879 #endif
4880 
4881 /* Verify that the BB containing STMT as its last statement has precisely the
4882    edge that make_eh_edges would create.  */
4883 
4884 DEBUG_FUNCTION bool
4885 verify_eh_edges (gimple *stmt)
4886 {
4887   basic_block bb = gimple_bb (stmt);
4888   eh_landing_pad lp = NULL;
4889   int lp_nr;
4890   edge_iterator ei;
4891   edge e, eh_edge;
4892 
4893   lp_nr = lookup_stmt_eh_lp (stmt);
4894   if (lp_nr > 0)
4895     lp = get_eh_landing_pad_from_number (lp_nr);
4896 
4897   eh_edge = NULL;
4898   FOR_EACH_EDGE (e, ei, bb->succs)
4899     {
4900       if (e->flags & EDGE_EH)
4901 	{
4902 	  if (eh_edge)
4903 	    {
4904 	      error ("BB %i has multiple EH edges", bb->index);
4905 	      return true;
4906 	    }
4907 	  else
4908 	    eh_edge = e;
4909 	}
4910     }
4911 
4912   if (lp == NULL)
4913     {
4914       if (eh_edge)
4915 	{
4916 	  error ("BB %i cannot throw but has an EH edge", bb->index);
4917 	  return true;
4918 	}
4919       return false;
4920     }
4921 
4922   if (!stmt_could_throw_p (cfun, stmt))
4923     {
4924       error ("BB %i last statement has incorrectly set lp", bb->index);
4925       return true;
4926     }
4927 
4928   if (eh_edge == NULL)
4929     {
4930       error ("BB %i is missing an EH edge", bb->index);
4931       return true;
4932     }
4933 
4934   if (eh_edge->dest != label_to_block (cfun, lp->post_landing_pad))
4935     {
4936       error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4937       return true;
4938     }
4939 
4940   return false;
4941 }
4942 
4943 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */
4944 
4945 DEBUG_FUNCTION bool
4946 verify_eh_dispatch_edge (geh_dispatch *stmt)
4947 {
4948   eh_region r;
4949   eh_catch c;
4950   basic_block src, dst;
4951   bool want_fallthru = true;
4952   edge_iterator ei;
4953   edge e, fall_edge;
4954 
4955   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4956   src = gimple_bb (stmt);
4957 
4958   FOR_EACH_EDGE (e, ei, src->succs)
4959     gcc_assert (e->aux == NULL);
4960 
4961   switch (r->type)
4962     {
4963     case ERT_TRY:
4964       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4965 	{
4966 	  dst = label_to_block (cfun, c->label);
4967 	  e = find_edge (src, dst);
4968 	  if (e == NULL)
4969 	    {
4970 	      error ("BB %i is missing an edge", src->index);
4971 	      return true;
4972 	    }
4973 	  e->aux = (void *)e;
4974 
4975 	  /* A catch-all handler doesn't have a fallthru.  */
4976 	  if (c->type_list == NULL)
4977 	    {
4978 	      want_fallthru = false;
4979 	      break;
4980 	    }
4981 	}
4982       break;
4983 
4984     case ERT_ALLOWED_EXCEPTIONS:
4985       dst = label_to_block (cfun, r->u.allowed.label);
4986       e = find_edge (src, dst);
4987       if (e == NULL)
4988 	{
4989 	  error ("BB %i is missing an edge", src->index);
4990 	  return true;
4991 	}
4992       e->aux = (void *)e;
4993       break;
4994 
4995     default:
4996       gcc_unreachable ();
4997     }
4998 
4999   fall_edge = NULL;
5000   FOR_EACH_EDGE (e, ei, src->succs)
5001     {
5002       if (e->flags & EDGE_FALLTHRU)
5003 	{
5004 	  if (fall_edge != NULL)
5005 	    {
5006 	      error ("BB %i too many fallthru edges", src->index);
5007 	      return true;
5008 	    }
5009 	  fall_edge = e;
5010 	}
5011       else if (e->aux)
5012 	e->aux = NULL;
5013       else
5014 	{
5015 	  error ("BB %i has incorrect edge", src->index);
5016 	  return true;
5017 	}
5018     }
5019   if ((fall_edge != NULL) ^ want_fallthru)
5020     {
5021       error ("BB %i has incorrect fallthru edge", src->index);
5022       return true;
5023     }
5024 
5025   return false;
5026 }
5027 
5028 #if __GNUC__ >= 10
5029 #  pragma GCC diagnostic pop
5030 #endif
5031