/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "calls.h"
#include "except.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "cfgloop.h"
#include "gimple-low.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"
#include "gimplify.h"

/* In some instances a tree and a gimple statement need to be stored in
   the same table, e.g. in hash tables.  This union makes that possible.  */
typedef union {tree *tp; tree t; gimple *g;} treemple;

/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */
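
/* For illustration: a statement that can throw to the landing pad with
   index 1 is recorded in the table as STMT -> 1, while a statement inside
   a MUST_NOT_THROW region with index 2 is recorded as STMT -> -2;
   lookup_stmt_eh_lp_fn below decodes this convention.  */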

/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple *t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}

/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple *t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
	lp = gen_eh_landing_pad (region);
      else
	gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}


/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}


/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple *t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, const gimple *t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (const_cast <gimple *> (t));
  return lp_nr ? *lp_nr : 0;
}

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (const gimple *t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */
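
/* For example (a sketch, not taken from any particular test case), in

	try { L1: ...; goto L2; } finally { ... }
	L2: ...

   the first pass records L1 as a child of the GIMPLE_TRY node but not
   L2, so the second pass can tell that the goto escapes the try body.  */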

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gtry *parent;
};

/* Hashtable helpers.  */

struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
{
  static inline hashval_t hash (const finally_tree_node *);
  static inline bool equal (const finally_tree_node *,
			    const finally_tree_node *);
};

inline hashval_t
finally_tree_hasher::hash (const finally_tree_node *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const finally_tree_node *v,
			    const finally_tree_node *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;

static void
record_in_finally_tree (treemple child, gtry *parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple *stmt, gtry *region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gtry *region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple *stmt, gtry *region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (as_a <glabel *> (stmt));
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt),
				  as_a <gtry *> (stmt));
	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (
				 as_a <gcatch *> (stmt)),
			      region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      {
	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
	collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
	collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
      }
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }
}


/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple *target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
	return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */
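
/* To sketch the mechanism: a "return x;" escaping a try/finally is
   replaced by REPL_STMT, which becomes a goto to the lowered finally
   code, while CONT_STMT keeps the original return so that it can be
   re-emitted after the finally code has run (see do_return_redirection
   and do_goto_redirection below).  */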

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple *cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  int is_label;
};

/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;

  /* Outer non-cleanup region.  */
  eh_region outer_non_cleanup;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gtry *try_finally_expr;
  gtry *top_p;

  /* While top_p is being lowered it is usually expanded into multiple
     statements; the following field stores that sequence.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple *, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20
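
/* Queues smaller than LARGE_GOTO_QUEUE are scanned linearly; for larger
   ones find_goto_replacement builds a hash map once and uses it for all
   subsequent lookups.  */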

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
	if (tf->goto_queue[i].stmt.g == stmt.g)
	  return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
	{
	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
						  &tf->goto_queue[i]);
	  gcc_assert (!existed);
	}
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
				gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}

/* The real work of replace_goto_queue.  Returns with TSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
		      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
	{
	  gimple_stmt_iterator i;
	  seq = gimple_seq_copy (seq);
	  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
	    gimple_set_location (gsi_stmt (i), gimple_location (stmt));
	  gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
	  gsi_remove (gsi, false);
	  return;
	}
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
				      as_a <gcatch *> (stmt)),
				    tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      {
	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
	replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
				      tf);
	replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
				      tf);
      }
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}

/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
		      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
         = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}

/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
			    location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
	record_in_goto_queue_label (tf, new_stmt,
				    gimple_cond_true_label (cond_stmt),
				    EXPR_LOCATION (*new_stmt.tp));
	new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
	record_in_goto_queue_label (tf, new_stmt,
				    gimple_cond_false_label (cond_stmt),
				    EXPR_LOCATION (*new_stmt.tp));
      }
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
				  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}


#if CHECKING_P
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state,
			     gswitch *switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple *x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

	int x;
	int foo (void)
	{
	  x = 0;
	  try {
	    return x;
	  } finally {
	    x++;
	  }
	}

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
		     struct leh_tf_state *tf)
{
  ggoto *x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  glabel *x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gresx *x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
	break;
      region = region->outer;
      if (region == NULL)
	break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
	try { body; } catch { stuff; }
   to
	normal_sequence:
	  body;
	  over:
	eh_sequence:
	  landing_pad:
	  stuff;
	  goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gtry *tp, eh_region region, tree over)
{
  gimple *x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
	over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the statement sequence SEQ,
   remapping locals.  Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
			     location_t loc)
{
  gtry *region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      /* We duplicate __builtin_stack_restore at -O0 in the hope of eliminating
	 it on the EH paths.  When it is not eliminated, make it transparent in
	 the debug info.  */
      if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
	gimple_set_location (stmt, UNKNOWN_LOCATION);
      else if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, loc);
	  gimple_set_block (stmt, block);
	}
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}

/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline geh_else *
get_eh_else (gimple_seq finally)
{
  gimple *x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return as_a <geh_else *> (x);
    }
  return NULL;
}

/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

	try {
	  finally_block;
	} catch {
	  if (fintmp == eh_edge)
	    protect_cleanup_actions;
	}

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
			       struct leh_state *this_state,
			       struct leh_tf_state *tf)
{
  gimple_seq finally = gimple_try_cleanup (tf->top_p);

  /* EH_ELSE doesn't come from user code; only compiler generated stuff.
     It does need to be handled here, so as to separate the (different)
     EH path from the normal path.  But we should not attempt to wrap
     it with a must-not-throw node (which indeed gets in the way).  */
  if (geh_else *eh_else = get_eh_else (finally))
    {
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
      finally = gimple_eh_else_e_body (eh_else);

      /* Let the ELSE see the exception that's being processed, but
	 since the cleanup is outside the try block, process it with
	 outer_state, otherwise it may be used as a cleanup for
	 itself, and Bad Things (TM) ensue.  */
      eh_region save_ehp = outer_state->ehp_region;
      outer_state->ehp_region = this_state->cur_region;
      lower_eh_constructs_1 (outer_state, &finally);
      outer_state->ehp_region = save_ehp;
    }
  else
    {
      /* First check for nothing to do.  */
      if (lang_hooks.eh_protect_cleanup_actions == NULL)
	return;
      tree actions = lang_hooks.eh_protect_cleanup_actions ();
      if (actions == NULL)
	return;

      if (this_state)
	finally = lower_try_finally_dup_block (finally, outer_state,
	  gimple_location (tf->try_finally_expr));

      /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
	 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
	 to be in an enclosing scope, but needs to be implemented at this level
	 to avoid a nesting violation (see wrap_temporary_cleanups in
	 cp/decl.c).  Since it's logically at an outer level, we should call
	 terminate before we get to it, so strip it away before adding the
	 MUST_NOT_THROW filter.  */
      gimple_stmt_iterator gsi = gsi_start (finally);
      gimple *x = gsi_stmt (gsi);
      if (gimple_code (x) == GIMPLE_TRY
	  && gimple_try_kind (x) == GIMPLE_TRY_CATCH
	  && gimple_try_catch_is_cleanup (x))
	{
	  gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
	  gsi_remove (&gsi, false);
	}

      /* Wrap the block with protect_cleanup_actions as the action.  */
      geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
      gtry *try_stmt = gimple_build_try (finally,
					 gimple_seq_alloc_with_stmt (eh_mnt),
					 GIMPLE_TRY_CATCH);
      finally = lower_eh_must_not_throw (outer_state, try_stmt);
    }

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (gimple_seq_may_fallthru (finally))
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}

/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */
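
/* For instance, a finally block that ends in a call to a noreturn
   function cannot fall through: every escaping goto and return is
   simply redirected to a label at the head of the finally code, and
   no outgoing edges need to be created at all.  */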

static void
lower_try_finally_nofallthru (struct leh_state *state,
			      struct leh_tf_state *tf)
{
  tree lab;
  gimple *x;
  geh_else *eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	}
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  emit_post_landing_pad (&eh_seq, tf->region);

	  x = gimple_build_goto (lab);
	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
	  gimple_seq_add_stmt (&eh_seq, x);
	}
    }
}

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */
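
/* A typical instance (a sketch) is a try block whose only exit is a
   single return: the finally code is emitted exactly once, inline, and
   the original return is placed immediately after it.  */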

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  geh_else *eh_else;
  glabel *label_stmt;
  gimple *x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      if (tf->may_throw)
	finally = gimple_eh_else_e_body (eh_else);
      else
	finally = gimple_eh_else_n_body (eh_else);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
	  gimple_set_block (stmt, block);
	}
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
	 the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  label_stmt = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
	{
	  /* Reachable by goto to fallthru label only.  Redirect it
	     to the new label (already created, sadly), and do not
	     emit the final branch out, or the fallthru label.  */
	  tf->fallthru_label = NULL;
	  return;
	}
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */
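
/* As a sketch of the transformation, with two escaping destinations

	try { if (p) goto L1; else goto L2; } finally { f(); }

   becomes roughly

	if (p) goto L1'; else goto L2';
	L1': f(); goto L1;
	L2': f(); goto L2;

   at the cost of one copy of the finally code per destination.  */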

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple *x;
  geh_else *eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
	 since it is only emitted once.  */
      if (eh_else)
	seq = gimple_eh_else_e_body (eh_else);
      else
	seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
	struct goto_queue_node *q;
	tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
	{
	  index = q->index < 0 ? return_index : q->index;

	  if (!labels[index].q)
	    labels[index].q = q;
	}

      for (index = 0; index < return_index + 1; index++)
	{
	  tree lab;

	  q = labels[index].q;
	  if (! q)
	    continue;

	  lab = labels[index].label
	    = create_artificial_label (tf_loc);

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);

	  x = gimple_build_label (lab);
	  gimple_seq_add_stmt (&new_stmt, x);

	  seq = lower_try_finally_dup_block (finally, state, q->location);
	  lower_eh_constructs_1 (state, &seq);
	  gimple_seq_add_seq (&new_stmt, seq);

	  gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
	  maybe_record_in_goto_queue (state, q->cont_stmt);
	}

      for (q = tf->goto_queue; q < qe; q++)
	{
	  tree lab;

	  index = q->index < 0 ? return_index : q->index;

	  if (labels[index].q == q)
	    continue;

	  lab = labels[index].label;

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);
	}

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */
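
/* Roughly, each incoming edge is rewritten as

	finally_tmp = <edge's index>; goto finally_label;

   and the finally code is followed by

	switch (finally_tmp)
	  {
	  case <index>: goto <original destination>;
	  ...
	  }

   so the finally block itself is emitted only once.  */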

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  auto_vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple *x;
  geh_else *eh_else;
  tree tmp;
  gimple *switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple *> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence in which
     the code is laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node,
					      fallthru_index));
      gimple_set_location (x, finally_loc);
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (finally_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, finally_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	  emit_resx (&eh_seq, tf->region);
	}

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node,
						  return_index));
	  gimple_seq_add_stmt (&mod, x);
	  do_return_redirection (q, finally_label, mod);
	  switch_id = return_index;
	}
      else
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node, q->index));
	  gimple_seq_add_stmt (&mod, x);
	  do_goto_redirection (q, finally_label, mod, tf);
	  switch_id = q->index;
	}

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
	  tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
				       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
	    cont_map = new hash_map<tree, gimple *>;
          cont_map->put (case_lab, q->cont_stmt);
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple *cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
				     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}

/* Decide whether or not we are going to duplicate the finally block.
   The main consideration is preventing egregious code growth: we
   estimate the size of the finally block, multiply that by the number
   of copies we'd need to make, and compare against the estimate of the
   size of the switch machinery we'd have to add.  */
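
/* A worked instance of the estimate below: a finally block of 3 insns
   with 4 destinations gives f_estimate = (3 + 1) * 4 = 16 against
   sw_estimate = 10 + 2 * 4 = 18, so when optimizing for size the block
   is still duplicated; with 8 destinations the switch wins instead
   (f_estimate = 32 vs. sw_estimate = 26).  */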

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  geh_else *eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
        return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  /* Duplicate __builtin_stack_restore in the hope of eliminating it
	     on the EH paths and, consequently, useless cleanups.  */
	  gimple *stmt = gsi_stmt (gsi);
	  if (!is_gimple_debug (stmt)
	      && !gimple_clobber_p (stmt)
	      && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
	    return false;
	}
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = estimate_num_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}

/* STATE is the current lowering state.  Return TRUE if a cleanup region
   enclosed by STATE's current region, or that region itself, would be
   unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */
1637 
1638 static bool
1639 cleanup_is_dead_in (leh_state *state)
1640 {
1641   if (flag_checking)
1642     {
1643       eh_region reg = state->cur_region;
1644       while (reg && reg->type == ERT_CLEANUP)
1645 	reg = reg->outer;
1646 
1647       gcc_assert (reg == state->outer_non_cleanup);
1648     }
1649 
1650   eh_region reg = state->outer_non_cleanup;
1651   return (reg && reg->type == ERT_MUST_NOT_THROW);
1652 }
1653 
1654 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY nodes
1655    to a sequence of labels and blocks, plus the exception region trees
1656    that record all the magic.  This is complicated by the need to
1657    arrange for the FINALLY block to be executed on all exits.  */
1658 
1659 static gimple_seq
1660 lower_try_finally (struct leh_state *state, gtry *tp)
1661 {
1662   struct leh_tf_state this_tf;
1663   struct leh_state this_state;
1664   int ndests;
1665   gimple_seq old_eh_seq;
1666 
1667   /* Process the try block.  */
1668 
1669   memset (&this_tf, 0, sizeof (this_tf));
1670   this_tf.try_finally_expr = tp;
1671   this_tf.top_p = tp;
1672   this_tf.outer = state;
1673   if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state))
1674     {
1675       this_tf.region = gen_eh_region_cleanup (state->cur_region);
1676       this_state.cur_region = this_tf.region;
1677     }
1678   else
1679     {
1680       this_tf.region = NULL;
1681       this_state.cur_region = state->cur_region;
1682     }
1683 
1684   this_state.outer_non_cleanup = state->outer_non_cleanup;
1685   this_state.ehp_region = state->ehp_region;
1686   this_state.tf = &this_tf;
1687 
1688   old_eh_seq = eh_seq;
1689   eh_seq = NULL;
1690 
1691   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1692 
1693   /* Determine if the try block is escaped through the bottom.  */
1694   this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1695 
1696   /* Determine if any exceptions are possible within the try block.  */
1697   if (this_tf.region)
1698     this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1699   if (this_tf.may_throw)
1700     honor_protect_cleanup_actions (state, &this_state, &this_tf);
1701 
1702   /* Determine how many edges (still) reach the finally block.  Or rather,
1703      how many destinations are reached by the finally block.  Use this to
1704      determine how we process the finally block itself.  */
1705 
1706   ndests = this_tf.dest_array.length ();
1707   ndests += this_tf.may_fallthru;
1708   ndests += this_tf.may_return;
1709   ndests += this_tf.may_throw;
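  /* MAY_FALLTHRU, MAY_RETURN and MAY_THROW are booleans, so each adds
     either zero or one destination on top of those already collected
     in dest_array.  */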
1710 
1711   /* If the FINALLY block is not reachable, dike it out.  */
1712   if (ndests == 0)
1713     {
1714       gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1715       gimple_try_set_cleanup (tp, NULL);
1716     }
1717   /* If the finally block doesn't fall through, then any destination
1718      we might try to impose there isn't reached either.  There may be
1719      some minor amount of cleanup and redirection still needed.  */
1720   else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1721     lower_try_finally_nofallthru (state, &this_tf);
1722 
1723   /* We can easily special-case redirection to a single destination.  */
1724   else if (ndests == 1)
1725     lower_try_finally_onedest (state, &this_tf);
1726   else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1727 				    gimple_try_cleanup (tp)))
1728     lower_try_finally_copy (state, &this_tf);
1729   else
1730     lower_try_finally_switch (state, &this_tf);
1731 
1732   /* If someone requested we add a label at the end of the transformed
1733      block, do so.  */
1734   if (this_tf.fallthru_label)
1735     {
1736       /* This must be reached only if ndests == 0. */
1737       gimple *x = gimple_build_label (this_tf.fallthru_label);
1738       gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1739     }
1740 
1741   this_tf.dest_array.release ();
1742   free (this_tf.goto_queue);
1743   if (this_tf.goto_queue_map)
1744     delete this_tf.goto_queue_map;
1745 
1746   /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1747      If there was no old eh_seq, then the append is trivially already done.  */
1748   if (old_eh_seq)
1749     {
1750       if (eh_seq == NULL)
1751 	eh_seq = old_eh_seq;
1752       else
1753 	{
1754 	  gimple_seq new_eh_seq = eh_seq;
1755 	  eh_seq = old_eh_seq;
1756 	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1757 	}
1758     }
1759 
1760   return this_tf.top_p_seq;
1761 }
1762 
1763 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
1764    list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1765    exception region trees that record all the magic.  */
1766 
1767 static gimple_seq
1768 lower_catch (struct leh_state *state, gtry *tp)
1769 {
1770   eh_region try_region = NULL;
1771   struct leh_state this_state = *state;
1772   gimple_stmt_iterator gsi;
1773   tree out_label;
1774   gimple_seq new_seq, cleanup;
1775   gimple *x;
1776   geh_dispatch *eh_dispatch;
1777   location_t try_catch_loc = gimple_location (tp);
1778   location_t catch_loc = UNKNOWN_LOCATION;
1779 
1780   if (flag_exceptions)
1781     {
1782       try_region = gen_eh_region_try (state->cur_region);
1783       this_state.cur_region = try_region;
1784       this_state.outer_non_cleanup = this_state.cur_region;
1785     }
1786 
1787   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1788 
1789   if (!eh_region_may_contain_throw (try_region))
1790     return gimple_try_eval (tp);
1791 
1792   new_seq = NULL;
1793   eh_dispatch = gimple_build_eh_dispatch (try_region->index);
1794   gimple_seq_add_stmt (&new_seq, eh_dispatch);
1795   emit_resx (&new_seq, try_region);
1796 
1797   this_state.cur_region = state->cur_region;
1798   this_state.outer_non_cleanup = state->outer_non_cleanup;
1799   this_state.ehp_region = try_region;
1800 
1801   /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1802      itself, so that e.g. for coverage purposes the nested cleanups don't
1803      appear before the cleanup body.  See PR64634 for details.  */
1804   gimple_seq old_eh_seq = eh_seq;
1805   eh_seq = NULL;
1806 
1807   out_label = NULL;
1808   cleanup = gimple_try_cleanup (tp);
1809   for (gsi = gsi_start (cleanup);
1810        !gsi_end_p (gsi);
1811        gsi_next (&gsi))
1812     {
1813       eh_catch c;
1814       gcatch *catch_stmt;
1815       gimple_seq handler;
1816 
1817       catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1818       if (catch_loc == UNKNOWN_LOCATION)
1819 	catch_loc = gimple_location (catch_stmt);
1820       c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1821 
1822       handler = gimple_catch_handler (catch_stmt);
1823       lower_eh_constructs_1 (&this_state, &handler);
1824 
1825       c->label = create_artificial_label (UNKNOWN_LOCATION);
1826       x = gimple_build_label (c->label);
1827       gimple_seq_add_stmt (&new_seq, x);
1828 
1829       gimple_seq_add_seq (&new_seq, handler);
1830 
1831       if (gimple_seq_may_fallthru (new_seq))
1832 	{
1833 	  if (!out_label)
1834 	    out_label = create_artificial_label (try_catch_loc);
1835 
1836 	  x = gimple_build_goto (out_label);
1837 	  gimple_seq_add_stmt (&new_seq, x);
1838 	}
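      /* An empty type list denotes a catch-all handler; any handlers
	 after it would be unreachable, so stop emitting them.  */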
1839       if (!c->type_list)
1840 	break;
1841     }
1842 
1843   /* Try to set a location on the dispatching construct to avoid inheriting
1844      the location of the previous statement.  */
1845   gimple_set_location (eh_dispatch, catch_loc);
1846 
1847   gimple_try_set_cleanup (tp, new_seq);
1848 
1849   gimple_seq new_eh_seq = eh_seq;
1850   eh_seq = old_eh_seq;
1851   gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1852   gimple_seq_add_seq (&eh_seq, new_eh_seq);
1853   return ret_seq;
1854 }
1855 
1856 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
1857    GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1858    region trees that record all the magic.  */
1859 
1860 static gimple_seq
1861 lower_eh_filter (struct leh_state *state, gtry *tp)
1862 {
1863   struct leh_state this_state = *state;
1864   eh_region this_region = NULL;
1865   gimple *inner, *x;
1866   gimple_seq new_seq;
1867 
1868   inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1869 
1870   if (flag_exceptions)
1871     {
1872       this_region = gen_eh_region_allowed (state->cur_region,
1873 				           gimple_eh_filter_types (inner));
1874       this_state.cur_region = this_region;
1875       this_state.outer_non_cleanup = this_state.cur_region;
1876     }
1877 
1878   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1879 
1880   if (!eh_region_may_contain_throw (this_region))
1881     return gimple_try_eval (tp);
1882 
1883   this_state.cur_region = state->cur_region;
1884   this_state.ehp_region = this_region;
1885 
1886   new_seq = NULL;
1887   x = gimple_build_eh_dispatch (this_region->index);
1888   gimple_set_location (x, gimple_location (tp));
1889   gimple_seq_add_stmt (&new_seq, x);
1890   emit_resx (&new_seq, this_region);
1891 
1892   this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1893   x = gimple_build_label (this_region->u.allowed.label);
1894   gimple_seq_add_stmt (&new_seq, x);
1895 
1896   lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1897   gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1898 
1899   gimple_try_set_cleanup (tp, new_seq);
1900 
1901   return frob_into_branch_around (tp, this_region, NULL);
1902 }
1903 
1904 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
1905    a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1906    plus the exception region trees that record all the magic.  */
1907 
1908 static gimple_seq
1909 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1910 {
1911   struct leh_state this_state = *state;
1912 
1913   if (flag_exceptions)
1914     {
1915       gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1916       eh_region this_region;
1917 
1918       this_region = gen_eh_region_must_not_throw (state->cur_region);
1919       this_region->u.must_not_throw.failure_decl
1920 	= gimple_eh_must_not_throw_fndecl (
1921 	    as_a <geh_mnt *> (inner));
1922       this_region->u.must_not_throw.failure_loc
1923 	= LOCATION_LOCUS (gimple_location (tp));
1924 
1925       /* In order to get mangling applied to this decl, we must mark it
1926 	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
1927 	 needs to happen.  */
1928       TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1929 
1930       this_state.cur_region = this_region;
1931       this_state.outer_non_cleanup = this_state.cur_region;
1932     }
1933 
1934   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1935 
1936   return gimple_try_eval (tp);
1937 }
1938 
1939 /* Implement a cleanup expression.  This is similar to try-finally,
1940    except that we only execute the cleanup block for exception edges.  */
1941 
1942 static gimple_seq
1943 lower_cleanup (struct leh_state *state, gtry *tp)
1944 {
1945   struct leh_state this_state = *state;
1946   eh_region this_region = NULL;
1947   struct leh_tf_state fake_tf;
1948   gimple_seq result;
1949   bool cleanup_dead = cleanup_is_dead_in (state);
1950 
1951   if (flag_exceptions && !cleanup_dead)
1952     {
1953       this_region = gen_eh_region_cleanup (state->cur_region);
1954       this_state.cur_region = this_region;
1955       this_state.outer_non_cleanup = state->outer_non_cleanup;
1956     }
1957 
1958   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1959 
1960   if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1961     return gimple_try_eval (tp);
1962 
1963   /* Build enough of a try-finally state so that we can reuse
1964      honor_protect_cleanup_actions.  */
1965   memset (&fake_tf, 0, sizeof (fake_tf));
1966   fake_tf.top_p = fake_tf.try_finally_expr = tp;
1967   fake_tf.outer = state;
1968   fake_tf.region = this_region;
1969   fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1970   fake_tf.may_throw = true;
1971 
1972   honor_protect_cleanup_actions (state, NULL, &fake_tf);
1973 
1974   if (fake_tf.may_throw)
1975     {
1976       /* In this case honor_protect_cleanup_actions had nothing to do,
1977 	 and we should process this normally.  */
1978       lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1979       result = frob_into_branch_around (tp, this_region,
1980                                         fake_tf.fallthru_label);
1981     }
1982   else
1983     {
1984       /* In this case honor_protect_cleanup_actions did nearly all of
1985 	 the work.  All we have left is to append the fallthru_label.  */
1986 
1987       result = gimple_try_eval (tp);
1988       if (fake_tf.fallthru_label)
1989 	{
1990 	  gimple *x = gimple_build_label (fake_tf.fallthru_label);
1991 	  gimple_seq_add_stmt (&result, x);
1992 	}
1993     }
1994   return result;
1995 }
1996 
1997 /* Main loop for lowering eh constructs. Also moves gsi to the next
1998    statement. */
1999 
2000 static void
2001 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
2002 {
2003   gimple_seq replace;
2004   gimple *x;
2005   gimple *stmt = gsi_stmt (*gsi);
2006 
2007   switch (gimple_code (stmt))
2008     {
2009     case GIMPLE_CALL:
2010       {
2011 	tree fndecl = gimple_call_fndecl (stmt);
2012 	tree rhs, lhs;
2013 
2014 	if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
2015 	  switch (DECL_FUNCTION_CODE (fndecl))
2016 	    {
2017 	    case BUILT_IN_EH_POINTER:
2018 	      /* The front end may have generated a call to
2019 		 __builtin_eh_pointer (0) within a catch region.  Replace
2020 		 this zero argument with the current catch region number.  */
2021 	      if (state->ehp_region)
2022 		{
2023 		  tree nr = build_int_cst (integer_type_node,
2024 					   state->ehp_region->index);
2025 		  gimple_call_set_arg (stmt, 0, nr);
2026 		}
2027 	      else
2028 		{
2029 		  /* The user has done something silly.  Remove it.  */
2030 		  rhs = null_pointer_node;
2031 		  goto do_replace;
2032 		}
2033 	      break;
2034 
2035 	    case BUILT_IN_EH_FILTER:
2036 	      /* ??? This should never appear, but since it's a builtin it
2037 		 is accessible to abuse by users.  Just remove it and
2038 		 replace the use with the arbitrary value zero.  */
2039 	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2040 	    do_replace:
2041 	      lhs = gimple_call_lhs (stmt);
2042 	      x = gimple_build_assign (lhs, rhs);
2043 	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
2044 	      /* FALLTHRU */
2045 
2046 	    case BUILT_IN_EH_COPY_VALUES:
2047 	      /* Likewise this should not appear.  Remove it.  */
2048 	      gsi_remove (gsi, true);
2049 	      return;
2050 
2051 	    default:
2052 	      break;
2053 	    }
2054       }
2055       /* FALLTHRU */
2056 
2057     case GIMPLE_ASSIGN:
2058       /* If the stmt can throw, use a new temporary for the assignment
2059          to a LHS.  This makes sure the old value of the LHS is
2060 	 available on the EH edge.  Only do so for statements that
2061 	 potentially fall through (e.g. not noreturn calls); otherwise
2062 	 this new assignment might create fake fallthru regions.  */
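      /* A sketch of the transform below (names illustrative):
	   lhs = maybe_throwing_call ();
	 becomes
	   tmp = maybe_throwing_call ();
	   lhs = tmp;
	 so that LHS still holds its old value along the EH edge.  */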
2063       if (stmt_could_throw_p (cfun, stmt)
2064 	  && gimple_has_lhs (stmt)
2065 	  && gimple_stmt_may_fallthru (stmt)
2066 	  && !tree_could_throw_p (gimple_get_lhs (stmt))
2067 	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2068 	{
2069 	  tree lhs = gimple_get_lhs (stmt);
2070 	  tree tmp = create_tmp_var (TREE_TYPE (lhs));
2071 	  gimple *s = gimple_build_assign (lhs, tmp);
2072 	  gimple_set_location (s, gimple_location (stmt));
2073 	  gimple_set_block (s, gimple_block (stmt));
2074 	  gimple_set_lhs (stmt, tmp);
2075 	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2076 	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2077 	    DECL_GIMPLE_REG_P (tmp) = 1;
2078 	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
2079 	}
2080       /* Look for things that can throw exceptions, and record them.  */
2081       if (state->cur_region && stmt_could_throw_p (cfun, stmt))
2082 	{
2083 	  record_stmt_eh_region (state->cur_region, stmt);
2084 	  note_eh_region_may_contain_throw (state->cur_region);
2085 	}
2086       break;
2087 
2088     case GIMPLE_COND:
2089     case GIMPLE_GOTO:
2090     case GIMPLE_RETURN:
2091       maybe_record_in_goto_queue (state, stmt);
2092       break;
2093 
2094     case GIMPLE_SWITCH:
2095       verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2096       break;
2097 
2098     case GIMPLE_TRY:
2099       {
2100 	gtry *try_stmt = as_a <gtry *> (stmt);
2101 	if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2102 	  replace = lower_try_finally (state, try_stmt);
2103 	else
2104 	  {
2105 	    x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2106 	    if (!x)
2107 	      {
2108 		replace = gimple_try_eval (try_stmt);
2109 		lower_eh_constructs_1 (state, &replace);
2110 	      }
2111 	    else
2112 	      switch (gimple_code (x))
2113 		{
2114 		case GIMPLE_CATCH:
2115 		  replace = lower_catch (state, try_stmt);
2116 		  break;
2117 		case GIMPLE_EH_FILTER:
2118 		  replace = lower_eh_filter (state, try_stmt);
2119 		  break;
2120 		case GIMPLE_EH_MUST_NOT_THROW:
2121 		  replace = lower_eh_must_not_throw (state, try_stmt);
2122 		  break;
2123 		case GIMPLE_EH_ELSE:
2124 		  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
2125 		  gcc_unreachable ();
2126 		default:
2127 		  replace = lower_cleanup (state, try_stmt);
2128 		  break;
2129 		}
2130 	  }
2131       }
2132 
2133       /* Remove the old stmt and insert the transformed sequence
2134 	 instead. */
2135       gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2136       gsi_remove (gsi, true);
2137 
2138       /* Return since we don't want gsi_next ().  */
2139       return;
2140 
2141     case GIMPLE_EH_ELSE:
2142       /* We should be eliminating this in lower_try_finally et al.  */
2143       gcc_unreachable ();
2144 
2145     default:
2146       /* A type, a decl, or some kind of statement that we're not
2147 	 interested in.  Don't walk them.  */
2148       break;
2149     }
2150 
2151   gsi_next (gsi);
2152 }
2153 
2154 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2155 
2156 static void
2157 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2158 {
2159   gimple_stmt_iterator gsi;
2160   for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2161     lower_eh_constructs_2 (state, &gsi);
2162 }
2163 
2164 namespace {
2165 
2166 const pass_data pass_data_lower_eh =
2167 {
2168   GIMPLE_PASS, /* type */
2169   "eh", /* name */
2170   OPTGROUP_NONE, /* optinfo_flags */
2171   TV_TREE_EH, /* tv_id */
2172   PROP_gimple_lcf, /* properties_required */
2173   PROP_gimple_leh, /* properties_provided */
2174   0, /* properties_destroyed */
2175   0, /* todo_flags_start */
2176   0, /* todo_flags_finish */
2177 };
2178 
2179 class pass_lower_eh : public gimple_opt_pass
2180 {
2181 public:
2182   pass_lower_eh (gcc::context *ctxt)
2183     : gimple_opt_pass (pass_data_lower_eh, ctxt)
2184   {}
2185 
2186   /* opt_pass methods: */
2187   virtual unsigned int execute (function *);
2188 
2189 }; // class pass_lower_eh
2190 
2191 unsigned int
2192 pass_lower_eh::execute (function *fun)
2193 {
2194   struct leh_state null_state;
2195   gimple_seq bodyp;
2196 
2197   bodyp = gimple_body (current_function_decl);
2198   if (bodyp == NULL)
2199     return 0;
2200 
2201   finally_tree = new hash_table<finally_tree_hasher> (31);
2202   eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2203   memset (&null_state, 0, sizeof (null_state));
2204 
2205   collect_finally_tree_1 (bodyp, NULL);
2206   lower_eh_constructs_1 (&null_state, &bodyp);
2207   gimple_set_body (current_function_decl, bodyp);
2208 
2209   /* We assume there's a return statement, or something, at the end of
2210      the function, and thus plopping the EH sequence afterward won't
2211      change anything.  */
2212   gcc_assert (!gimple_seq_may_fallthru (bodyp));
2213   gimple_seq_add_seq (&bodyp, eh_seq);
2214 
2215   /* We assume that since BODYP already existed, adding EH_SEQ to it
2216      didn't change its value, and we don't have to re-set the function.  */
2217   gcc_assert (bodyp == gimple_body (current_function_decl));
2218 
2219   delete finally_tree;
2220   finally_tree = NULL;
2221   BITMAP_FREE (eh_region_may_contain_throw_map);
2222   eh_seq = NULL;
2223 
2224   /* If this function needs a language-specific EH personality routine
2225      and the frontend didn't already set one, do so now.  */
2226   if (function_needs_eh_personality (fun) == eh_personality_lang
2227       && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2228     DECL_FUNCTION_PERSONALITY (current_function_decl)
2229       = lang_hooks.eh_personality ();
2230 
2231   return 0;
2232 }
2233 
2234 } // anon namespace
2235 
2236 gimple_opt_pass *
2237 make_pass_lower_eh (gcc::context *ctxt)
2238 {
2239   return new pass_lower_eh (ctxt);
2240 }
2241 
2242 /* Create the multiple edges from an EH_DISPATCH statement to all of
2243    the possible handlers for its EH region.  Return true if there's
2244    no fallthru edge; false if there is.  */
2245 
2246 bool
2247 make_eh_dispatch_edges (geh_dispatch *stmt)
2248 {
2249   eh_region r;
2250   eh_catch c;
2251   basic_block src, dst;
2252 
2253   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2254   src = gimple_bb (stmt);
2255 
2256   switch (r->type)
2257     {
2258     case ERT_TRY:
2259       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2260 	{
2261 	  dst = label_to_block (cfun, c->label);
2262 	  make_edge (src, dst, 0);
2263 
2264 	  /* A catch-all handler doesn't have a fallthru.  */
2265 	  if (c->type_list == NULL)
2266 	    return false;
2267 	}
2268       break;
2269 
2270     case ERT_ALLOWED_EXCEPTIONS:
2271       dst = label_to_block (cfun, r->u.allowed.label);
2272       make_edge (src, dst, 0);
2273       break;
2274 
2275     default:
2276       gcc_unreachable ();
2277     }
2278 
2279   return true;
2280 }
2281 
2282 /* Create the single EH edge from STMT to its nearest landing pad,
2283    if there is such a landing pad within the current function.  */
2284 
2285 void
2286 make_eh_edges (gimple *stmt)
2287 {
2288   basic_block src, dst;
2289   eh_landing_pad lp;
2290   int lp_nr;
2291 
2292   lp_nr = lookup_stmt_eh_lp (stmt);
2293   if (lp_nr <= 0)
2294     return;
2295 
2296   lp = get_eh_landing_pad_from_number (lp_nr);
2297   gcc_assert (lp != NULL);
2298 
2299   src = gimple_bb (stmt);
2300   dst = label_to_block (cfun, lp->post_landing_pad);
2301   make_edge (src, dst, EDGE_EH);
2302 }
2303 
2304 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2305    do not actually perform the final edge redirection.
2306 
2307    CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2308    we intend to change the destination EH region as well; this means
2309    EH_LANDING_PAD_NR must already be set on the destination block label.
2310    If false, we're being called from generic cfg manipulation code and we
2311    should preserve our place within the region tree.  */
2312 
2313 static void
2314 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2315 {
2316   eh_landing_pad old_lp, new_lp;
2317   basic_block old_bb;
2318   gimple *throw_stmt;
2319   int old_lp_nr, new_lp_nr;
2320   tree old_label, new_label;
2321   edge_iterator ei;
2322   edge e;
2323 
2324   old_bb = edge_in->dest;
2325   old_label = gimple_block_label (old_bb);
2326   old_lp_nr = EH_LANDING_PAD_NR (old_label);
2327   gcc_assert (old_lp_nr > 0);
2328   old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2329 
2330   throw_stmt = last_stmt (edge_in->src);
2331   gcc_checking_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2332 
2333   new_label = gimple_block_label (new_bb);
2334 
2335   /* Look for an existing region that might be using NEW_BB already.  */
2336   new_lp_nr = EH_LANDING_PAD_NR (new_label);
2337   if (new_lp_nr)
2338     {
2339       new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2340       gcc_assert (new_lp);
2341 
2342       /* Unless CHANGE_REGION is true, the new and old landing pad
2343 	 had better be associated with the same EH region.  */
2344       gcc_assert (change_region || new_lp->region == old_lp->region);
2345     }
2346   else
2347     {
2348       new_lp = NULL;
2349       gcc_assert (!change_region);
2350     }
2351 
2352   /* Notice when we redirect the last EH edge away from OLD_BB.  */
2353   FOR_EACH_EDGE (e, ei, old_bb->preds)
2354     if (e != edge_in && (e->flags & EDGE_EH))
2355       break;
2356 
2357   if (new_lp)
2358     {
2359       /* NEW_LP already exists.  If there are still edges into OLD_LP,
2360 	 there's nothing to do with the EH tree.  If there are no more
2361 	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2362 	 If CHANGE_REGION is true, then our caller is expecting to remove
2363 	 the landing pad.  */
2364       if (e == NULL && !change_region)
2365 	remove_eh_landing_pad (old_lp);
2366     }
2367   else
2368     {
2369       /* No correct landing pad exists.  If there are no more edges
2370 	 into OLD_LP, then we can simply re-use the existing landing pad.
2371 	 Otherwise, we have to create a new landing pad.  */
2372       if (e == NULL)
2373 	{
2374 	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2375 	  new_lp = old_lp;
2376 	}
2377       else
2378 	new_lp = gen_eh_landing_pad (old_lp->region);
2379       new_lp->post_landing_pad = new_label;
2380       EH_LANDING_PAD_NR (new_label) = new_lp->index;
2381     }
2382 
2383   /* Maybe move the throwing statement to the new region.  */
2384   if (old_lp != new_lp)
2385     {
2386       remove_stmt_from_eh_lp (throw_stmt);
2387       add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2388     }
2389 }
2390 
2391 /* Redirect EH edge EDGE_IN to NEW_BB.  */
2392 
2393 edge
2394 redirect_eh_edge (edge edge_in, basic_block new_bb)
2395 {
2396   redirect_eh_edge_1 (edge_in, new_bb, false);
2397   return ssa_redirect_edge (edge_in, new_bb);
2398 }
2399 
2400 /* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
2401    labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2402    The actual edge update will happen in the caller.  */
2403 
2404 void
2405 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2406 {
2407   tree new_lab = gimple_block_label (new_bb);
2408   bool any_changed = false;
2409   basic_block old_bb;
2410   eh_region r;
2411   eh_catch c;
2412 
2413   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2414   switch (r->type)
2415     {
2416     case ERT_TRY:
2417       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2418 	{
2419 	  old_bb = label_to_block (cfun, c->label);
2420 	  if (old_bb == e->dest)
2421 	    {
2422 	      c->label = new_lab;
2423 	      any_changed = true;
2424 	    }
2425 	}
2426       break;
2427 
2428     case ERT_ALLOWED_EXCEPTIONS:
2429       old_bb = label_to_block (cfun, r->u.allowed.label);
2430       gcc_assert (old_bb == e->dest);
2431       r->u.allowed.label = new_lab;
2432       any_changed = true;
2433       break;
2434 
2435     default:
2436       gcc_unreachable ();
2437     }
2438 
2439   gcc_assert (any_changed);
2440 }
2441 
2442 /* Helper function for operation_could_trap_p and stmt_could_throw_p.  */
2443 
2444 bool
2445 operation_could_trap_helper_p (enum tree_code op,
2446 			       bool fp_operation,
2447 			       bool honor_trapv,
2448 			       bool honor_nans,
2449 			       bool honor_snans,
2450 			       tree divisor,
2451 			       bool *handled)
2452 {
2453   *handled = true;
2454   switch (op)
2455     {
2456     case TRUNC_DIV_EXPR:
2457     case CEIL_DIV_EXPR:
2458     case FLOOR_DIV_EXPR:
2459     case ROUND_DIV_EXPR:
2460     case EXACT_DIV_EXPR:
2461     case CEIL_MOD_EXPR:
2462     case FLOOR_MOD_EXPR:
2463     case ROUND_MOD_EXPR:
2464     case TRUNC_MOD_EXPR:
2465     case RDIV_EXPR:
2466       if (honor_snans)
2467 	return true;
2468       if (fp_operation)
2469 	return flag_trapping_math;
2470       if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2471         return true;
2472       return false;
2473 
2474     case LT_EXPR:
2475     case LE_EXPR:
2476     case GT_EXPR:
2477     case GE_EXPR:
2478     case LTGT_EXPR:
2479       /* Some floating point comparisons may trap.  */
2480       return honor_nans;
2481 
2482     case EQ_EXPR:
2483     case NE_EXPR:
2484     case UNORDERED_EXPR:
2485     case ORDERED_EXPR:
2486     case UNLT_EXPR:
2487     case UNLE_EXPR:
2488     case UNGT_EXPR:
2489     case UNGE_EXPR:
2490     case UNEQ_EXPR:
2491       return honor_snans;
2492 
2493     case NEGATE_EXPR:
2494     case ABS_EXPR:
2495     case CONJ_EXPR:
2496       /* These operations don't trap with floating point.  */
2497       if (honor_trapv)
2498 	return true;
2499       return false;
2500 
2501     case ABSU_EXPR:
2502       /* ABSU_EXPR never traps.  */
2503       return false;
2504 
2505     case PLUS_EXPR:
2506     case MINUS_EXPR:
2507     case MULT_EXPR:
2508       /* Any floating arithmetic may trap.  */
2509       if (fp_operation && flag_trapping_math)
2510 	return true;
2511       if (honor_trapv)
2512 	return true;
2513       return false;
2514 
2515     case COMPLEX_EXPR:
2516     case CONSTRUCTOR:
2517       /* Constructing an object cannot trap.  */
2518       return false;
2519 
2520     case COND_EXPR:
2521     case VEC_COND_EXPR:
2522       /* Whether *COND_EXPR can trap depends on whether the
2523 	 first argument can trap, so signal it as not handled.
2524 	 Whether lhs is floating or not doesn't matter.  */
2525       *handled = false;
2526       return false;
2527 
2528     default:
2529       /* Any floating arithmetic may trap.  */
2530       if (fp_operation && flag_trapping_math)
2531 	return true;
2532 
2533       *handled = false;
2534       return false;
2535     }
2536 }
2537 
2538 /* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
2539    on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
2540    type operands that may trap.  If OP is a division operator, DIVISOR contains
2541    the value of the divisor.  */
2542 
2543 bool
2544 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2545 			tree divisor)
2546 {
2547   bool honor_nans = (fp_operation && flag_trapping_math
2548 		     && !flag_finite_math_only);
2549   bool honor_snans = fp_operation && flag_signaling_nans != 0;
2550   bool handled;
2551 
2552   /* This function cannot tell whether or not COND_EXPR and VEC_COND_EXPR could
2553      trap, because that depends on the respective condition op.  */
2554   gcc_assert (op != COND_EXPR && op != VEC_COND_EXPR);
2555 
2556   if (TREE_CODE_CLASS (op) != tcc_comparison
2557       && TREE_CODE_CLASS (op) != tcc_unary
2558       && TREE_CODE_CLASS (op) != tcc_binary)
2559     return false;
2560 
2561   return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2562 					honor_nans, honor_snans, divisor,
2563 					&handled);
2564 }
2565 
2566 
2567 /* Returns true if it is possible to prove that the index of
2568    an array access REF (an ARRAY_REF expression) falls into the
2569    array bounds.  */
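/* For example, given "int a[10]", the access a[3] is provably in
   bounds, while a[i] with a non-constant index i cannot be proven
   and so is treated as possibly trapping.  */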
2570 
2571 static bool
2572 in_array_bounds_p (tree ref)
2573 {
2574   tree idx = TREE_OPERAND (ref, 1);
2575   tree min, max;
2576 
2577   if (TREE_CODE (idx) != INTEGER_CST)
2578     return false;
2579 
2580   min = array_ref_low_bound (ref);
2581   max = array_ref_up_bound (ref);
2582   if (!min
2583       || !max
2584       || TREE_CODE (min) != INTEGER_CST
2585       || TREE_CODE (max) != INTEGER_CST)
2586     return false;
2587 
2588   if (tree_int_cst_lt (idx, min)
2589       || tree_int_cst_lt (max, idx))
2590     return false;
2591 
2592   return true;
2593 }
2594 
2595 /* Returns true if it is possible to prove that the range of
2596    an array access REF (an ARRAY_RANGE_REF expression) falls
2597    into the array bounds.  */
2598 
2599 static bool
2600 range_in_array_bounds_p (tree ref)
2601 {
2602   tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2603   tree range_min, range_max, min, max;
2604 
2605   range_min = TYPE_MIN_VALUE (domain_type);
2606   range_max = TYPE_MAX_VALUE (domain_type);
2607   if (!range_min
2608       || !range_max
2609       || TREE_CODE (range_min) != INTEGER_CST
2610       || TREE_CODE (range_max) != INTEGER_CST)
2611     return false;
2612 
2613   min = array_ref_low_bound (ref);
2614   max = array_ref_up_bound (ref);
2615   if (!min
2616       || !max
2617       || TREE_CODE (min) != INTEGER_CST
2618       || TREE_CODE (max) != INTEGER_CST)
2619     return false;
2620 
2621   if (tree_int_cst_lt (range_min, min)
2622       || tree_int_cst_lt (max, range_max))
2623     return false;
2624 
2625   return true;
2626 }
2627 
2628 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2629    location or floating point arithmetic.  C.f. the rtl version, may_trap_p.
2630    This routine expects only GIMPLE lhs or rhs input.  */
2631 
2632 bool
2633 tree_could_trap_p (tree expr)
2634 {
2635   enum tree_code code;
2636   bool fp_operation = false;
2637   bool honor_trapv = false;
2638   tree t, base, div = NULL_TREE;
2639 
2640   if (!expr)
2641     return false;
2642 
2643   /* In COND_EXPR and VEC_COND_EXPR only the condition may trap, but
2644      they won't appear as operands in GIMPLE form, so this is just for the
2645      GENERIC uses where it needs to recurse on the operands and so
2646      *COND_EXPR itself doesn't trap.  */
2647   if (TREE_CODE (expr) == COND_EXPR || TREE_CODE (expr) == VEC_COND_EXPR)
2648     return false;
2649 
2650   code = TREE_CODE (expr);
2651   t = TREE_TYPE (expr);
2652 
2653   if (t)
2654     {
2655       if (COMPARISON_CLASS_P (expr))
2656 	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2657       else
2658 	fp_operation = FLOAT_TYPE_P (t);
2659       honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2660     }
2661 
2662   if (TREE_CODE_CLASS (code) == tcc_binary)
2663     div = TREE_OPERAND (expr, 1);
2664   if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2665     return true;
2666 
2667  restart:
2668   switch (code)
2669     {
2670     case COMPONENT_REF:
2671     case REALPART_EXPR:
2672     case IMAGPART_EXPR:
2673     case BIT_FIELD_REF:
2674     case VIEW_CONVERT_EXPR:
2675     case WITH_SIZE_EXPR:
2676       expr = TREE_OPERAND (expr, 0);
2677       code = TREE_CODE (expr);
2678       goto restart;
2679 
2680     case ARRAY_RANGE_REF:
2681       base = TREE_OPERAND (expr, 0);
2682       if (tree_could_trap_p (base))
2683 	return true;
2684       if (TREE_THIS_NOTRAP (expr))
2685 	return false;
2686       return !range_in_array_bounds_p (expr);
2687 
2688     case ARRAY_REF:
2689       base = TREE_OPERAND (expr, 0);
2690       if (tree_could_trap_p (base))
2691 	return true;
2692       if (TREE_THIS_NOTRAP (expr))
2693 	return false;
2694       return !in_array_bounds_p (expr);
2695 
2696     case TARGET_MEM_REF:
2697     case MEM_REF:
2698       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2699 	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2700 	return true;
2701       if (TREE_THIS_NOTRAP (expr))
2702 	return false;
2703       /* We cannot prove that the access is in-bounds when we have
2704          variable-index TARGET_MEM_REFs.  */
2705       if (code == TARGET_MEM_REF
2706 	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2707 	return true;
2708       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2709 	{
2710 	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2711 	  poly_offset_int off = mem_ref_offset (expr);
2712 	  if (maybe_lt (off, 0))
2713 	    return true;
2714 	  if (TREE_CODE (base) == STRING_CST)
2715 	    return maybe_le (TREE_STRING_LENGTH (base), off);
2716 	  tree size = DECL_SIZE_UNIT (base);
2717 	  if (size == NULL_TREE
2718 	      || !poly_int_tree_p (size)
2719 	      || maybe_le (wi::to_poly_offset (size), off))
2720 	    return true;
2721 	  /* Now we are sure the first byte of the access is inside
2722 	     the object.  */
2723 	  return false;
2724 	}
2725       return true;
2726 
2727     case INDIRECT_REF:
2728       return !TREE_THIS_NOTRAP (expr);
2729 
2730     case ASM_EXPR:
2731       return TREE_THIS_VOLATILE (expr);
2732 
2733     case CALL_EXPR:
2734       t = get_callee_fndecl (expr);
2735       /* Assume that calls to weak functions may trap.  */
2736       if (!t || !DECL_P (t))
2737 	return true;
2738       if (DECL_WEAK (t))
2739 	return tree_could_trap_p (t);
2740       return false;
2741 
2742     case FUNCTION_DECL:
2743       /* Assume that accesses to weak functions may trap, unless we know
2744 	 they are certainly defined in current TU or in some other
2745 	 LTO partition.  */
2746       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2747 	{
2748 	  cgraph_node *node = cgraph_node::get (expr);
2749 	  if (node)
2750 	    node = node->function_symbol ();
2751 	  return !(node && node->in_other_partition);
2752 	}
2753       return false;
2754 
2755     case VAR_DECL:
2756       /* Assume that accesses to weak vars may trap, unless we know
2757 	 they are certainly defined in current TU or in some other
2758 	 LTO partition.  */
2759       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2760 	{
2761 	  varpool_node *node = varpool_node::get (expr);
2762 	  if (node)
2763 	    node = node->ultimate_alias_target ();
2764 	  return !(node && node->in_other_partition);
2765 	}
2766       return false;
2767 
2768     default:
2769       return false;
2770     }
2771 }
2772 
2773 /* Return non-NULL if there is an integer operation with trapping overflow
2774    we can rewrite into non-trapping.  Called via walk_tree from
2775    rewrite_to_non_trapping_overflow.  */
2776 
2777 static tree
2778 find_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2779 {
2780   if (EXPR_P (*tp)
2781       && ANY_INTEGRAL_TYPE_P (TREE_TYPE (*tp))
2782       && !operation_no_trapping_overflow (TREE_TYPE (*tp), TREE_CODE (*tp)))
2783     return *tp;
2784   if (IS_TYPE_OR_DECL_P (*tp)
2785       || (TREE_CODE (*tp) == SAVE_EXPR && data == NULL))
2786     *walk_subtrees = 0;
2787   return NULL_TREE;
2788 }
2789 
2790 /* Rewrite selected operations into unsigned arithmetic, so that they
2791    don't trap on overflow.  */
2792 
2793 static tree
2794 replace_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2795 {
2796   if (find_trapping_overflow (tp, walk_subtrees, data))
2797     {
2798       tree type = TREE_TYPE (*tp);
2799       tree utype = unsigned_type_for (type);
2800       *walk_subtrees = 0;
2801       int len = TREE_OPERAND_LENGTH (*tp);
2802       for (int i = 0; i < len; ++i)
2803 	walk_tree (&TREE_OPERAND (*tp, i), replace_trapping_overflow,
2804 		   data, (hash_set<tree> *) data);
2805 
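      /* ABS_EXPR of the most negative value overflows in the signed
	 type, so use ABSU_EXPR instead: it yields the absolute value
	 in the corresponding unsigned type and never traps, then
	 convert the result back.  */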
2806       if (TREE_CODE (*tp) == ABS_EXPR)
2807 	{
2808 	  TREE_SET_CODE (*tp, ABSU_EXPR);
2809 	  TREE_TYPE (*tp) = utype;
2810 	  *tp = fold_convert (type, *tp);
2811 	}
2812       else
2813 	{
2814 	  TREE_TYPE (*tp) = utype;
2815 	  len = TREE_OPERAND_LENGTH (*tp);
2816 	  for (int i = 0; i < len; ++i)
2817 	    TREE_OPERAND (*tp, i)
2818 	      = fold_convert (utype, TREE_OPERAND (*tp, i));
2819 	  *tp = fold_convert (type, *tp);
2820 	}
2821     }
2822   return NULL_TREE;
2823 }
2824 
2825 /* If any subexpression of EXPR can trap due to -ftrapv, rewrite it
2826    using unsigned arithmetic to avoid traps in it.  */
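/* For instance, with -ftrapv the signed addition in "a + b" may trap on
   overflow; it is rewritten along the lines of
   "(int) ((unsigned int) a + (unsigned int) b)", which cannot trap.
   (A sketch only; the conversions are inserted tree by tree by
   replace_trapping_overflow above.)  */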
2827 
2828 tree
2829 rewrite_to_non_trapping_overflow (tree expr)
2830 {
2831   if (!flag_trapv)
2832     return expr;
2833   hash_set<tree> pset;
2834   if (!walk_tree (&expr, find_trapping_overflow, &pset, &pset))
2835     return expr;
2836   expr = unshare_expr (expr);
2837   pset.empty ();
2838   walk_tree (&expr, replace_trapping_overflow, &pset, &pset);
2839   return expr;
2840 }
2841 
2842 /* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
2843    an assignment) may throw.  */
2844 
2845 static bool
2846 stmt_could_throw_1_p (gassign *stmt)
2847 {
2848   enum tree_code code = gimple_assign_rhs_code (stmt);
2849   bool honor_nans = false;
2850   bool honor_snans = false;
2851   bool fp_operation = false;
2852   bool honor_trapv = false;
2853   tree t;
2854   size_t i;
2855   bool handled, ret;
2856 
2857   if (TREE_CODE_CLASS (code) == tcc_comparison
2858       || TREE_CODE_CLASS (code) == tcc_unary
2859       || TREE_CODE_CLASS (code) == tcc_binary)
2860     {
2861       if (TREE_CODE_CLASS (code) == tcc_comparison)
2862 	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2863       else
2864 	t = gimple_expr_type (stmt);
2865       fp_operation = FLOAT_TYPE_P (t);
2866       if (fp_operation)
2867 	{
2868 	  honor_nans = flag_trapping_math && !flag_finite_math_only;
2869 	  honor_snans = flag_signaling_nans != 0;
2870 	}
2871       else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2872 	honor_trapv = true;
2873     }
2874 
2875   /* First check the LHS.  */
2876   if (tree_could_trap_p (gimple_assign_lhs (stmt)))
2877     return true;
2878 
2879   /* Check if the main expression may trap.  */
2880   ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2881 				       honor_nans, honor_snans,
2882 				       gimple_assign_rhs2 (stmt),
2883 				       &handled);
2884   if (handled)
2885     return ret;
2886 
2887   /* If the expression does not trap, see if any of the individual operands may
2888      trap.  */
2889   for (i = 1; i < gimple_num_ops (stmt); i++)
2890     if (tree_could_trap_p (gimple_op (stmt, i)))
2891       return true;
2892 
2893   return false;
2894 }
2895 
2896 
2897 /* Return true if statement STMT within FUN could throw an exception.  */
2898 
2899 bool
2900 stmt_could_throw_p (function *fun, gimple *stmt)
2901 {
2902   if (!flag_exceptions)
2903     return false;
2904 
2905   /* The only statements that can throw an exception are assignments,
2906      conditionals, calls, resx, and asms.  */
2907   switch (gimple_code (stmt))
2908     {
2909     case GIMPLE_RESX:
2910       return true;
2911 
2912     case GIMPLE_CALL:
2913       return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2914 
2915     case GIMPLE_COND:
2916       {
2917 	if (fun && !fun->can_throw_non_call_exceptions)
2918 	  return false;
2919 	gcond *cond = as_a <gcond *> (stmt);
2920 	tree lhs = gimple_cond_lhs (cond);
2921 	return operation_could_trap_p (gimple_cond_code (cond),
2922 				       FLOAT_TYPE_P (TREE_TYPE (lhs)),
2923 				       false, NULL_TREE);
2924       }
2925 
2926     case GIMPLE_ASSIGN:
2927       if ((fun && !fun->can_throw_non_call_exceptions)
2928 	  || gimple_clobber_p (stmt))
2929         return false;
2930       return stmt_could_throw_1_p (as_a <gassign *> (stmt));
2931 
2932     case GIMPLE_ASM:
2933       if (fun && !fun->can_throw_non_call_exceptions)
2934         return false;
2935       return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2936 
2937     default:
2938       return false;
2939     }
2940 }
2941 
2942 /* Return true if STMT in function FUN must be assumed necessary because of
2943    non-call exceptions.  */
2944 
2945 bool
2946 stmt_unremovable_because_of_non_call_eh_p (function *fun, gimple *stmt)
2947 {
2948   return (fun->can_throw_non_call_exceptions
2949 	  && !fun->can_delete_dead_exceptions
2950 	  && stmt_could_throw_p (fun, stmt));
2951 }
2952 
2953 /* Return true if expression T could throw an exception.  */
2954 
2955 bool
2956 tree_could_throw_p (tree t)
2957 {
2958   if (!flag_exceptions)
2959     return false;
2960   if (TREE_CODE (t) == MODIFY_EXPR)
2961     {
2962       if (cfun->can_throw_non_call_exceptions
2963           && tree_could_trap_p (TREE_OPERAND (t, 0)))
2964         return true;
2965       t = TREE_OPERAND (t, 1);
2966     }
2967 
2968   if (TREE_CODE (t) == WITH_SIZE_EXPR)
2969     t = TREE_OPERAND (t, 0);
2970   if (TREE_CODE (t) == CALL_EXPR)
2971     return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2972   if (cfun->can_throw_non_call_exceptions)
2973     return tree_could_trap_p (t);
2974   return false;
2975 }
2976 
2977 /* Return true if STMT can throw an exception that is not caught within its
2978    function FUN.  FUN may be NULL, in which case the answer is extra
2979    conservative.  */
2980 
2981 bool
2982 stmt_can_throw_external (function *fun, gimple *stmt)
2983 {
2984   int lp_nr;
2985 
2986   if (!stmt_could_throw_p (fun, stmt))
2987     return false;
2988   if (!fun)
2989     return true;
2990 
2991   lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
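  /* Zero means the statement is in neither a landing-pad region
     (positive numbers) nor a MUST_NOT_THROW region (negative numbers),
     so any exception it throws escapes the function.  */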
2992   return lp_nr == 0;
2993 }
2994 
2995 /* Return true if STMT can throw an exception that is caught within its
2996    function FUN.  */
2997 
2998 bool
2999 stmt_can_throw_internal (function *fun, gimple *stmt)
3000 {
3001   int lp_nr;
3002 
3003   gcc_checking_assert (fun);
3004   if (!stmt_could_throw_p (fun, stmt))
3005     return false;
3006 
3007   lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
3008   return lp_nr > 0;
3009 }
3010 
3011 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
3012    remove any entry it might have from the EH table.  Return true if
3013    any change was made.  */
3014 
3015 bool
3016 maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt)
3017 {
3018   if (stmt_could_throw_p (ifun, stmt))
3019     return false;
3020   return remove_stmt_from_eh_lp_fn (ifun, stmt);
3021 }
3022 
3023 /* Likewise, but always use the current function.  */
3024 
3025 bool
3026 maybe_clean_eh_stmt (gimple *stmt)
3027 {
3028   return maybe_clean_eh_stmt_fn (cfun, stmt);
3029 }
3030 
3031 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
3032    OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
3033    in the table if it should be in there.  Return TRUE if a replacement was
3034    done that may require an EH edge purge.  */
3035 
3036 bool
3037 maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt)
3038 {
3039   int lp_nr = lookup_stmt_eh_lp (old_stmt);
3040 
3041   if (lp_nr != 0)
3042     {
3043       bool new_stmt_could_throw = stmt_could_throw_p (cfun, new_stmt);
3044 
3045       if (new_stmt == old_stmt && new_stmt_could_throw)
3046 	return false;
3047 
3048       remove_stmt_from_eh_lp (old_stmt);
3049       if (new_stmt_could_throw)
3050 	{
3051 	  add_stmt_to_eh_lp (new_stmt, lp_nr);
3052 	  return false;
3053 	}
3054       else
3055 	return true;
3056     }
3057 
3058   return false;
3059 }
3060 
3061 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
3062    in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
3063    operand is the return value of duplicate_eh_regions.  */
3064 
3065 bool
3066 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt,
3067 			    struct function *old_fun, gimple *old_stmt,
3068 			    hash_map<void *, void *> *map,
3069 			    int default_lp_nr)
3070 {
3071   int old_lp_nr, new_lp_nr;
3072 
3073   if (!stmt_could_throw_p (new_fun, new_stmt))
3074     return false;
3075 
3076   old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
3077   if (old_lp_nr == 0)
3078     {
3079       if (default_lp_nr == 0)
3080 	return false;
3081       new_lp_nr = default_lp_nr;
3082     }
3083   else if (old_lp_nr > 0)
3084     {
3085       eh_landing_pad old_lp, new_lp;
3086 
3087       old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
3088       new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
3089       new_lp_nr = new_lp->index;
3090     }
3091   else
3092     {
3093       eh_region old_r, new_r;
3094 
3095       old_r = (*old_fun->eh->region_array)[-old_lp_nr];
3096       new_r = static_cast<eh_region> (*map->get (old_r));
3097       new_lp_nr = -new_r->index;
3098     }
3099 
3100   add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
3101   return true;
3102 }
3103 
3104 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
3105    and thus no remapping is required.  */
3106 
3107 bool
3108 maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt)
3109 {
3110   int lp_nr;
3111 
3112   if (!stmt_could_throw_p (cfun, new_stmt))
3113     return false;
3114 
3115   lp_nr = lookup_stmt_eh_lp (old_stmt);
3116   if (lp_nr == 0)
3117     return false;
3118 
3119   add_stmt_to_eh_lp (new_stmt, lp_nr);
3120   return true;
3121 }
3122 
3123 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
3124    GIMPLE_TRY) that are similar enough to be considered the same.  Currently
3125    this only handles handlers consisting of a single call, as that's the
3126    important case for C++: a destructor call for a particular object showing
3127    up in multiple handlers.  */
3128 
3129 static bool
3130 same_handler_p (gimple_seq oneh, gimple_seq twoh)
3131 {
3132   gimple_stmt_iterator gsi;
3133   gimple *ones, *twos;
3134   unsigned int ai;
3135 
3136   gsi = gsi_start (oneh);
3137   if (!gsi_one_before_end_p (gsi))
3138     return false;
3139   ones = gsi_stmt (gsi);
3140 
3141   gsi = gsi_start (twoh);
3142   if (!gsi_one_before_end_p (gsi))
3143     return false;
3144   twos = gsi_stmt (gsi);
3145 
3146   if (!is_gimple_call (ones)
3147       || !is_gimple_call (twos)
3148       || gimple_call_lhs (ones)
3149       || gimple_call_lhs (twos)
3150       || gimple_call_chain (ones)
3151       || gimple_call_chain (twos)
3152       || !gimple_call_same_target_p (ones, twos)
3153       || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3154     return false;
3155 
3156   for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3157     if (!operand_equal_p (gimple_call_arg (ones, ai),
3158                           gimple_call_arg (twos, ai), 0))
3159       return false;
3160 
3161   return true;
3162 }
3163 
3164 /* Optimize
3165     try { A() } finally { try { ~B() } catch { ~A() } }
3166     try { ... } finally { ~A() }
3167    into
3168     try { A() } catch { ~B() }
3169     try { ~B() ... } finally { ~A() }
3170 
3171    This occurs frequently in C++, where A is a local variable and B is a
3172    temporary used in the initializer for A.  */
3173 
3174 static void
3175 optimize_double_finally (gtry *one, gtry *two)
3176 {
3177   gimple *oneh;
3178   gimple_stmt_iterator gsi;
3179   gimple_seq cleanup;
3180 
3181   cleanup = gimple_try_cleanup (one);
3182   gsi = gsi_start (cleanup);
3183   if (!gsi_one_before_end_p (gsi))
3184     return;
3185 
3186   oneh = gsi_stmt (gsi);
3187   if (gimple_code (oneh) != GIMPLE_TRY
3188       || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3189     return;
3190 
3191   if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3192     {
3193       gimple_seq seq = gimple_try_eval (oneh);
3194 
3195       gimple_try_set_cleanup (one, seq);
3196       gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3197       seq = copy_gimple_seq_and_replace_locals (seq);
3198       gimple_seq_add_seq (&seq, gimple_try_eval (two));
3199       gimple_try_set_eval (two, seq);
3200     }
3201 }
3202 
3203 /* Perform EH refactoring optimizations that are simpler to do when code
3204    flow has been lowered but EH structures haven't.  */
3205 
3206 static void
3207 refactor_eh_r (gimple_seq seq)
3208 {
3209   gimple_stmt_iterator gsi;
3210   gimple *one, *two;
3211 
3212   one = NULL;
3213   two = NULL;
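  /* ONE and TWO form a sliding window over consecutive statements so
     that each adjacent pair of GIMPLE_TRY_FINALLY stmts can be handed
     to optimize_double_finally.  */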
3214   gsi = gsi_start (seq);
3215   while (1)
3216     {
3217       one = two;
3218       if (gsi_end_p (gsi))
3219 	two = NULL;
3220       else
3221 	two = gsi_stmt (gsi);
3222       if (one && two)
3223 	if (gtry *try_one = dyn_cast <gtry *> (one))
3224 	  if (gtry *try_two = dyn_cast <gtry *> (two))
3225 	    if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3226 		&& gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3227 	      optimize_double_finally (try_one, try_two);
3228       if (one)
3229 	switch (gimple_code (one))
3230 	  {
3231 	  case GIMPLE_TRY:
3232 	    refactor_eh_r (gimple_try_eval (one));
3233 	    refactor_eh_r (gimple_try_cleanup (one));
3234 	    break;
3235 	  case GIMPLE_CATCH:
3236 	    refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3237 	    break;
3238 	  case GIMPLE_EH_FILTER:
3239 	    refactor_eh_r (gimple_eh_filter_failure (one));
3240 	    break;
3241 	  case GIMPLE_EH_ELSE:
3242 	    {
3243 	      geh_else *eh_else_stmt = as_a <geh_else *> (one);
3244 	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3245 	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3246 	    }
3247 	    break;
3248 	  default:
3249 	    break;
3250 	  }
3251       if (two)
3252 	gsi_next (&gsi);
3253       else
3254 	break;
3255     }
3256 }
3257 
3258 namespace {
3259 
3260 const pass_data pass_data_refactor_eh =
3261 {
3262   GIMPLE_PASS, /* type */
3263   "ehopt", /* name */
3264   OPTGROUP_NONE, /* optinfo_flags */
3265   TV_TREE_EH, /* tv_id */
3266   PROP_gimple_lcf, /* properties_required */
3267   0, /* properties_provided */
3268   0, /* properties_destroyed */
3269   0, /* todo_flags_start */
3270   0, /* todo_flags_finish */
3271 };
3272 
3273 class pass_refactor_eh : public gimple_opt_pass
3274 {
3275 public:
3276   pass_refactor_eh (gcc::context *ctxt)
3277     : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3278   {}
3279 
3280   /* opt_pass methods: */
3281   virtual bool gate (function *) { return flag_exceptions != 0; }
3282   virtual unsigned int execute (function *)
3283     {
3284       refactor_eh_r (gimple_body (current_function_decl));
3285       return 0;
3286     }
3287 
3288 }; // class pass_refactor_eh
3289 
3290 } // anon namespace
3291 
3292 gimple_opt_pass *
3293 make_pass_refactor_eh (gcc::context *ctxt)
3294 {
3295   return new pass_refactor_eh (ctxt);
3296 }
3297 
3298 /* At the end of gimple optimization, we can lower RESX.  */
3299 
3300 static bool
3301 lower_resx (basic_block bb, gresx *stmt,
3302 	    hash_map<eh_region, tree> *mnt_map)
3303 {
3304   int lp_nr;
3305   eh_region src_r, dst_r;
3306   gimple_stmt_iterator gsi;
3307   gimple *x;
3308   tree fn, src_nr;
3309   bool ret = false;
3310 
3311   lp_nr = lookup_stmt_eh_lp (stmt);
3312   if (lp_nr != 0)
3313     dst_r = get_eh_region_from_lp_number (lp_nr);
3314   else
3315     dst_r = NULL;
3316 
3317   src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3318   gsi = gsi_last_bb (bb);
3319 
3320   if (src_r == NULL)
3321     {
3322       /* We can wind up with no source region when pass_cleanup_eh shows
3323 	 that there are no entries into an eh region and deletes it, but
3324 	 then the block that contains the resx isn't removed.  This can
3325 	 happen without optimization when the switch statement created by
3326 	 lower_try_finally_switch isn't simplified to remove the eh case.
3327 
3328 	 Resolve this by expanding the resx node to an abort.  */
3329 
3330       fn = builtin_decl_implicit (BUILT_IN_TRAP);
3331       x = gimple_build_call (fn, 0);
3332       gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3333 
3334       while (EDGE_COUNT (bb->succs) > 0)
3335 	remove_edge (EDGE_SUCC (bb, 0));
3336     }
3337   else if (dst_r)
3338     {
3339       /* When we have a destination region, we resolve this by copying
3340 	 the excptr and filter values into place, and changing the edge
3341 	 to immediately after the landing pad.  */
3342       edge e;
3343 
3344       if (lp_nr < 0)
3345 	{
3346 	  basic_block new_bb;
3347 	  tree lab;
3348 
3349 	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3350 	     the failure decl into a new block, if needed.  */
3351 	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3352 
3353 	  tree *slot = mnt_map->get (dst_r);
3354 	  if (slot == NULL)
3355 	    {
3356 	      gimple_stmt_iterator gsi2;
3357 
3358 	      new_bb = create_empty_bb (bb);
3359 	      new_bb->count = bb->count;
3360 	      add_bb_to_loop (new_bb, bb->loop_father);
3361 	      lab = gimple_block_label (new_bb);
3362 	      gsi2 = gsi_start_bb (new_bb);
3363 
3364 	      fn = dst_r->u.must_not_throw.failure_decl;
3365 	      x = gimple_build_call (fn, 0);
3366 	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3367 	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3368 
3369 	      mnt_map->put (dst_r, lab);
3370 	    }
3371 	  else
3372 	    {
3373 	      lab = *slot;
3374 	      new_bb = label_to_block (cfun, lab);
3375 	    }
3376 
3377 	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
3378 	  e = make_single_succ_edge (bb, new_bb, EDGE_FALLTHRU);
3379 	}
3380       else
3381 	{
3382 	  edge_iterator ei;
3383 	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3384 
3385 	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3386 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3387 	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
3388 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3389 
3390 	  /* Update the flags for the outgoing edge.  */
3391 	  e = single_succ_edge (bb);
3392 	  gcc_assert (e->flags & EDGE_EH);
3393 	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3394 	  e->probability = profile_probability::always ();
3395 
3396 	  /* If there are no more EH users of the landing pad, delete it.  */
3397 	  FOR_EACH_EDGE (e, ei, e->dest->preds)
3398 	    if (e->flags & EDGE_EH)
3399 	      break;
3400 	  if (e == NULL)
3401 	    {
3402 	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3403 	      remove_eh_landing_pad (lp);
3404 	    }
3405 	}
3406 
3407       ret = true;
3408     }
3409   else
3410     {
3411       tree var;
3412 
3413       /* When we don't have a destination region, this exception escapes
3414 	 up the call chain.  We resolve this by generating a call to the
3415 	 _Unwind_Resume library function.  */
3416 
3417       /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3418 	 with no arguments for C++.  Check for that.  */
3419       if (src_r->use_cxa_end_cleanup)
3420 	{
3421 	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3422 	  x = gimple_build_call (fn, 0);
3423 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3424 	}
3425       else
3426 	{
3427 	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3428 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3429 	  x = gimple_build_call (fn, 1, src_nr);
3430 	  var = create_tmp_var (ptr_type_node);
3431 	  var = make_ssa_name (var, x);
3432 	  gimple_call_set_lhs (x, var);
3433 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3434 
3435 	  /* When exception handling is delegated to a caller function, we
3436 	     have to guarantee that shadow memory variables living on the
3437 	     stack are cleaned before control is given to the parent function.  */
3438 	  if (sanitize_flags_p (SANITIZE_ADDRESS))
3439 	    {
3440 	      tree decl
3441 		= builtin_decl_implicit (BUILT_IN_ASAN_HANDLE_NO_RETURN);
3442 	      gimple *g = gimple_build_call (decl, 0);
3443 	      gimple_set_location (g, gimple_location (stmt));
3444 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3445 	    }
3446 
3447 	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3448 	  x = gimple_build_call (fn, 1, var);
3449 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3450 	}
3451 
3452       gcc_assert (EDGE_COUNT (bb->succs) == 0);
3453     }
3454 
3455   gsi_remove (&gsi, true);
3456 
3457   return ret;
3458 }
3459 
3460 namespace {
3461 
3462 const pass_data pass_data_lower_resx =
3463 {
3464   GIMPLE_PASS, /* type */
3465   "resx", /* name */
3466   OPTGROUP_NONE, /* optinfo_flags */
3467   TV_TREE_EH, /* tv_id */
3468   PROP_gimple_lcf, /* properties_required */
3469   0, /* properties_provided */
3470   0, /* properties_destroyed */
3471   0, /* todo_flags_start */
3472   0, /* todo_flags_finish */
3473 };
3474 
3475 class pass_lower_resx : public gimple_opt_pass
3476 {
3477 public:
3478   pass_lower_resx (gcc::context *ctxt)
3479     : gimple_opt_pass (pass_data_lower_resx, ctxt)
3480   {}
3481 
3482   /* opt_pass methods: */
3483   virtual bool gate (function *) { return flag_exceptions != 0; }
3484   virtual unsigned int execute (function *);
3485 
3486 }; // class pass_lower_resx
3487 
3488 unsigned
3489 pass_lower_resx::execute (function *fun)
3490 {
3491   basic_block bb;
3492   bool dominance_invalidated = false;
3493   bool any_rewritten = false;
3494 
3495   hash_map<eh_region, tree> mnt_map;
3496 
3497   FOR_EACH_BB_FN (bb, fun)
3498     {
3499       gimple *last = last_stmt (bb);
3500       if (last && is_gimple_resx (last))
3501 	{
3502 	  dominance_invalidated |=
3503 	    lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3504 	  any_rewritten = true;
3505 	}
3506     }
3507 
3508   if (dominance_invalidated)
3509     {
3510       free_dominance_info (CDI_DOMINATORS);
3511       free_dominance_info (CDI_POST_DOMINATORS);
3512     }
3513 
3514   return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3515 }
3516 
3517 } // anon namespace
3518 
3519 gimple_opt_pass *
3520 make_pass_lower_resx (gcc::context *ctxt)
3521 {
3522   return new pass_lower_resx (ctxt);
3523 }
3524 
3525 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3526    external throw.  */
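
/* For example (a sketch):

     <bb>:
     x ={v} {CLOBBER};
     y ={v} {CLOBBER};
     resx N;	<- throws out of the function

   Once the exception leaves the function the clobbers serve no further
   purpose, so they are simply deleted.  */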
3527 
3528 static void
3529 optimize_clobbers (basic_block bb)
3530 {
3531   gimple_stmt_iterator gsi = gsi_last_bb (bb);
3532   bool any_clobbers = false;
3533   bool seen_stack_restore = false;
3534   edge_iterator ei;
3535   edge e;
3536 
3537   /* Only optimize anything if the bb contains at least one clobber,
3538      ends with resx (checked by caller), optionally contains some
3539      debug stmts or labels and at most one __builtin_stack_restore
3540      call, and has an incoming EH edge.  */
3541   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3542     {
3543       gimple *stmt = gsi_stmt (gsi);
3544       if (is_gimple_debug (stmt))
3545 	continue;
3546       if (gimple_clobber_p (stmt))
3547 	{
3548 	  any_clobbers = true;
3549 	  continue;
3550 	}
3551       if (!seen_stack_restore
3552 	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3553 	{
3554 	  seen_stack_restore = true;
3555 	  continue;
3556 	}
3557       if (gimple_code (stmt) == GIMPLE_LABEL)
3558 	break;
3559       return;
3560     }
3561   if (!any_clobbers)
3562     return;
3563   FOR_EACH_EDGE (e, ei, bb->preds)
3564     if (e->flags & EDGE_EH)
3565       break;
3566   if (e == NULL)
3567     return;
3568   gsi = gsi_last_bb (bb);
3569   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3570     {
3571       gimple *stmt = gsi_stmt (gsi);
3572       if (!gimple_clobber_p (stmt))
3573 	continue;
3574       unlink_stmt_vdef (stmt);
3575       gsi_remove (&gsi, true);
3576       release_defs (stmt);
3577     }
3578 }
3579 
3580 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3581    internal throw to successor BB.
3582    SUNK, if not NULL, is an array of sequences indexed by basic-block
3583    index to sink to and to pick up sinking opportunities from.
3584    If FOUND_OPPORTUNITY is not NULL then do not perform the optimization
3585    but set *FOUND_OPPORTUNITY to true.  */
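
/* Schematically (a sketch): for a block of the form

     <bb 2>:
     x ={v} {CLOBBER};
     resx N;	<- rethrows to a landing pad in this function

   the clobbers are moved to the start of the EH successor, preserving
   the lifetime information while leaving only the RESX behind; the
   virtual operands are fixed up as done below.  */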
3586 
3587 static int
3588 sink_clobbers (basic_block bb,
3589 	       gimple_seq *sunk = NULL, bool *found_opportunity = NULL)
3590 {
3591   edge e;
3592   edge_iterator ei;
3593   gimple_stmt_iterator gsi, dgsi;
3594   basic_block succbb;
3595   bool any_clobbers = false;
3596   unsigned todo = 0;
3597 
3598   /* Only optimize if BB has a single EH successor and
3599      all predecessor edges are EH too.  */
3600   if (!single_succ_p (bb)
3601       || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3602     return 0;
3603 
3604   FOR_EACH_EDGE (e, ei, bb->preds)
3605     {
3606       if ((e->flags & EDGE_EH) == 0)
3607 	return 0;
3608     }
3609 
3610   /* And BB contains only CLOBBER stmts before the final
3611      RESX.  */
3612   gsi = gsi_last_bb (bb);
3613   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3614     {
3615       gimple *stmt = gsi_stmt (gsi);
3616       if (is_gimple_debug (stmt))
3617 	continue;
3618       if (gimple_code (stmt) == GIMPLE_LABEL)
3619 	break;
3620       if (!gimple_clobber_p (stmt))
3621 	return 0;
3622       any_clobbers = true;
3623     }
3624   if (!any_clobbers && (!sunk || gimple_seq_empty_p (sunk[bb->index])))
3625     return 0;
3626 
3627   /* If this was a dry run, tell it we found clobbers to sink.  */
3628   if (found_opportunity)
3629     {
3630       *found_opportunity = true;
3631       return 0;
3632     }
3633 
3634   edge succe = single_succ_edge (bb);
3635   succbb = succe->dest;
3636 
3637   /* See if there is a virtual PHI node to take an updated virtual
3638      operand from.  */
3639   gphi *vphi = NULL;
3640   for (gphi_iterator gpi = gsi_start_phis (succbb);
3641        !gsi_end_p (gpi); gsi_next (&gpi))
3642     {
3643       tree res = gimple_phi_result (gpi.phi ());
3644       if (virtual_operand_p (res))
3645 	{
3646 	  vphi = gpi.phi ();
3647 	  break;
3648 	}
3649     }
3650 
3651   gimple *first_sunk = NULL;
3652   gimple *last_sunk = NULL;
3653   if (sunk && !(succbb->flags & BB_VISITED))
3654     dgsi = gsi_start (sunk[succbb->index]);
3655   else
3656     dgsi = gsi_after_labels (succbb);
3657   gsi = gsi_last_bb (bb);
3658   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3659     {
3660       gimple *stmt = gsi_stmt (gsi);
3661       tree lhs;
3662       if (is_gimple_debug (stmt))
3663 	continue;
3664       if (gimple_code (stmt) == GIMPLE_LABEL)
3665 	break;
3666       lhs = gimple_assign_lhs (stmt);
3667       /* Unfortunately we don't have dominance info updated at this
3668 	 point, so checking if
3669 	 dominated_by_p (CDI_DOMINATORS, succbb,
3670 			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
3671 	 would be too costly.  Thus, avoid sinking any clobbers that
3672 	 refer to non-(D) SSA_NAMEs.  */
3673       if (TREE_CODE (lhs) == MEM_REF
3674 	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3675 	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3676 	{
3677 	  unlink_stmt_vdef (stmt);
3678 	  gsi_remove (&gsi, true);
3679 	  release_defs (stmt);
3680 	  continue;
3681 	}
3682 
3683       /* As we do not change stmt order when sinking across a
3684          forwarder edge we can keep virtual operands in place.  */
3685       gsi_remove (&gsi, false);
3686       gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3687       if (!first_sunk)
3688 	first_sunk = stmt;
3689       last_sunk = stmt;
3690     }
3691   if (sunk && !gimple_seq_empty_p (sunk[bb->index]))
3692     {
3693       if (!first_sunk)
3694 	first_sunk = gsi_stmt (gsi_last (sunk[bb->index]));
3695       last_sunk = gsi_stmt (gsi_start (sunk[bb->index]));
3696       gsi_insert_seq_before_without_update (&dgsi,
3697 					    sunk[bb->index], GSI_NEW_STMT);
3698       sunk[bb->index] = NULL;
3699     }
3700   if (first_sunk)
3701     {
3702       /* Adjust virtual operands if we sunk across a virtual PHI.  */
3703       if (vphi)
3704 	{
3705 	  imm_use_iterator iter;
3706 	  use_operand_p use_p;
3707 	  gimple *use_stmt;
3708 	  tree phi_def = gimple_phi_result (vphi);
3709 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, phi_def)
3710 	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3711               SET_USE (use_p, gimple_vdef (first_sunk));
3712 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def))
3713 	    {
3714 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (first_sunk)) = 1;
3715 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def) = 0;
3716 	    }
3717 	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe),
3718 		   gimple_vuse (last_sunk));
3719 	  SET_USE (gimple_vuse_op (last_sunk), phi_def);
3720 	}
3721       /* If there isn't a single predecessor but no virtual PHI node
3722          arrange for virtual operands to be renamed.  */
3723       else if (!single_pred_p (succbb)
3724 	       && TREE_CODE (gimple_vuse (last_sunk)) == SSA_NAME)
3725 	{
3726 	  mark_virtual_operand_for_renaming (gimple_vuse (last_sunk));
3727 	  todo |= TODO_update_ssa_only_virtuals;
3728 	}
3729     }
3730 
3731   return todo;
3732 }
3733 
3734 /* At the end of inlining, we can lower EH_DISPATCH.  Return true when
3735    we have found some duplicate labels and removed some edges.  */
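
/* For an ERT_TRY region the dispatch becomes a filter load plus a
   switch over the catch handlers.  Roughly (a sketch with made-up
   filter values):

     eh_dispatch N;  =>  _1 = __builtin_eh_filter (N);
                         switch (_1) <default: <catch-all or fallthru>,
                                      case 1: <handler A>,
                                      case 2: <handler B>>

   For an ERT_ALLOWED_EXCEPTIONS region it instead becomes a two-way
   conditional comparing the filter value, as built below.  */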
3736 
3737 static bool
3738 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3739 {
3740   gimple_stmt_iterator gsi;
3741   int region_nr;
3742   eh_region r;
3743   tree filter, fn;
3744   gimple *x;
3745   bool redirected = false;
3746 
3747   region_nr = gimple_eh_dispatch_region (stmt);
3748   r = get_eh_region_from_number (region_nr);
3749 
3750   gsi = gsi_last_bb (src);
3751 
3752   switch (r->type)
3753     {
3754     case ERT_TRY:
3755       {
3756 	auto_vec<tree> labels;
3757 	tree default_label = NULL;
3758 	eh_catch c;
3759 	edge_iterator ei;
3760 	edge e;
3761 	hash_set<tree> seen_values;
3762 
3763 	/* Collect the labels for a switch.  Zero the post_landing_pad
3764 	   field because we'll no longer have anything keeping these labels
3765 	   in existence and the optimizer will be free to merge these
3766 	   blocks at will.  */
3767 	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3768 	  {
3769 	    tree tp_node, flt_node, lab = c->label;
3770 	    bool have_label = false;
3771 
3772 	    c->label = NULL;
3773 	    tp_node = c->type_list;
3774 	    flt_node = c->filter_list;
3775 
3776 	    if (tp_node == NULL)
3777 	      {
3778 	        default_label = lab;
3779 		break;
3780 	      }
3781 	    do
3782 	      {
3783 		/* Filter out duplicate labels that arise when this handler
3784 		   is shadowed by an earlier one.  When no labels are
3785 		   attached to the handler anymore, we remove
3786 		   the corresponding edge and then we delete unreachable
3787 		   blocks at the end of this pass.  */
3788 		if (! seen_values.contains (TREE_VALUE (flt_node)))
3789 		  {
3790 		    tree t = build_case_label (TREE_VALUE (flt_node),
3791 					       NULL, lab);
3792 		    labels.safe_push (t);
3793 		    seen_values.add (TREE_VALUE (flt_node));
3794 		    have_label = true;
3795 		  }
3796 
3797 		tp_node = TREE_CHAIN (tp_node);
3798 		flt_node = TREE_CHAIN (flt_node);
3799 	      }
3800 	    while (tp_node);
3801 	    if (! have_label)
3802 	      {
3803 		remove_edge (find_edge (src, label_to_block (cfun, lab)));
3804 	        redirected = true;
3805 	      }
3806 	  }
3807 
3808 	/* Clean up the edge flags.  */
3809 	FOR_EACH_EDGE (e, ei, src->succs)
3810 	  {
3811 	    if (e->flags & EDGE_FALLTHRU)
3812 	      {
3813 		/* If there was no catch-all, use the fallthru edge.  */
3814 		if (default_label == NULL)
3815 		  default_label = gimple_block_label (e->dest);
3816 		e->flags &= ~EDGE_FALLTHRU;
3817 	      }
3818 	  }
3819 	gcc_assert (default_label != NULL);
3820 
3821 	/* Don't generate a switch if there's only a default case.
3822 	   This is common in the form of try { A; } catch (...) { B; }.  */
3823 	if (!labels.exists ())
3824 	  {
3825 	    e = single_succ_edge (src);
3826 	    e->flags |= EDGE_FALLTHRU;
3827 	  }
3828 	else
3829 	  {
3830 	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3831 	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3832 							 region_nr));
3833 	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3834 	    filter = make_ssa_name (filter, x);
3835 	    gimple_call_set_lhs (x, filter);
3836 	    gimple_set_location (x, gimple_location (stmt));
3837 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3838 
3839 	    /* Turn the default label into a default case.  */
3840 	    default_label = build_case_label (NULL, NULL, default_label);
3841 	    sort_case_labels (labels);
3842 
3843 	    x = gimple_build_switch (filter, default_label, labels);
3844 	    gimple_set_location (x, gimple_location (stmt));
3845 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3846 	  }
3847       }
3848       break;
3849 
3850     case ERT_ALLOWED_EXCEPTIONS:
3851       {
3852 	edge b_e = BRANCH_EDGE (src);
3853 	edge f_e = FALLTHRU_EDGE (src);
3854 
3855 	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3856 	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3857 						     region_nr));
3858 	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3859 	filter = make_ssa_name (filter, x);
3860 	gimple_call_set_lhs (x, filter);
3861 	gimple_set_location (x, gimple_location (stmt));
3862 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3863 
3864 	r->u.allowed.label = NULL;
3865 	x = gimple_build_cond (EQ_EXPR, filter,
3866 			       build_int_cst (TREE_TYPE (filter),
3867 					      r->u.allowed.filter),
3868 			       NULL_TREE, NULL_TREE);
3869 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3870 
3871 	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3872         f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3873       }
3874       break;
3875 
3876     default:
3877       gcc_unreachable ();
3878     }
3879 
3880   /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
3881   gsi_remove (&gsi, true);
3882   return redirected;
3883 }
3884 
3885 namespace {
3886 
3887 const pass_data pass_data_lower_eh_dispatch =
3888 {
3889   GIMPLE_PASS, /* type */
3890   "ehdisp", /* name */
3891   OPTGROUP_NONE, /* optinfo_flags */
3892   TV_TREE_EH, /* tv_id */
3893   PROP_gimple_lcf, /* properties_required */
3894   0, /* properties_provided */
3895   0, /* properties_destroyed */
3896   0, /* todo_flags_start */
3897   0, /* todo_flags_finish */
3898 };
3899 
3900 class pass_lower_eh_dispatch : public gimple_opt_pass
3901 {
3902 public:
3903   pass_lower_eh_dispatch (gcc::context *ctxt)
3904     : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3905   {}
3906 
3907   /* opt_pass methods: */
3908   virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3909   virtual unsigned int execute (function *);
3910 
3911 }; // class pass_lower_eh_dispatch
3912 
3913 unsigned
3914 pass_lower_eh_dispatch::execute (function *fun)
3915 {
3916   basic_block bb;
3917   int flags = 0;
3918   bool redirected = false;
3919   bool any_resx_to_process = false;
3920 
3921   assign_filter_values ();
3922 
3923   FOR_EACH_BB_FN (bb, fun)
3924     {
3925       gimple *last = last_stmt (bb);
3926       if (last == NULL)
3927 	continue;
3928       if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3929 	{
3930 	  redirected |= lower_eh_dispatch (bb,
3931 					   as_a <geh_dispatch *> (last));
3932 	  flags |= TODO_update_ssa_only_virtuals;
3933 	}
3934       else if (gimple_code (last) == GIMPLE_RESX)
3935 	{
3936 	  if (stmt_can_throw_external (fun, last))
3937 	    optimize_clobbers (bb);
3938 	  else if (!any_resx_to_process)
3939 	    sink_clobbers (bb, NULL, &any_resx_to_process);
3940 	}
3941       bb->flags &= ~BB_VISITED;
3942     }
3943   if (redirected)
3944     {
3945       free_dominance_info (CDI_DOMINATORS);
3946       delete_unreachable_blocks ();
3947     }
3948 
3949   if (any_resx_to_process)
3950     {
3951       /* Make sure to catch all secondary sinking opportunities by processing
3952 	 blocks in RPO order and after all CFG modifications from lowering
3953 	 and unreachable block removal.  */
3954       int *rpo = XNEWVEC  (int, n_basic_blocks_for_fn (fun));
3955       int rpo_n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false);
3956       gimple_seq *sunk = XCNEWVEC (gimple_seq, last_basic_block_for_fn (fun));
3957       for (int i = 0; i < rpo_n; ++i)
3958 	{
3959 	  bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3960 	  gimple *last = last_stmt (bb);
3961 	  if (last
3962 	      && gimple_code (last) == GIMPLE_RESX
3963 	      && !stmt_can_throw_external (fun, last))
3964 	    flags |= sink_clobbers (bb, sunk);
3965 	  /* If there were any clobbers sunk into this BB, insert them now.  */
3966 	  if (!gimple_seq_empty_p (sunk[bb->index]))
3967 	    {
3968 	      gimple_stmt_iterator gsi = gsi_after_labels (bb);
3969 	      gsi_insert_seq_before (&gsi, sunk[bb->index], GSI_NEW_STMT);
3970 	      sunk[bb->index] = NULL;
3971 	    }
3972 	  bb->flags |= BB_VISITED;
3973 	}
3974       free (rpo);
3975       free (sunk);
3976     }
3977 
3978   return flags;
3979 }
3980 
3981 } // anon namespace
3982 
3983 gimple_opt_pass *
3984 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3985 {
3986   return new pass_lower_eh_dispatch (ctxt);
3987 }
3988 
3989 /* Walk statements, see what regions and, optionally, landing pads
3990    are really referenced.
3991 
3992    Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3993    and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3994 
3995    Passing NULL for LP_REACHABLEP is valid; in that case only reachable
3996    regions are marked.
3997 
3998    The caller is responsible for freeing the returned sbitmaps.  */
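
/* Typical use, as in remove_unreachable_handlers below:

     sbitmap r_reachable, lp_reachable;
     mark_reachable_handlers (&r_reachable, &lp_reachable);
     ... test bitmap_bit_p (r_reachable, region->index) ...
     sbitmap_free (r_reachable);
     sbitmap_free (lp_reachable);  */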
3999 
4000 static void
4001 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
4002 {
4003   sbitmap r_reachable, lp_reachable;
4004   basic_block bb;
4005   bool mark_landing_pads = (lp_reachablep != NULL);
4006   gcc_checking_assert (r_reachablep != NULL);
4007 
4008   r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
4009   bitmap_clear (r_reachable);
4010   *r_reachablep = r_reachable;
4011 
4012   if (mark_landing_pads)
4013     {
4014       lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
4015       bitmap_clear (lp_reachable);
4016       *lp_reachablep = lp_reachable;
4017     }
4018   else
4019     lp_reachable = NULL;
4020 
4021   FOR_EACH_BB_FN (bb, cfun)
4022     {
4023       gimple_stmt_iterator gsi;
4024 
4025       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4026 	{
4027 	  gimple *stmt = gsi_stmt (gsi);
4028 
4029 	  if (mark_landing_pads)
4030 	    {
4031 	      int lp_nr = lookup_stmt_eh_lp (stmt);
4032 
4033 	      /* Negative LP numbers are MUST_NOT_THROW regions which
4034 		 are not considered BB enders.  */
4035 	      if (lp_nr < 0)
4036 		bitmap_set_bit (r_reachable, -lp_nr);
4037 
4038 	      /* Positive LP numbers are real landing pads, and BB enders.  */
4039 	      else if (lp_nr > 0)
4040 		{
4041 		  gcc_assert (gsi_one_before_end_p (gsi));
4042 		  eh_region region = get_eh_region_from_lp_number (lp_nr);
4043 		  bitmap_set_bit (r_reachable, region->index);
4044 		  bitmap_set_bit (lp_reachable, lp_nr);
4045 		}
4046 	    }
4047 
4048 	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
4049 	  switch (gimple_code (stmt))
4050 	    {
4051 	    case GIMPLE_RESX:
4052 	      bitmap_set_bit (r_reachable,
4053 			      gimple_resx_region (as_a <gresx *> (stmt)));
4054 	      break;
4055 	    case GIMPLE_EH_DISPATCH:
4056 	      bitmap_set_bit (r_reachable,
4057 			      gimple_eh_dispatch_region (
4058                                 as_a <geh_dispatch *> (stmt)));
4059 	      break;
4060 	    case GIMPLE_CALL:
4061 	      if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
4062 		for (int i = 0; i < 2; ++i)
4063 		  {
4064 		    tree rt = gimple_call_arg (stmt, i);
4065 		    HOST_WIDE_INT ri = tree_to_shwi (rt);
4066 
4067 		    gcc_assert (ri == (int)ri);
4068 		    bitmap_set_bit (r_reachable, ri);
4069 		  }
4070 	      break;
4071 	    default:
4072 	      break;
4073 	    }
4074 	}
4075     }
4076 }
4077 
4078 /* Remove unreachable handlers and unreachable landing pads.  */
4079 
4080 static void
4081 remove_unreachable_handlers (void)
4082 {
4083   sbitmap r_reachable, lp_reachable;
4084   eh_region region;
4085   eh_landing_pad lp;
4086   unsigned i;
4087 
4088   mark_reachable_handlers (&r_reachable, &lp_reachable);
4089 
4090   if (dump_file)
4091     {
4092       fprintf (dump_file, "Before removal of unreachable regions:\n");
4093       dump_eh_tree (dump_file, cfun);
4094       fprintf (dump_file, "Reachable regions: ");
4095       dump_bitmap_file (dump_file, r_reachable);
4096       fprintf (dump_file, "Reachable landing pads: ");
4097       dump_bitmap_file (dump_file, lp_reachable);
4098     }
4099 
4100   if (dump_file)
4101     {
4102       FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4103 	if (region && !bitmap_bit_p (r_reachable, region->index))
4104 	  fprintf (dump_file,
4105 		   "Removing unreachable region %d\n",
4106 		   region->index);
4107     }
4108 
4109   remove_unreachable_eh_regions (r_reachable);
4110 
4111   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4112     if (lp && !bitmap_bit_p (lp_reachable, lp->index))
4113       {
4114 	if (dump_file)
4115 	  fprintf (dump_file,
4116 		   "Removing unreachable landing pad %d\n",
4117 		   lp->index);
4118 	remove_eh_landing_pad (lp);
4119       }
4120 
4121   if (dump_file)
4122     {
4123       fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
4124       dump_eh_tree (dump_file, cfun);
4125       fprintf (dump_file, "\n\n");
4126     }
4127 
4128   sbitmap_free (r_reachable);
4129   sbitmap_free (lp_reachable);
4130 
4131   if (flag_checking)
4132     verify_eh_tree (cfun);
4133 }
4134 
4135 /* Remove unreachable handlers if any landing pads have been removed after
4136    the last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
4137 
4138 void
4139 maybe_remove_unreachable_handlers (void)
4140 {
4141   eh_landing_pad lp;
4142   unsigned i;
4143 
4144   if (cfun->eh == NULL)
4145     return;
4146 
4147   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4148     if (lp
4149 	&& (lp->post_landing_pad == NULL_TREE
4150 	    || label_to_block (cfun, lp->post_landing_pad) == NULL))
4151       {
4152 	remove_unreachable_handlers ();
4153 	return;
4154       }
4155 }
4156 
4157 /* Remove regions that do not have landing pads.  This assumes
4158    that remove_unreachable_handlers has already been run, and
4159    that we've just manipulated the landing pads since then.
4160 
4161    Preserve regions with landing pads and regions that prevent
4162    exceptions from propagating further, even if these regions
4163    are not reachable.  */
4164 
4165 static void
4166 remove_unreachable_handlers_no_lp (void)
4167 {
4168   eh_region region;
4169   sbitmap r_reachable;
4170   unsigned i;
4171 
4172   mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
4173 
4174   FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4175     {
4176       if (! region)
4177 	continue;
4178 
4179       if (region->landing_pads != NULL
4180 	  || region->type == ERT_MUST_NOT_THROW)
4181 	bitmap_set_bit (r_reachable, region->index);
4182 
4183       if (dump_file
4184 	  && !bitmap_bit_p (r_reachable, region->index))
4185 	fprintf (dump_file,
4186 		 "Removing unreachable region %d\n",
4187 		 region->index);
4188     }
4189 
4190   remove_unreachable_eh_regions (r_reachable);
4191 
4192   sbitmap_free (r_reachable);
4193 }
4194 
4195 /* Undo critical edge splitting on an EH landing pad.  Earlier, we
4196    optimistically split all sorts of edges, including EH edges.  The
4197    optimization passes in between may not have needed them; if not,
4198    we should undo the split.
4199 
4200    Recognize this case by having one EH edge incoming to the BB and
4201    one normal edge outgoing; BB should be empty apart from the
4202    post_landing_pad label.
4203 
4204    Note that this is slightly different from the empty handler case
4205    handled by cleanup_empty_eh, in that the handler itself may still
4206    contain real code but the landing pad has been separated from the
4207    handler.  As such, cleanup_empty_eh relies on this transformation
4208    having been done first.  */
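
/* Schematically (a sketch):

     <bb A>: <stmt that throws>
        | EH
     <bb L>: <post_landing_pad label only>
        | normal
     <bb B>: <handler code>

   collapses to a single EH edge A -> B, deleting the forwarder L.  */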
4209 
4210 static bool
4211 unsplit_eh (eh_landing_pad lp)
4212 {
4213   basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4214   gimple_stmt_iterator gsi;
4215   edge e_in, e_out;
4216 
4217   /* Quickly check the edge counts on BB for singularity.  */
4218   if (!single_pred_p (bb) || !single_succ_p (bb))
4219     return false;
4220   e_in = single_pred_edge (bb);
4221   e_out = single_succ_edge (bb);
4222 
4223   /* Input edge must be EH and output edge must be normal.  */
4224   if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4225     return false;
4226 
4227   /* The block must be empty except for the labels and debug insns.  */
4228   gsi = gsi_after_labels (bb);
4229   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4230     gsi_next_nondebug (&gsi);
4231   if (!gsi_end_p (gsi))
4232     return false;
4233 
4234   /* The destination block must not already have a landing pad
4235      for a different region.  */
4236   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4237     {
4238       glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4239       tree lab;
4240       int lp_nr;
4241 
4242       if (!label_stmt)
4243 	break;
4244       lab = gimple_label_label (label_stmt);
4245       lp_nr = EH_LANDING_PAD_NR (lab);
4246       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4247 	return false;
4248     }
4249 
4250   /* The new destination block must not already be a destination of
4251      the source block, lest we merge fallthru and eh edges and cause
4252      all sorts of confusion.  */
4253   if (find_edge (e_in->src, e_out->dest))
4254     return false;
4255 
4256   /* ??? We can get degenerate phis due to cfg cleanups.  I would have
4257      thought this should have been cleaned up by a phicprop pass, but
4258      that doesn't appear to handle virtuals.  Propagate by hand.  */
4259   if (!gimple_seq_empty_p (phi_nodes (bb)))
4260     {
4261       for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4262 	{
4263 	  gimple *use_stmt;
4264 	  gphi *phi = gpi.phi ();
4265 	  tree lhs = gimple_phi_result (phi);
4266 	  tree rhs = gimple_phi_arg_def (phi, 0);
4267 	  use_operand_p use_p;
4268 	  imm_use_iterator iter;
4269 
4270 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4271 	    {
4272 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4273 		SET_USE (use_p, rhs);
4274 	    }
4275 
4276 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4277 	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4278 
4279 	  remove_phi_node (&gpi, true);
4280 	}
4281     }
4282 
4283   if (dump_file && (dump_flags & TDF_DETAILS))
4284     fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4285 	     lp->index, e_out->dest->index);
4286 
4287   /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
4288      a successor edge, humor it.  But do the real CFG change with the
4289      predecessor of E_OUT in order to preserve the ordering of arguments
4290      to the PHI nodes in E_OUT->DEST.  */
4291   redirect_eh_edge_1 (e_in, e_out->dest, false);
4292   redirect_edge_pred (e_out, e_in->src);
4293   e_out->flags = e_in->flags;
4294   e_out->probability = e_in->probability;
4295   remove_edge (e_in);
4296 
4297   return true;
4298 }
4299 
4300 /* Examine each landing pad block and see if it matches unsplit_eh.  */
4301 
4302 static bool
4303 unsplit_all_eh (void)
4304 {
4305   bool changed = false;
4306   eh_landing_pad lp;
4307   int i;
4308 
4309   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4310     if (lp)
4311       changed |= unsplit_eh (lp);
4312 
4313   return changed;
4314 }
4315 
4316 /* Wrapper around unsplit_all_eh that makes it usable everywhere.  */
4317 
4318 void
4319 unsplit_eh_edges (void)
4320 {
4321   bool changed;
4322 
4323   /* unsplit_all_eh can die looking up unreachable landing pads.  */
4324   maybe_remove_unreachable_handlers ();
4325 
4326   changed = unsplit_all_eh ();
4327 
4328   /* If EH edges have been unsplit, delete unreachable forwarder blocks.  */
4329   if (changed)
4330     {
4331       free_dominance_info (CDI_DOMINATORS);
4332       free_dominance_info (CDI_POST_DOMINATORS);
4333       delete_unreachable_blocks ();
4334     }
4335 }
4336 
4337 /* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
4338    to OLD_BB to NEW_BB; return true on success, false on failure.
4339 
4340    OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4341    PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4342    Virtual PHIs may be deleted and marked for renaming.  */
4343 
4344 static bool
4345 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4346 			     edge old_bb_out, bool change_region)
4347 {
4348   gphi_iterator ngsi, ogsi;
4349   edge_iterator ei;
4350   edge e;
4351   bitmap ophi_handled;
4352 
4353   /* The destination block must not be a regular successor for any
4354      of the preds of the landing pad.  Thus, avoid turning
4355         <..>
4356 	 |  \ EH
4357 	 |  <..>
4358 	 |  /
4359 	<..>
4360      into
4361         <..>
4362 	|  | EH
4363 	<..>
4364      which CFG verification would choke on.  See PR45172 and PR51089.  */
4365   if (!single_pred_p (new_bb))
4366     FOR_EACH_EDGE (e, ei, old_bb->preds)
4367       if (find_edge (e->src, new_bb))
4368 	return false;
4369 
4370   FOR_EACH_EDGE (e, ei, old_bb->preds)
4371     redirect_edge_var_map_clear (e);
4372 
4373   ophi_handled = BITMAP_ALLOC (NULL);
4374 
4375   /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4376      for the edges we're going to move.  */
4377   for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4378     {
4379       gphi *ophi, *nphi = ngsi.phi ();
4380       tree nresult, nop;
4381 
4382       nresult = gimple_phi_result (nphi);
4383       nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4384 
4385       /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4386 	 the source ssa_name.  */
4387       ophi = NULL;
4388       for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4389 	{
4390 	  ophi = ogsi.phi ();
4391 	  if (gimple_phi_result (ophi) == nop)
4392 	    break;
4393 	  ophi = NULL;
4394 	}
4395 
4396       /* If we did find the corresponding PHI, copy those inputs.  */
4397       if (ophi)
4398 	{
4399 	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
4400 	  if (!has_single_use (nop))
4401 	    {
4402 	      imm_use_iterator imm_iter;
4403 	      use_operand_p use_p;
4404 
4405 	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4406 		{
4407 		  if (!gimple_debug_bind_p (USE_STMT (use_p))
4408 		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4409 			  || gimple_bb (USE_STMT (use_p)) != new_bb))
4410 		    goto fail;
4411 		}
4412 	    }
4413 	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4414 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4415 	    {
4416 	      location_t oloc;
4417 	      tree oop;
4418 
4419 	      if ((e->flags & EDGE_EH) == 0)
4420 		continue;
4421 	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
4422 	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4423 	      redirect_edge_var_map_add (e, nresult, oop, oloc);
4424 	    }
4425 	}
4426       /* If we didn't find the PHI, then whether it's a real variable or
4427 	 a VOP, we know from the fact that OLD_BB is tree_empty_eh_handler_p
4428 	 that the variable is unchanged from input to the block and we can
4429 	 simply re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
4430       else
4431 	{
4432 	  location_t nloc
4433 	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4434 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4435 	    redirect_edge_var_map_add (e, nresult, nop, nloc);
4436 	}
4437     }
4438 
4439   /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
4440      we don't know what values from the other edges into NEW_BB to use.  */
4441   for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4442     {
4443       gphi *ophi = ogsi.phi ();
4444       tree oresult = gimple_phi_result (ophi);
4445       if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4446 	goto fail;
4447     }
4448 
4449   /* Finally, move the edges and update the PHIs.  */
4450   for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4451     if (e->flags & EDGE_EH)
4452       {
4453 	/* ???  CFG manipulation routines do not try to update loop
4454 	   form on edge redirection.  Do so manually here for now.  */
4455 	/* Redirecting a loop entry or latch edge will either create
4456 	   a multiple-entry loop or rotate the loop.  If the loops merge
4457 	   we may have created a loop with multiple latches.
4458 	   All of this isn't easily fixed thus cancel the affected loop
4459 	   and mark the other loop as possibly having multiple latches.  */
4460 	if (e->dest == e->dest->loop_father->header)
4461 	  {
4462 	    mark_loop_for_removal (e->dest->loop_father);
4463 	    new_bb->loop_father->latch = NULL;
4464 	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4465 	  }
4466 	redirect_eh_edge_1 (e, new_bb, change_region);
4467 	redirect_edge_succ (e, new_bb);
4468 	flush_pending_stmts (e);
4469       }
4470     else
4471       ei_next (&ei);
4472 
4473   BITMAP_FREE (ophi_handled);
4474   return true;
4475 
4476  fail:
4477   FOR_EACH_EDGE (e, ei, old_bb->preds)
4478     redirect_edge_var_map_clear (e);
4479   BITMAP_FREE (ophi_handled);
4480   return false;
4481 }
4482 
4483 /* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
4484    old region to NEW_REGION at BB.  */
4485 
4486 static void
4487 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4488 			  eh_landing_pad lp, eh_region new_region)
4489 {
4490   gimple_stmt_iterator gsi;
4491   eh_landing_pad *pp;
4492 
4493   for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4494     continue;
4495   *pp = lp->next_lp;
4496 
4497   lp->region = new_region;
4498   lp->next_lp = new_region->landing_pads;
4499   new_region->landing_pads = lp;
4500 
4501   /* Delete the RESX that was matched within the empty handler block.  */
4502   gsi = gsi_last_bb (bb);
4503   unlink_stmt_vdef (gsi_stmt (gsi));
4504   gsi_remove (&gsi, true);
4505 
4506   /* Clean up E_OUT for the fallthru.  */
4507   e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4508   e_out->probability = profile_probability::always ();
4509 }
4510 
4511 /* A subroutine of cleanup_empty_eh.  Handle more complex cases of
4512    unsplitting than unsplit_eh was prepared to handle, e.g. when
4513    multiple incoming edges and phis are involved.  */
4514 
4515 static bool
4516 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4517 {
4518   gimple_stmt_iterator gsi;
4519   tree lab;
4520 
4521   /* We really ought not have totally lost everything following
4522      a landing pad label.  Given that BB is empty, there had better
4523      be a successor.  */
4524   gcc_assert (e_out != NULL);
4525 
4526   /* The destination block must not already have a landing pad
4527      for a different region.  */
4528   lab = NULL;
4529   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4530     {
4531       glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4532       int lp_nr;
4533 
4534       if (!stmt)
4535 	break;
4536       lab = gimple_label_label (stmt);
4537       lp_nr = EH_LANDING_PAD_NR (lab);
4538       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4539 	return false;
4540     }
4541 
4542   /* Attempt to move the PHIs into the successor block.  */
4543   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4544     {
4545       if (dump_file && (dump_flags & TDF_DETAILS))
4546 	fprintf (dump_file,
4547 		 "Unsplit EH landing pad %d to block %i "
4548 		 "(via cleanup_empty_eh).\n",
4549 		 lp->index, e_out->dest->index);
4550       return true;
4551     }
4552 
4553   return false;
4554 }
4555 
4556 /* Return true if edge E_FIRST is part of an empty infinite loop
4557    or leads to such a loop through a series of single successor
4558    empty bbs.  */
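
/* E.g. (a sketch) a self loop

     <bb X>: goto <bb X>;

   qualifies, as does a chain of empty single-successor blocks that
   eventually cycles back on itself; any real statement along the way
   disqualifies it.  */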
4559 
4560 static bool
4561 infinite_empty_loop_p (edge e_first)
4562 {
4563   bool inf_loop = false;
4564   edge e;
4565 
4566   if (e_first->dest == e_first->src)
4567     return true;
4568 
4569   e_first->src->aux = (void *) 1;
4570   for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4571     {
4572       gimple_stmt_iterator gsi;
4573       if (e->dest->aux)
4574 	{
4575 	  inf_loop = true;
4576 	  break;
4577 	}
4578       e->dest->aux = (void *) 1;
4579       gsi = gsi_after_labels (e->dest);
4580       if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4581 	gsi_next_nondebug (&gsi);
4582       if (!gsi_end_p (gsi))
4583 	break;
4584     }
4585   e_first->src->aux = NULL;
4586   for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4587     e->dest->aux = NULL;
4588 
4589   return inf_loop;
4590 }
4591 
4592 /* Examine the block associated with LP to determine if it's an empty
4593    handler for its EH region.  If so, attempt to redirect EH edges to
4594    an outer region.  Return true if the CFG was updated in any way.  This
4595    is similar to jump forwarding, just across EH edges.  */
4596 
4597 static bool
4598 cleanup_empty_eh (eh_landing_pad lp)
4599 {
4600   basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4601   gimple_stmt_iterator gsi;
4602   gimple *resx;
4603   eh_region new_region;
4604   edge_iterator ei;
4605   edge e, e_out;
4606   bool has_non_eh_pred;
4607   bool ret = false;
4608   int new_lp_nr;
4609 
4610   /* There can be zero or one edges out of BB.  This is the quickest test.  */
4611   switch (EDGE_COUNT (bb->succs))
4612     {
4613     case 0:
4614       e_out = NULL;
4615       break;
4616     case 1:
4617       e_out = single_succ_edge (bb);
4618       break;
4619     default:
4620       return false;
4621     }
4622 
4623   gsi = gsi_last_nondebug_bb (bb);
4624   resx = gsi_stmt (gsi);
4625   if (resx && is_gimple_resx (resx))
4626     {
4627       if (stmt_can_throw_external (cfun, resx))
4628 	optimize_clobbers (bb);
4629       else if (sink_clobbers (bb))
4630 	ret = true;
4631     }
4632 
4633   gsi = gsi_after_labels (bb);
4634 
4635   /* Make sure to skip debug statements.  */
4636   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4637     gsi_next_nondebug (&gsi);
4638 
4639   /* If the block is totally empty, look for more unsplitting cases.  */
4640   if (gsi_end_p (gsi))
4641     {
4642       /* For the degenerate case of an infinite loop, bail out.
4643 	 If bb has no successors and is totally empty, which can happen e.g.
4644 	 because of incorrect noreturn attribute, bail out too.  */
4645       if (e_out == NULL
4646 	  || infinite_empty_loop_p (e_out))
4647 	return ret;
4648 
4649       return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4650     }
4651 
4652   /* The block should consist only of a single RESX statement, modulo a
4653      preceding call to __builtin_stack_restore if there is no outgoing
4654      edge, since the call can be eliminated in this case.  */
4655   resx = gsi_stmt (gsi);
4656   if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4657     {
4658       gsi_next_nondebug (&gsi);
4659       resx = gsi_stmt (gsi);
4660     }
4661   if (!is_gimple_resx (resx))
4662     return ret;
4663   gcc_assert (gsi_one_nondebug_before_end_p (gsi));
4664 
4665   /* Determine if there are non-EH edges, or resx edges into the handler.  */
4666   has_non_eh_pred = false;
4667   FOR_EACH_EDGE (e, ei, bb->preds)
4668     if (!(e->flags & EDGE_EH))
4669       has_non_eh_pred = true;
4670 
4671   /* Find the handler that's outer of the empty handler by looking at
4672      where the RESX instruction was vectored.  */
4673   new_lp_nr = lookup_stmt_eh_lp (resx);
4674   new_region = get_eh_region_from_lp_number (new_lp_nr);
4675 
4676   /* If there's no destination region within the current function,
4677      redirection is trivial via removing the throwing statements from
4678      the EH region, removing the EH edges, and allowing the block
4679      to go unreachable.  */
4680   if (new_region == NULL)
4681     {
4682       gcc_assert (e_out == NULL);
4683       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4684 	if (e->flags & EDGE_EH)
4685 	  {
4686 	    gimple *stmt = last_stmt (e->src);
4687 	    remove_stmt_from_eh_lp (stmt);
4688 	    remove_edge (e);
4689 	  }
4690 	else
4691 	  ei_next (&ei);
4692       goto succeed;
4693     }
4694 
4695   /* If the destination region is a MUST_NOT_THROW, allow the runtime
4696      to handle the abort and allow the blocks to go unreachable.  */
4697   if (new_region->type == ERT_MUST_NOT_THROW)
4698     {
4699       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4700 	if (e->flags & EDGE_EH)
4701 	  {
4702 	    gimple *stmt = last_stmt (e->src);
4703 	    remove_stmt_from_eh_lp (stmt);
4704 	    add_stmt_to_eh_lp (stmt, new_lp_nr);
4705 	    remove_edge (e);
4706 	  }
4707 	else
4708 	  ei_next (&ei);
4709       goto succeed;
4710     }
4711 
4712   /* Try to redirect the EH edges and merge the PHIs into the destination
4713      landing pad block.  If the merge succeeds, we'll already have redirected
4714      all the EH edges.  The handler itself will go unreachable if there were
4715      no normal edges.  */
4716   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4717     goto succeed;
4718 
4719   /* Finally, if all input edges are EH edges, then we can (potentially)
4720      reduce the number of transfers from the runtime by moving the landing
4721      pad from the original region to the new region.  This is a win when
4722      we remove the last CLEANUP region along a particular exception
4723      propagation path.  Since nothing changes except for the region with
4724      which the landing pad is associated, the PHI nodes do not need to be
4725      adjusted at all.  */
4726   if (!has_non_eh_pred)
4727     {
4728       cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4729       if (dump_file && (dump_flags & TDF_DETAILS))
4730 	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4731 		 lp->index, new_region->index);
4732 
4733       /* ??? The CFG didn't change, but we may have rendered the
4734 	 old EH region unreachable.  Trigger a cleanup there.  */
4735       return true;
4736     }
4737 
4738   return ret;
4739 
4740  succeed:
4741   if (dump_file && (dump_flags & TDF_DETAILS))
4742     fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4743   remove_eh_landing_pad (lp);
4744   return true;
4745 }
4746 
4747 /* Do a post-order traversal of the EH region tree.  Examine each
4748    post_landing_pad block and see if we can eliminate it as empty.  */
4749 
4750 static bool
4751 cleanup_all_empty_eh (void)
4752 {
4753   bool changed = false;
4754   eh_landing_pad lp;
4755   int i;
4756 
4757   /* Ideally we'd walk the region tree and process LPs inner to outer
4758      to avoid quadratic behavior in EH redirection.  Walking the LP array
4759      in reverse seems to be an approximation of that.  */
4760   for (i = vec_safe_length (cfun->eh->lp_array) - 1; i >= 1; --i)
4761     {
4762       lp = (*cfun->eh->lp_array)[i];
4763       if (lp)
4764 	changed |= cleanup_empty_eh (lp);
4765     }
4766 
4767   return changed;
4768 }
4769 
4770 /* Perform cleanups and lowering of exception handling:
4771     1) cleanup regions with handlers doing nothing are optimized out
4772     2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
4773     3) info about regions containing instructions, and regions reachable
4774        via local EH edges, is collected
4775     4) the EH tree is pruned of regions that are no longer necessary.
4776 
4777    TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4778 	 Unify those that have the same failure decl and locus.
4779 */
4780 
4781 static unsigned int
4782 execute_cleanup_eh_1 (void)
4783 {
4784   /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4785      looking up unreachable landing pads.  */
4786   remove_unreachable_handlers ();
4787 
4788   /* Watch out for the region tree vanishing when everything in it was unreachable.  */
4789   if (cfun->eh->region_tree)
4790     {
4791       bool changed = false;
4792 
4793       if (optimize)
4794 	changed |= unsplit_all_eh ();
4795       changed |= cleanup_all_empty_eh ();
4796 
4797       if (changed)
4798 	{
4799 	  free_dominance_info (CDI_DOMINATORS);
4800 	  free_dominance_info (CDI_POST_DOMINATORS);
4801 
4802           /* We delayed all basic block deletion, as we may have performed
4803 	     cleanups on EH edges while non-EH edges were still present.  */
4804 	  delete_unreachable_blocks ();
4805 
4806 	  /* We manipulated the landing pads.  Remove any region that no
4807 	     longer has a landing pad.  */
4808 	  remove_unreachable_handlers_no_lp ();
4809 
4810 	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4811 	}
4812     }
4813 
4814   return 0;
4815 }
4816 
4817 namespace {
4818 
4819 const pass_data pass_data_cleanup_eh =
4820 {
4821   GIMPLE_PASS, /* type */
4822   "ehcleanup", /* name */
4823   OPTGROUP_NONE, /* optinfo_flags */
4824   TV_TREE_EH, /* tv_id */
4825   PROP_gimple_lcf, /* properties_required */
4826   0, /* properties_provided */
4827   0, /* properties_destroyed */
4828   0, /* todo_flags_start */
4829   0, /* todo_flags_finish */
4830 };
4831 
4832 class pass_cleanup_eh : public gimple_opt_pass
4833 {
4834 public:
4835   pass_cleanup_eh (gcc::context *ctxt)
4836     : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4837   {}
4838 
4839   /* opt_pass methods: */
4840   opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4841   virtual bool gate (function *fun)
4842     {
4843       return fun->eh != NULL && fun->eh->region_tree != NULL;
4844     }
4845 
4846   virtual unsigned int execute (function *);
4847 
4848 }; // class pass_cleanup_eh
4849 
4850 unsigned int
4851 pass_cleanup_eh::execute (function *fun)
4852 {
4853   int ret = execute_cleanup_eh_1 ();
4854 
4855   /* If the function no longer needs an EH personality routine
4856      clear it.  This exposes cross-language inlining opportunities
4857      and avoids references to a never defined personality routine.  */
4858   if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4859       && function_needs_eh_personality (fun) != eh_personality_lang)
4860     DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4861 
4862   return ret;
4863 }
4864 
4865 } // anon namespace
4866 
4867 gimple_opt_pass *
4868 make_pass_cleanup_eh (gcc::context *ctxt)
4869 {
4870   return new pass_cleanup_eh (ctxt);
4871 }
4872 
4873 /* Disable warnings about missing quoting in GCC diagnostics for
4874    the verification errors.  Their format strings don't follow GCC
4875    diagnostic conventions but are only used for debugging.  */
4876 #if __GNUC__ >= 10
4877 #  pragma GCC diagnostic push
4878 #  pragma GCC diagnostic ignored "-Wformat-diag"
4879 #endif
4880 
4881 /* Verify that the BB containing STMT as its last statement has precisely
4882    the edges that make_eh_edges would create.  */
4883 
4884 DEBUG_FUNCTION bool
4885 verify_eh_edges (gimple *stmt)
4886 {
4887   basic_block bb = gimple_bb (stmt);
4888   eh_landing_pad lp = NULL;
4889   int lp_nr;
4890   edge_iterator ei;
4891   edge e, eh_edge;
4892 
4893   lp_nr = lookup_stmt_eh_lp (stmt);
4894   if (lp_nr > 0)
4895     lp = get_eh_landing_pad_from_number (lp_nr);
4896 
4897   eh_edge = NULL;
4898   FOR_EACH_EDGE (e, ei, bb->succs)
4899     {
4900       if (e->flags & EDGE_EH)
4901 	{
4902 	  if (eh_edge)
4903 	    {
4904 	      error ("BB %i has multiple EH edges", bb->index);
4905 	      return true;
4906 	    }
4907 	  else
4908 	    eh_edge = e;
4909 	}
4910     }
4911 
4912   if (lp == NULL)
4913     {
4914       if (eh_edge)
4915 	{
4916 	  error ("BB %i cannot throw but has an EH edge", bb->index);
4917 	  return true;
4918 	}
4919       return false;
4920     }
4921 
4922   if (!stmt_could_throw_p (cfun, stmt))
4923     {
4924       error ("BB %i last statement has incorrectly set lp", bb->index);
4925       return true;
4926     }
4927 
4928   if (eh_edge == NULL)
4929     {
4930       error ("BB %i is missing an EH edge", bb->index);
4931       return true;
4932     }
4933 
4934   if (eh_edge->dest != label_to_block (cfun, lp->post_landing_pad))
4935     {
4936       error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4937       return true;
4938     }
4939 
4940   return false;
4941 }
4942 
4943 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */
4944 
4945 DEBUG_FUNCTION bool
4946 verify_eh_dispatch_edge (geh_dispatch *stmt)
4947 {
4948   eh_region r;
4949   eh_catch c;
4950   basic_block src, dst;
4951   bool want_fallthru = true;
4952   edge_iterator ei;
4953   edge e, fall_edge;
4954 
4955   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4956   src = gimple_bb (stmt);
4957 
4958   FOR_EACH_EDGE (e, ei, src->succs)
4959     gcc_assert (e->aux == NULL);
4960 
4961   switch (r->type)
4962     {
4963     case ERT_TRY:
4964       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4965 	{
4966 	  dst = label_to_block (cfun, c->label);
4967 	  e = find_edge (src, dst);
4968 	  if (e == NULL)
4969 	    {
4970 	      error ("BB %i is missing an edge", src->index);
4971 	      return true;
4972 	    }
4973 	  e->aux = (void *)e;
4974 
4975 	  /* A catch-all handler doesn't have a fallthru.  */
4976 	  if (c->type_list == NULL)
4977 	    {
4978 	      want_fallthru = false;
4979 	      break;
4980 	    }
4981 	}
4982       break;
4983 
4984     case ERT_ALLOWED_EXCEPTIONS:
4985       dst = label_to_block (cfun, r->u.allowed.label);
4986       e = find_edge (src, dst);
4987       if (e == NULL)
4988 	{
4989 	  error ("BB %i is missing an edge", src->index);
4990 	  return true;
4991 	}
4992       e->aux = (void *)e;
4993       break;
4994 
4995     default:
4996       gcc_unreachable ();
4997     }
4998 
4999   fall_edge = NULL;
5000   FOR_EACH_EDGE (e, ei, src->succs)
5001     {
5002       if (e->flags & EDGE_FALLTHRU)
5003 	{
5004 	  if (fall_edge != NULL)
5005 	    {
5006 	      error ("BB %i too many fallthru edges", src->index);
5007 	      return true;
5008 	    }
5009 	  fall_edge = e;
5010 	}
5011       else if (e->aux)
5012 	e->aux = NULL;
5013       else
5014 	{
5015 	  error ("BB %i has incorrect edge", src->index);
5016 	  return true;
5017 	}
5018     }
5019   if ((fall_edge != NULL) ^ want_fallthru)
5020     {
5021       error ("BB %i has incorrect fallthru edge", src->index);
5022       return true;
5023     }
5024 
5025   return false;
5026 }
5027 
5028 #if __GNUC__ >= 10
5029 #  pragma GCC diagnostic pop
5030 #endif
5031