1 /* Exception handling semantics and decomposition for trees.
2    Copyright (C) 2003-2016 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10 
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "cgraph.h"
31 #include "diagnostic-core.h"
32 #include "fold-const.h"
33 #include "calls.h"
34 #include "except.h"
35 #include "cfganal.h"
36 #include "cfgcleanup.h"
37 #include "tree-eh.h"
38 #include "gimple-iterator.h"
39 #include "tree-cfg.h"
40 #include "tree-into-ssa.h"
41 #include "tree-ssa.h"
42 #include "tree-inline.h"
43 #include "langhooks.h"
44 #include "cfgloop.h"
45 #include "gimple-low.h"
46 
47 /* In some instances a tree and a gimple need to be stored in the same table,
48    i.e. in hash tables.  This is a structure to do this.  */
49 typedef union {tree *tp; tree t; gimple *g;} treemple;
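/* A minimal usage sketch (hypothetical variables LABEL and TRY_STMT): the
   same slot can hold either form, and only one member is meaningful at a
   time, depending on context:

	treemple key;
	key.t = label;		when the child is a LABEL_DECL (a tree)
	key.g = try_stmt;	when the child is a GIMPLE_TRY statement

   The code that stores the value also knows which member to read back.  */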
50 
51 /* Misc functions used in this file.  */
52 
53 /* Remember and lookup EH landing pad data for arbitrary statements.
54    Really this means any statement that could_throw_p.  We could
55    stuff this information into the stmt_ann data structure, but:
56 
57    (1) We absolutely rely on this information being kept until
58    we get to rtl.  Once we're done with lowering here, if we lose
59    the information there's no way to recover it!
60 
61    (2) There are many more statements that *cannot* throw as
62    compared to those that can.  We should be saving some amount
63    of space by only allocating memory for those that can throw.  */
64 
65 /* Add statement T in function IFUN to landing pad NUM.  */
66 
67 static void
68 add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
69 {
70   gcc_assert (num != 0);
71 
72   if (!get_eh_throw_stmt_table (ifun))
73     set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));
74 
75   gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
76 }
77 
78 /* Add statement T in the current function (cfun) to EH landing pad NUM.  */
79 
80 void
81 add_stmt_to_eh_lp (gimple *t, int num)
82 {
83   add_stmt_to_eh_lp_fn (cfun, t, num);
84 }
85 
86 /* Add statement T to the single EH landing pad in REGION.  */
87 
88 static void
89 record_stmt_eh_region (eh_region region, gimple *t)
90 {
91   if (region == NULL)
92     return;
93   if (region->type == ERT_MUST_NOT_THROW)
94     add_stmt_to_eh_lp_fn (cfun, t, -region->index);
95   else
96     {
97       eh_landing_pad lp = region->landing_pads;
98       if (lp == NULL)
99 	lp = gen_eh_landing_pad (region);
100       else
101 	gcc_assert (lp->next_lp == NULL);
102       add_stmt_to_eh_lp_fn (cfun, t, lp->index);
103     }
104 }
105 
106 
107 /* Remove statement T in function IFUN from its EH landing pad.  */
108 
109 bool
110 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
111 {
112   if (!get_eh_throw_stmt_table (ifun))
113     return false;
114 
115   if (!get_eh_throw_stmt_table (ifun)->get (t))
116     return false;
117 
118   get_eh_throw_stmt_table (ifun)->remove (t);
119   return true;
120 }
121 
122 
123 /* Remove statement T in the current function (cfun) from its
124    EH landing pad.  */
125 
126 bool
127 remove_stmt_from_eh_lp (gimple *t)
128 {
129   return remove_stmt_from_eh_lp_fn (cfun, t);
130 }
131 
132 /* Determine if statement T is inside an EH region in function IFUN.
133    Positive numbers indicate a landing pad index; negative numbers
134    indicate a MUST_NOT_THROW region index; zero indicates that the
135    statement is not recorded in the region table.  */
136 
137 int
138 lookup_stmt_eh_lp_fn (struct function *ifun, gimple *t)
139 {
140   if (ifun->eh->throw_stmt_table == NULL)
141     return 0;
142 
143   int *lp_nr = ifun->eh->throw_stmt_table->get (t);
144   return lp_nr ? *lp_nr : 0;
145 }
146 
147 /* Likewise, but always use the current function.  */
148 
149 int
150 lookup_stmt_eh_lp (gimple *t)
151 {
152   /* We can get called from initialized data when -fnon-call-exceptions
153      is on; prevent crash.  */
154   if (!cfun)
155     return 0;
156   return lookup_stmt_eh_lp_fn (cfun, t);
157 }
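/* A usage sketch, with hypothetical helpers HANDLE_THROWING_STMT and
   HANDLE_MUST_NOT_THROW standing in for whatever a caller might do:

	int lp_nr = lookup_stmt_eh_lp (stmt);
	if (lp_nr > 0)
	  handle_throwing_stmt (stmt, lp_nr);
	else if (lp_nr < 0)
	  handle_must_not_throw (stmt, -lp_nr);

   A positive value names a landing pad, a negative value names a
   MUST_NOT_THROW region, and zero means STMT is not in the table.  */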
158 
159 /* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
160    nodes and LABEL_DECL nodes.  We will use this during the second phase to
161    determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */
162 
163 struct finally_tree_node
164 {
165   /* When storing a GIMPLE_TRY, we have to record a gimple.  However
166      when deciding whether a GOTO to a certain LABEL_DECL (which is a
167      tree) leaves the TRY block, it's necessary to record a tree in
168      this field.  Thus a treemple is used. */
169   treemple child;
170   gtry *parent;
171 };
172 
173 /* Hashtable helpers.  */
174 
175 struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
176 {
177   static inline hashval_t hash (const finally_tree_node *);
178   static inline bool equal (const finally_tree_node *,
179 			    const finally_tree_node *);
180 };
181 
182 inline hashval_t
183 finally_tree_hasher::hash (const finally_tree_node *v)
184 {
185   return (intptr_t)v->child.t >> 4;
186 }
187 
188 inline bool
189 finally_tree_hasher::equal (const finally_tree_node *v,
190 			    const finally_tree_node *c)
191 {
192   return v->child.t == c->child.t;
193 }
194 
195 /* Note that this table is *not* marked GTY.  It is short-lived.  */
196 static hash_table<finally_tree_hasher> *finally_tree;
197 
198 static void
199 record_in_finally_tree (treemple child, gtry *parent)
200 {
201   struct finally_tree_node *n;
202   finally_tree_node **slot;
203 
204   n = XNEW (struct finally_tree_node);
205   n->child = child;
206   n->parent = parent;
207 
208   slot = finally_tree->find_slot (n, INSERT);
209   gcc_assert (!*slot);
210   *slot = n;
211 }
212 
213 static void
214 collect_finally_tree (gimple *stmt, gtry *region);
215 
216 /* Go through the gimple sequence.  Works with collect_finally_tree to
217    record all GIMPLE_LABEL and GIMPLE_TRY statements. */
218 
219 static void
220 collect_finally_tree_1 (gimple_seq seq, gtry *region)
221 {
222   gimple_stmt_iterator gsi;
223 
224   for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
225     collect_finally_tree (gsi_stmt (gsi), region);
226 }
227 
228 static void
229 collect_finally_tree (gimple *stmt, gtry *region)
230 {
231   treemple temp;
232 
233   switch (gimple_code (stmt))
234     {
235     case GIMPLE_LABEL:
236       temp.t = gimple_label_label (as_a <glabel *> (stmt));
237       record_in_finally_tree (temp, region);
238       break;
239 
240     case GIMPLE_TRY:
241       if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
242         {
243           temp.g = stmt;
244           record_in_finally_tree (temp, region);
245           collect_finally_tree_1 (gimple_try_eval (stmt),
246 				  as_a <gtry *> (stmt));
247 	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
248         }
249       else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
250         {
251           collect_finally_tree_1 (gimple_try_eval (stmt), region);
252           collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
253         }
254       break;
255 
256     case GIMPLE_CATCH:
257       collect_finally_tree_1 (gimple_catch_handler (
258 				 as_a <gcatch *> (stmt)),
259 			      region);
260       break;
261 
262     case GIMPLE_EH_FILTER:
263       collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
264       break;
265 
266     case GIMPLE_EH_ELSE:
267       {
268 	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
269 	collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
270 	collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
271       }
272       break;
273 
274     default:
275       /* A type, a decl, or some kind of statement that we're not
276 	 interested in.  Don't walk them.  */
277       break;
278     }
279 }
280 
281 
282 /* Use the finally tree to determine if a jump from START to TARGET
283    would leave the try_finally node that START lives in.  */
284 
285 static bool
286 outside_finally_tree (treemple start, gimple *target)
287 {
288   struct finally_tree_node n, *p;
289 
290   do
291     {
292       n.child = start;
293       p = finally_tree->find (&n);
294       if (!p)
295 	return true;
296       start.g = p->parent;
297     }
298   while (start.g != target);
299 
300   return false;
301 }
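/* For instance, in the source-level sketch below (same notation as the
   other examples in this file),

	try {
	  goto out;
	  goto in;
	  in:;
	} finally { cleanup (); }
	out:;

   the label IN was recorded with the enclosing GIMPLE_TRY as its parent,
   so walking up from IN reaches TARGET and we return false; OUT has no
   parent chain leading to TARGET, so the walk fails and we return true.  */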
302 
303 /* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
304    nodes into a set of gotos, magic labels, and eh regions.
305    The eh region creation is straight-forward, but frobbing all the gotos
306    and such into shape isn't.  */
307 
308 /* The sequence into which we record all EH stuff.  This will be
309    placed at the end of the function when we're all done.  */
310 static gimple_seq eh_seq;
311 
312 /* Record whether an EH region contains something that can throw,
313    indexed by EH region number.  */
314 static bitmap eh_region_may_contain_throw_map;
315 
316 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
317    statements that are seen to escape this GIMPLE_TRY_FINALLY node.
318    The idea is to record a gimple statement for everything except for
319    the conditionals, which get their labels recorded. Since labels are
320    of type 'tree', we need this node to store both gimple and tree
321    objects.  REPL_STMT is the sequence used to replace the goto/return
322    statement.  CONT_STMT is used to store the statement that allows
323    the return/goto to jump to the original destination. */
324 
325 struct goto_queue_node
326 {
327   treemple stmt;
328   location_t location;
329   gimple_seq repl_stmt;
330   gimple *cont_stmt;
331   int index;
332   /* This is used when index >= 0 to indicate that stmt is a label (as
333      opposed to a goto stmt).  */
334   int is_label;
335 };
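/* As a concrete source-level sketch of what ends up in the queue:

	try {
	  if (cond) goto out;
	  return;
	} finally {
	  cleanup ();
	}
	out:;

   Both the goto and the return escape the TRY_FINALLY, so each gets a
   goto_queue_node: REPL_STMT becomes the code that branches to the finally
   block instead, and CONT_STMT the goto or return that continues on to the
   original destination once the finally code has run.  */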
336 
337 /* State of the world while lowering.  */
338 
339 struct leh_state
340 {
341   /* What's "current" while constructing the eh region tree.  These
342      correspond to variables of the same name in cfun->eh, which we
343      don't have easy access to.  */
344   eh_region cur_region;
345 
346   /* What's "current" for the purposes of __builtin_eh_pointer.  For
347      a CATCH, this is the associated TRY.  For an EH_FILTER, this is
348      the associated ALLOWED_EXCEPTIONS, etc.  */
349   eh_region ehp_region;
350 
351   /* Processing of TRY_FINALLY requires a bit more state.  This is
352      split out into a separate structure so that we don't have to
353      copy so much when processing other nodes.  */
354   struct leh_tf_state *tf;
355 };
356 
357 struct leh_tf_state
358 {
359   /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
360      try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
361      this so that outside_finally_tree can reliably reference the tree used
362      in the collect_finally_tree data structures.  */
363   gtry *try_finally_expr;
364   gtry *top_p;
365 
366   /* While lowering a top_p, it is usually expanded into multiple statements,
367      so we need the following field to store them.  */
368   gimple_seq top_p_seq;
369 
370   /* The state outside this try_finally node.  */
371   struct leh_state *outer;
372 
373   /* The exception region created for it.  */
374   eh_region region;
375 
376   /* The goto queue.  */
377   struct goto_queue_node *goto_queue;
378   size_t goto_queue_size;
379   size_t goto_queue_active;
380 
381   /* Pointer map to help in searching goto_queue when it is large.  */
382   hash_map<gimple *, goto_queue_node *> *goto_queue_map;
383 
384   /* The set of unique labels seen as entries in the goto queue.  */
385   vec<tree> dest_array;
386 
387   /* A label to be added at the end of the completed transformed
388      sequence.  It will be set if may_fallthru was true *at one time*,
389      though subsequent transformations may have cleared that flag.  */
390   tree fallthru_label;
391 
392   /* True if it is possible to fall out the bottom of the try block.
393      Cleared if the fallthru is converted to a goto.  */
394   bool may_fallthru;
395 
396   /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
397   bool may_return;
398 
399   /* True if the finally block can receive an exception edge.
400      Cleared if the exception case is handled by code duplication.  */
401   bool may_throw;
402 };
403 
404 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);
405 
406 /* Search for STMT in the goto queue.  Return the replacement,
407    or null if the statement isn't in the queue.  */
408 
409 #define LARGE_GOTO_QUEUE 20
410 
411 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
412 
413 static gimple_seq
414 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
415 {
416   unsigned int i;
417 
418   if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
419     {
420       for (i = 0; i < tf->goto_queue_active; i++)
421 	if ( tf->goto_queue[i].stmt.g == stmt.g)
422 	  return tf->goto_queue[i].repl_stmt;
423       return NULL;
424     }
425 
426   /* If we have a large number of entries in the goto_queue, create a
427      pointer map and use that for searching.  */
428 
429   if (!tf->goto_queue_map)
430     {
431       tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
432       for (i = 0; i < tf->goto_queue_active; i++)
433 	{
434 	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
435 						  &tf->goto_queue[i]);
436 	  gcc_assert (!existed);
437 	}
438     }
439 
440   goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
441   if (slot != NULL)
442     return ((*slot)->repl_stmt);
443 
444   return NULL;
445 }
446 
447 /* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
448    lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
449    then we can just splat it in, otherwise we add the new stmts immediately
450    after the GIMPLE_COND and redirect.  */
451 
452 static void
453 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
454 				gimple_stmt_iterator *gsi)
455 {
456   tree label;
457   gimple_seq new_seq;
458   treemple temp;
459   location_t loc = gimple_location (gsi_stmt (*gsi));
460 
461   temp.tp = tp;
462   new_seq = find_goto_replacement (tf, temp);
463   if (!new_seq)
464     return;
465 
466   if (gimple_seq_singleton_p (new_seq)
467       && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
468     {
469       *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
470       return;
471     }
472 
473   label = create_artificial_label (loc);
474   /* Set the new label for the GIMPLE_COND */
475   *tp = label;
476 
477   gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
478   gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
479 }
480 
481 /* The real work of replace_goto_queue.  Returns with GSI updated to
482    point to the next statement.  */
483 
484 static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
485 
486 static void
487 replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
488 		      gimple_stmt_iterator *gsi)
489 {
490   gimple_seq seq;
491   treemple temp;
492   temp.g = NULL;
493 
494   switch (gimple_code (stmt))
495     {
496     case GIMPLE_GOTO:
497     case GIMPLE_RETURN:
498       temp.g = stmt;
499       seq = find_goto_replacement (tf, temp);
500       if (seq)
501 	{
502 	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
503 	  gsi_remove (gsi, false);
504 	  return;
505 	}
506       break;
507 
508     case GIMPLE_COND:
509       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
510       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
511       break;
512 
513     case GIMPLE_TRY:
514       replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
515       replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
516       break;
517     case GIMPLE_CATCH:
518       replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
519 				      as_a <gcatch *> (stmt)),
520 				    tf);
521       break;
522     case GIMPLE_EH_FILTER:
523       replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
524       break;
525     case GIMPLE_EH_ELSE:
526       {
527 	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
528 	replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
529 				      tf);
530 	replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
531 				      tf);
532       }
533       break;
534 
535     default:
536       /* These won't have gotos in them.  */
537       break;
538     }
539 
540   gsi_next (gsi);
541 }
542 
543 /* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */
544 
545 static void
546 replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
547 {
548   gimple_stmt_iterator gsi = gsi_start (*seq);
549 
550   while (!gsi_end_p (gsi))
551     replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
552 }
553 
554 /* Replace all goto queue members.  */
555 
556 static void
557 replace_goto_queue (struct leh_tf_state *tf)
558 {
559   if (tf->goto_queue_active == 0)
560     return;
561   replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
562   replace_goto_queue_stmt_list (&eh_seq, tf);
563 }
564 
565 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
566    data to be added, IS_LABEL indicates whether NEW_STMT is a label or
567    a gimple return. */
568 
569 static void
570 record_in_goto_queue (struct leh_tf_state *tf,
571                       treemple new_stmt,
572                       int index,
573                       bool is_label,
574 		      location_t location)
575 {
576   size_t active, size;
577   struct goto_queue_node *q;
578 
579   gcc_assert (!tf->goto_queue_map);
580 
581   active = tf->goto_queue_active;
582   size = tf->goto_queue_size;
583   if (active >= size)
584     {
585       size = (size ? size * 2 : 32);
586       tf->goto_queue_size = size;
587       tf->goto_queue
588          = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
589     }
590 
591   q = &tf->goto_queue[active];
592   tf->goto_queue_active = active + 1;
593 
594   memset (q, 0, sizeof (*q));
595   q->stmt = new_stmt;
596   q->index = index;
597   q->location = location;
598   q->is_label = is_label;
599 }
600 
601 /* Record the LABEL label in the goto queue contained in TF.
602    TF is not null.  */
603 
604 static void
605 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
606 			    location_t location)
607 {
608   int index;
609   treemple temp, new_stmt;
610 
611   if (!label)
612     return;
613 
614   /* Computed and non-local gotos do not get processed.  Given
615      their nature we can neither tell whether we've escaped the
616      finally block nor redirect them if we knew.  */
617   if (TREE_CODE (label) != LABEL_DECL)
618     return;
619 
620   /* No need to record gotos that don't leave the try block.  */
621   temp.t = label;
622   if (!outside_finally_tree (temp, tf->try_finally_expr))
623     return;
624 
625   if (! tf->dest_array.exists ())
626     {
627       tf->dest_array.create (10);
628       tf->dest_array.quick_push (label);
629       index = 0;
630     }
631   else
632     {
633       int n = tf->dest_array.length ();
634       for (index = 0; index < n; ++index)
635         if (tf->dest_array[index] == label)
636           break;
637       if (index == n)
638         tf->dest_array.safe_push (label);
639     }
640 
641   /* In the case of a GOTO we want to record the destination label,
642      since with a GIMPLE_COND we have easy access to the then/else
643      labels. */
644   new_stmt = stmt;
645   record_in_goto_queue (tf, new_stmt, index, true, location);
646 }
647 
648 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
649    node, and if so record that fact in the goto queue associated with that
650    try_finally node.  */
651 
652 static void
653 maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
654 {
655   struct leh_tf_state *tf = state->tf;
656   treemple new_stmt;
657 
658   if (!tf)
659     return;
660 
661   switch (gimple_code (stmt))
662     {
663     case GIMPLE_COND:
664       {
665 	gcond *cond_stmt = as_a <gcond *> (stmt);
666 	new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
667 	record_in_goto_queue_label (tf, new_stmt,
668 				    gimple_cond_true_label (cond_stmt),
669 				    EXPR_LOCATION (*new_stmt.tp));
670 	new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
671 	record_in_goto_queue_label (tf, new_stmt,
672 				    gimple_cond_false_label (cond_stmt),
673 				    EXPR_LOCATION (*new_stmt.tp));
674       }
675       break;
676     case GIMPLE_GOTO:
677       new_stmt.g = stmt;
678       record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
679 				  gimple_location (stmt));
680       break;
681 
682     case GIMPLE_RETURN:
683       tf->may_return = true;
684       new_stmt.g = stmt;
685       record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
686       break;
687 
688     default:
689       gcc_unreachable ();
690     }
691 }
692 
693 
694 #if CHECKING_P
695 /* We do not process GIMPLE_SWITCHes for now.  As long as the original source
696    was in fact structured, and we've not yet done jump threading, then none
697    of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this.  */
698 
699 static void
700 verify_norecord_switch_expr (struct leh_state *state,
701 			     gswitch *switch_expr)
702 {
703   struct leh_tf_state *tf = state->tf;
704   size_t i, n;
705 
706   if (!tf)
707     return;
708 
709   n = gimple_switch_num_labels (switch_expr);
710 
711   for (i = 0; i < n; ++i)
712     {
713       treemple temp;
714       tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
715       temp.t = lab;
716       gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
717     }
718 }
719 #else
720 #define verify_norecord_switch_expr(state, switch_expr)
721 #endif
722 
723 /* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
724    non-null, insert it before the new branch.  */
725 
726 static void
727 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
728 {
729   gimple *x;
730 
731   /* In the case of a return, the queue node must be a gimple statement.  */
732   gcc_assert (!q->is_label);
733 
734   /* Note that the return value may have already been computed, e.g.,
735 
736 	int x;
737 	int foo (void)
738 	{
739 	  x = 0;
740 	  try {
741 	    return x;
742 	  } finally {
743 	    x++;
744 	  }
745 	}
746 
747      should return 0, not 1.  We don't have to do anything to make
748      this happen because the return value has been placed in the
749      RESULT_DECL already.  */
750 
751   q->cont_stmt = q->stmt.g;
752 
753   if (mod)
754     gimple_seq_add_seq (&q->repl_stmt, mod);
755 
756   x = gimple_build_goto (finlab);
757   gimple_set_location (x, q->location);
758   gimple_seq_add_stmt (&q->repl_stmt, x);
759 }
760 
761 /* Similar, but easier, for GIMPLE_GOTO.  */
762 
763 static void
764 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
765 		     struct leh_tf_state *tf)
766 {
767   ggoto *x;
768 
769   gcc_assert (q->is_label);
770 
771   q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
772 
773   if (mod)
774     gimple_seq_add_seq (&q->repl_stmt, mod);
775 
776   x = gimple_build_goto (finlab);
777   gimple_set_location (x, q->location);
778   gimple_seq_add_stmt (&q->repl_stmt, x);
779 }
780 
781 /* Emit a standard landing pad sequence into SEQ for REGION.  */
782 
783 static void
784 emit_post_landing_pad (gimple_seq *seq, eh_region region)
785 {
786   eh_landing_pad lp = region->landing_pads;
787   glabel *x;
788 
789   if (lp == NULL)
790     lp = gen_eh_landing_pad (region);
791 
792   lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
793   EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
794 
795   x = gimple_build_label (lp->post_landing_pad);
796   gimple_seq_add_stmt (seq, x);
797 }
798 
799 /* Emit a RESX statement into SEQ for REGION.  */
800 
801 static void
802 emit_resx (gimple_seq *seq, eh_region region)
803 {
804   gresx *x = gimple_build_resx (region->index);
805   gimple_seq_add_stmt (seq, x);
806   if (region->outer)
807     record_stmt_eh_region (region->outer, x);
808 }
809 
810 /* Emit an EH_DISPATCH statement into SEQ for REGION.  */
811 
812 static void
813 emit_eh_dispatch (gimple_seq *seq, eh_region region)
814 {
815   geh_dispatch *x = gimple_build_eh_dispatch (region->index);
816   gimple_seq_add_stmt (seq, x);
817 }
818 
819 /* Note that the current EH region may contain a throw, or a
820    call to a function which itself may contain a throw.  */
821 
822 static void
823 note_eh_region_may_contain_throw (eh_region region)
824 {
825   while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
826     {
827       if (region->type == ERT_MUST_NOT_THROW)
828 	break;
829       region = region->outer;
830       if (region == NULL)
831 	break;
832     }
833 }
834 
835 /* Check if REGION has been marked as containing a throw.  If REGION is
836    NULL, this predicate is false.  */
837 
838 static inline bool
839 eh_region_may_contain_throw (eh_region r)
840 {
841   return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
842 }
843 
844 /* We want to transform
845 	try { body; } catch { stuff; }
846    to
847 	normal_sequence:
848 	  body;
849 	  over:
850 	eh_sequence:
851 	  landing_pad:
852 	  stuff;
853 	  goto over;
854 
855    TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
856    should be placed before the second operand, or NULL.  OVER is
857    an existing label that should be put at the exit, or NULL.  */
858 
859 static gimple_seq
860 frob_into_branch_around (gtry *tp, eh_region region, tree over)
861 {
862   gimple *x;
863   gimple_seq cleanup, result;
864   location_t loc = gimple_location (tp);
865 
866   cleanup = gimple_try_cleanup (tp);
867   result = gimple_try_eval (tp);
868 
869   if (region)
870     emit_post_landing_pad (&eh_seq, region);
871 
872   if (gimple_seq_may_fallthru (cleanup))
873     {
874       if (!over)
875 	over = create_artificial_label (loc);
876       x = gimple_build_goto (over);
877       gimple_set_location (x, loc);
878       gimple_seq_add_stmt (&cleanup, x);
879     }
880   gimple_seq_add_seq (&eh_seq, cleanup);
881 
882   if (over)
883     {
884       x = gimple_build_label (over);
885       gimple_seq_add_stmt (&result, x);
886     }
887   return result;
888 }
889 
890 /* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
891    Make sure to record all new labels found.  */
892 
893 static gimple_seq
894 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
895 			     location_t loc)
896 {
897   gtry *region = NULL;
898   gimple_seq new_seq;
899   gimple_stmt_iterator gsi;
900 
901   new_seq = copy_gimple_seq_and_replace_locals (seq);
902 
903   for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
904     {
905       gimple *stmt = gsi_stmt (gsi);
906       /* We duplicate __builtin_stack_restore at -O0 in the hope of eliminating
907 	 it on the EH paths.  When it is not eliminated, make it transparent in
908 	 the debug info.  */
909       if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
910 	gimple_set_location (stmt, UNKNOWN_LOCATION);
911       else if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
912 	{
913 	  tree block = gimple_block (stmt);
914 	  gimple_set_location (stmt, loc);
915 	  gimple_set_block (stmt, block);
916 	}
917     }
918 
919   if (outer_state->tf)
920     region = outer_state->tf->try_finally_expr;
921   collect_finally_tree_1 (new_seq, region);
922 
923   return new_seq;
924 }
925 
926 /* A subroutine of lower_try_finally.  Create a fallthru label for
927    the given try_finally state.  The only tricky bit here is that
928    we have to make sure to record the label in our outer context.  */
929 
930 static tree
931 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
932 {
933   tree label = tf->fallthru_label;
934   treemple temp;
935 
936   if (!label)
937     {
938       label = create_artificial_label (gimple_location (tf->try_finally_expr));
939       tf->fallthru_label = label;
940       if (tf->outer->tf)
941         {
942           temp.t = label;
943           record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
944         }
945     }
946   return label;
947 }
948 
949 /* A subroutine of lower_try_finally.  If FINALLY consists of a
950    GIMPLE_EH_ELSE node, return it.  */
951 
952 static inline geh_else *
953 get_eh_else (gimple_seq finally)
954 {
955   gimple *x = gimple_seq_first_stmt (finally);
956   if (gimple_code (x) == GIMPLE_EH_ELSE)
957     {
958       gcc_assert (gimple_seq_singleton_p (finally));
959       return as_a <geh_else *> (x);
960     }
961   return NULL;
962 }
963 
964 /* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
965    langhook returns non-null, then the language requires that the exception
966    path out of a try_finally be treated specially.  To wit: the code within
967    the finally block may not itself throw an exception.  We have two choices
968    here. First we can duplicate the finally block and wrap it in a
969    must_not_throw region.  Second, we can generate code like
970 
971 	try {
972 	  finally_block;
973 	} catch {
974 	  if (fintmp == eh_edge)
975 	    protect_cleanup_actions;
976 	}
977 
978    where "fintmp" is the temporary used in the switch statement generation
979    alternative considered below.  For the nonce, we always choose the first
980    option.
981 
982    THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */
983 
984 static void
985 honor_protect_cleanup_actions (struct leh_state *outer_state,
986 			       struct leh_state *this_state,
987 			       struct leh_tf_state *tf)
988 {
989   gimple_seq finally = gimple_try_cleanup (tf->top_p);
990 
991   /* EH_ELSE doesn't come from user code; only compiler generated stuff.
992      It does need to be handled here, so as to separate the (different)
993      EH path from the normal path.  But we should not attempt to wrap
994      it with a must-not-throw node (which indeed gets in the way).  */
995   if (geh_else *eh_else = get_eh_else (finally))
996     {
997       gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
998       finally = gimple_eh_else_e_body (eh_else);
999 
1000       /* Let the ELSE see the exception that's being processed.  */
1001       eh_region save_ehp = this_state->ehp_region;
1002       this_state->ehp_region = this_state->cur_region;
1003       lower_eh_constructs_1 (this_state, &finally);
1004       this_state->ehp_region = save_ehp;
1005     }
1006   else
1007     {
1008       /* First check for nothing to do.  */
1009       if (lang_hooks.eh_protect_cleanup_actions == NULL)
1010 	return;
1011       tree actions = lang_hooks.eh_protect_cleanup_actions ();
1012       if (actions == NULL)
1013 	return;
1014 
1015       if (this_state)
1016 	finally = lower_try_finally_dup_block (finally, outer_state,
1017 	  gimple_location (tf->try_finally_expr));
1018 
1019       /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1020 	 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1021 	 to be in an enclosing scope, but needs to be implemented at this level
1022 	 to avoid a nesting violation (see wrap_temporary_cleanups in
1023 	 cp/decl.c).  Since it's logically at an outer level, we should call
1024 	 terminate before we get to it, so strip it away before adding the
1025 	 MUST_NOT_THROW filter.  */
1026       gimple_stmt_iterator gsi = gsi_start (finally);
1027       gimple *x = gsi_stmt (gsi);
1028       if (gimple_code (x) == GIMPLE_TRY
1029 	  && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1030 	  && gimple_try_catch_is_cleanup (x))
1031 	{
1032 	  gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1033 	  gsi_remove (&gsi, false);
1034 	}
1035 
1036       /* Wrap the block with protect_cleanup_actions as the action.  */
1037       geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
1038       gtry *try_stmt = gimple_build_try (finally,
1039 					 gimple_seq_alloc_with_stmt (eh_mnt),
1040 					 GIMPLE_TRY_CATCH);
1041       finally = lower_eh_must_not_throw (outer_state, try_stmt);
1042     }
1043 
1044   /* Drop all of this into the exception sequence.  */
1045   emit_post_landing_pad (&eh_seq, tf->region);
1046   gimple_seq_add_seq (&eh_seq, finally);
1047   if (gimple_seq_may_fallthru (finally))
1048     emit_resx (&eh_seq, tf->region);
1049 
1050   /* Having now been handled, EH isn't to be considered with
1051      the rest of the outgoing edges.  */
1052   tf->may_throw = false;
1053 }
1054 
1055 /* A subroutine of lower_try_finally.  We have determined that there is
1056    no fallthru edge out of the finally block.  This means that there is
1057    no outgoing edge corresponding to any incoming edge.  Restructure the
1058    try_finally node for this special case.  */
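/* For instance (sketch), a finally block that always transfers control
   elsewhere never falls through:

	try {
	  body;
	} finally {
	  cleanup ();
	  goto give_up;
	}

   Every incoming edge (escaping gotos, returns, and the EH edge if any)
   is then simply redirected to a single label placed at the head of the
   lowered finally code; nothing needs to follow it.  */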
1059 
1060 static void
1061 lower_try_finally_nofallthru (struct leh_state *state,
1062 			      struct leh_tf_state *tf)
1063 {
1064   tree lab;
1065   gimple *x;
1066   geh_else *eh_else;
1067   gimple_seq finally;
1068   struct goto_queue_node *q, *qe;
1069 
1070   lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1071 
1072   /* We expect that tf->top_p is a GIMPLE_TRY. */
1073   finally = gimple_try_cleanup (tf->top_p);
1074   tf->top_p_seq = gimple_try_eval (tf->top_p);
1075 
1076   x = gimple_build_label (lab);
1077   gimple_seq_add_stmt (&tf->top_p_seq, x);
1078 
1079   q = tf->goto_queue;
1080   qe = q + tf->goto_queue_active;
1081   for (; q < qe; ++q)
1082     if (q->index < 0)
1083       do_return_redirection (q, lab, NULL);
1084     else
1085       do_goto_redirection (q, lab, NULL, tf);
1086 
1087   replace_goto_queue (tf);
1088 
1089   /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
1090   eh_else = get_eh_else (finally);
1091   if (eh_else)
1092     {
1093       finally = gimple_eh_else_n_body (eh_else);
1094       lower_eh_constructs_1 (state, &finally);
1095       gimple_seq_add_seq (&tf->top_p_seq, finally);
1096 
1097       if (tf->may_throw)
1098 	{
1099 	  finally = gimple_eh_else_e_body (eh_else);
1100 	  lower_eh_constructs_1 (state, &finally);
1101 
1102 	  emit_post_landing_pad (&eh_seq, tf->region);
1103 	  gimple_seq_add_seq (&eh_seq, finally);
1104 	}
1105     }
1106   else
1107     {
1108       lower_eh_constructs_1 (state, &finally);
1109       gimple_seq_add_seq (&tf->top_p_seq, finally);
1110 
1111       if (tf->may_throw)
1112 	{
1113 	  emit_post_landing_pad (&eh_seq, tf->region);
1114 
1115 	  x = gimple_build_goto (lab);
1116 	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
1117 	  gimple_seq_add_stmt (&eh_seq, x);
1118 	}
1119     }
1120 }
1121 
1122 /* A subroutine of lower_try_finally.  We have determined that there is
1123    exactly one destination of the finally block.  Restructure the
1124    try_finally node for this special case.  */
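/* For instance (sketch): if the only way out of the try block is a single
   return,

	try { body; return; } finally { cleanup (); }

   lowers to roughly

	body;
	finally_label:
	cleanup ();
	return;

   i.e. the finally code appears exactly once and the original return,
   saved as CONT_STMT by do_return_redirection, is re-emitted after it.  */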
1125 
1126 static void
1127 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1128 {
1129   struct goto_queue_node *q, *qe;
1130   geh_else *eh_else;
1131   glabel *label_stmt;
1132   gimple *x;
1133   gimple_seq finally;
1134   gimple_stmt_iterator gsi;
1135   tree finally_label;
1136   location_t loc = gimple_location (tf->try_finally_expr);
1137 
1138   finally = gimple_try_cleanup (tf->top_p);
1139   tf->top_p_seq = gimple_try_eval (tf->top_p);
1140 
1141   /* Since there's only one destination, and the destination edge can only
1142      either be EH or non-EH, that implies that all of our incoming edges
1143      are of the same type.  Therefore we can lower EH_ELSE immediately.  */
1144   eh_else = get_eh_else (finally);
1145   if (eh_else)
1146     {
1147       if (tf->may_throw)
1148 	finally = gimple_eh_else_e_body (eh_else);
1149       else
1150 	finally = gimple_eh_else_n_body (eh_else);
1151     }
1152 
1153   lower_eh_constructs_1 (state, &finally);
1154 
1155   for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1156     {
1157       gimple *stmt = gsi_stmt (gsi);
1158       if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1159 	{
1160 	  tree block = gimple_block (stmt);
1161 	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1162 	  gimple_set_block (stmt, block);
1163 	}
1164     }
1165 
1166   if (tf->may_throw)
1167     {
1168       /* Only reachable via the exception edge.  Add the given label to
1169          the head of the FINALLY block.  Append a RESX at the end.  */
1170       emit_post_landing_pad (&eh_seq, tf->region);
1171       gimple_seq_add_seq (&eh_seq, finally);
1172       emit_resx (&eh_seq, tf->region);
1173       return;
1174     }
1175 
1176   if (tf->may_fallthru)
1177     {
1178       /* Only reachable via the fallthru edge.  Do nothing but let
1179 	 the two blocks run together; we'll fall out the bottom.  */
1180       gimple_seq_add_seq (&tf->top_p_seq, finally);
1181       return;
1182     }
1183 
1184   finally_label = create_artificial_label (loc);
1185   label_stmt = gimple_build_label (finally_label);
1186   gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);
1187 
1188   gimple_seq_add_seq (&tf->top_p_seq, finally);
1189 
1190   q = tf->goto_queue;
1191   qe = q + tf->goto_queue_active;
1192 
1193   if (tf->may_return)
1194     {
1195       /* Reachable by return expressions only.  Redirect them.  */
1196       for (; q < qe; ++q)
1197 	do_return_redirection (q, finally_label, NULL);
1198       replace_goto_queue (tf);
1199     }
1200   else
1201     {
1202       /* Reachable by goto expressions only.  Redirect them.  */
1203       for (; q < qe; ++q)
1204 	do_goto_redirection (q, finally_label, NULL, tf);
1205       replace_goto_queue (tf);
1206 
1207       if (tf->dest_array[0] == tf->fallthru_label)
1208 	{
1209 	  /* Reachable by goto to fallthru label only.  Redirect it
1210 	     to the new label (already created, sadly), and do not
1211 	     emit the final branch out, or the fallthru label.  */
1212 	  tf->fallthru_label = NULL;
1213 	  return;
1214 	}
1215     }
1216 
1217   /* Place the original return/goto to the original destination
1218      immediately after the finally block. */
1219   x = tf->goto_queue[0].cont_stmt;
1220   gimple_seq_add_stmt (&tf->top_p_seq, x);
1221   maybe_record_in_goto_queue (state, x);
1222 }
1223 
1224 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1225    and outgoing from the finally block.  Implement this by duplicating the
1226    finally block for every destination.  */
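/* For instance (sketch): with a fallthru edge and one escaping goto,

	try { if (p) goto out; body; } finally { cleanup (); }
	out:;

   is lowered to roughly

	if (p) goto L1;
	body;
	cleanup (); goto fallthru_label;
	L1: cleanup (); goto out;
	fallthru_label:;
	out:;

   i.e. one duplicate of the finally code per reachable destination, each
   followed by a branch (or return) to that destination.  */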
1227 
1228 static void
1229 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1230 {
1231   gimple_seq finally;
1232   gimple_seq new_stmt;
1233   gimple_seq seq;
1234   gimple *x;
1235   geh_else *eh_else;
1236   tree tmp;
1237   location_t tf_loc = gimple_location (tf->try_finally_expr);
1238 
1239   finally = gimple_try_cleanup (tf->top_p);
1240 
1241   /* Notice EH_ELSE, and simplify some of the remaining code
1242      by considering FINALLY to be the normal return path only.  */
1243   eh_else = get_eh_else (finally);
1244   if (eh_else)
1245     finally = gimple_eh_else_n_body (eh_else);
1246 
1247   tf->top_p_seq = gimple_try_eval (tf->top_p);
1248   new_stmt = NULL;
1249 
1250   if (tf->may_fallthru)
1251     {
1252       seq = lower_try_finally_dup_block (finally, state, tf_loc);
1253       lower_eh_constructs_1 (state, &seq);
1254       gimple_seq_add_seq (&new_stmt, seq);
1255 
1256       tmp = lower_try_finally_fallthru_label (tf);
1257       x = gimple_build_goto (tmp);
1258       gimple_set_location (x, tf_loc);
1259       gimple_seq_add_stmt (&new_stmt, x);
1260     }
1261 
1262   if (tf->may_throw)
1263     {
1264       /* We don't need to copy the EH path of EH_ELSE,
1265 	 since it is only emitted once.  */
1266       if (eh_else)
1267 	seq = gimple_eh_else_e_body (eh_else);
1268       else
1269 	seq = lower_try_finally_dup_block (finally, state, tf_loc);
1270       lower_eh_constructs_1 (state, &seq);
1271 
1272       emit_post_landing_pad (&eh_seq, tf->region);
1273       gimple_seq_add_seq (&eh_seq, seq);
1274       emit_resx (&eh_seq, tf->region);
1275     }
1276 
1277   if (tf->goto_queue)
1278     {
1279       struct goto_queue_node *q, *qe;
1280       int return_index, index;
1281       struct labels_s
1282       {
1283 	struct goto_queue_node *q;
1284 	tree label;
1285       } *labels;
1286 
1287       return_index = tf->dest_array.length ();
1288       labels = XCNEWVEC (struct labels_s, return_index + 1);
1289 
1290       q = tf->goto_queue;
1291       qe = q + tf->goto_queue_active;
1292       for (; q < qe; q++)
1293 	{
1294 	  index = q->index < 0 ? return_index : q->index;
1295 
1296 	  if (!labels[index].q)
1297 	    labels[index].q = q;
1298 	}
1299 
1300       for (index = 0; index < return_index + 1; index++)
1301 	{
1302 	  tree lab;
1303 
1304 	  q = labels[index].q;
1305 	  if (! q)
1306 	    continue;
1307 
1308 	  lab = labels[index].label
1309 	    = create_artificial_label (tf_loc);
1310 
1311 	  if (index == return_index)
1312 	    do_return_redirection (q, lab, NULL);
1313 	  else
1314 	    do_goto_redirection (q, lab, NULL, tf);
1315 
1316 	  x = gimple_build_label (lab);
1317           gimple_seq_add_stmt (&new_stmt, x);
1318 
1319 	  seq = lower_try_finally_dup_block (finally, state, q->location);
1320 	  lower_eh_constructs_1 (state, &seq);
1321           gimple_seq_add_seq (&new_stmt, seq);
1322 
1323           gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1324 	  maybe_record_in_goto_queue (state, q->cont_stmt);
1325 	}
1326 
1327       for (q = tf->goto_queue; q < qe; q++)
1328 	{
1329 	  tree lab;
1330 
1331 	  index = q->index < 0 ? return_index : q->index;
1332 
1333 	  if (labels[index].q == q)
1334 	    continue;
1335 
1336 	  lab = labels[index].label;
1337 
1338 	  if (index == return_index)
1339 	    do_return_redirection (q, lab, NULL);
1340 	  else
1341 	    do_goto_redirection (q, lab, NULL, tf);
1342 	}
1343 
1344       replace_goto_queue (tf);
1345       free (labels);
1346     }
1347 
1348   /* Need to link new stmts after running replace_goto_queue due
1349      to not wanting to process the same goto stmts twice.  */
1350   gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1351 }
1352 
1353 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1354    and outgoing from the finally block.  Implement this by instrumenting
1355    each incoming edge and creating a switch statement at the end of the
1356    finally block that branches to the appropriate destination.  */
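/* For instance (sketch, with illustrative index values): with a fallthru
   edge and one escaping goto,

	try { if (p) goto out; body; } finally { cleanup (); }
	out:;

   is lowered to roughly

	if (p) { finally_tmp = 0; goto finally_label; }
	body;
	finally_tmp = 1;
	finally_label:
	cleanup ();
	switch (finally_tmp)
	  {
	  case 0: goto out;
	  default: goto fallthru_label;
	  }
	fallthru_label:;
	out:;

   so the finally code exists only once and FINALLY_TMP records which edge
   entered it; the real index values are computed below.  */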
1357 
1358 static void
1359 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1360 {
1361   struct goto_queue_node *q, *qe;
1362   tree finally_tmp, finally_label;
1363   int return_index, eh_index, fallthru_index;
1364   int nlabels, ndests, j, last_case_index;
1365   tree last_case;
1366   auto_vec<tree> case_label_vec;
1367   gimple_seq switch_body = NULL;
1368   gimple *x;
1369   geh_else *eh_else;
1370   tree tmp;
1371   gimple *switch_stmt;
1372   gimple_seq finally;
1373   hash_map<tree, gimple *> *cont_map = NULL;
1374   /* The location of the TRY_FINALLY stmt.  */
1375   location_t tf_loc = gimple_location (tf->try_finally_expr);
1376   /* The location of the finally block.  */
1377   location_t finally_loc;
1378 
1379   finally = gimple_try_cleanup (tf->top_p);
1380   eh_else = get_eh_else (finally);
1381 
1382   /* Mash the TRY block to the head of the chain.  */
1383   tf->top_p_seq = gimple_try_eval (tf->top_p);
1384 
1385   /* The location of the finally is either the last stmt in the finally
1386      block or the location of the TRY_FINALLY itself.  */
1387   x = gimple_seq_last_stmt (finally);
1388   finally_loc = x ? gimple_location (x) : tf_loc;
1389 
1390   /* Prepare for switch statement generation.  */
1391   nlabels = tf->dest_array.length ();
1392   return_index = nlabels;
1393   eh_index = return_index + tf->may_return;
1394   fallthru_index = eh_index + (tf->may_throw && !eh_else);
1395   ndests = fallthru_index + tf->may_fallthru;
1396 
1397   finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1398   finally_label = create_artificial_label (finally_loc);
1399 
1400   /* We use vec::quick_push on case_label_vec throughout this function,
1401      since we know the size in advance and allocate precisely as much
1402      space as needed.  */
1403   case_label_vec.create (ndests);
1404   last_case = NULL;
1405   last_case_index = 0;
1406 
1407   /* Begin inserting code for getting to the finally block.  Things
1408      are done in this order to correspond to the sequence the code is
1409      laid out.  */
1410 
1411   if (tf->may_fallthru)
1412     {
1413       x = gimple_build_assign (finally_tmp,
1414 			       build_int_cst (integer_type_node,
1415 					      fallthru_index));
1416       gimple_seq_add_stmt (&tf->top_p_seq, x);
1417 
1418       tmp = build_int_cst (integer_type_node, fallthru_index);
1419       last_case = build_case_label (tmp, NULL,
1420 				    create_artificial_label (tf_loc));
1421       case_label_vec.quick_push (last_case);
1422       last_case_index++;
1423 
1424       x = gimple_build_label (CASE_LABEL (last_case));
1425       gimple_seq_add_stmt (&switch_body, x);
1426 
1427       tmp = lower_try_finally_fallthru_label (tf);
1428       x = gimple_build_goto (tmp);
1429       gimple_set_location (x, tf_loc);
1430       gimple_seq_add_stmt (&switch_body, x);
1431     }
1432 
1433   /* For EH_ELSE, emit the exception path (plus resx) now, then
1434      subsequently we only need consider the normal path.  */
1435   if (eh_else)
1436     {
1437       if (tf->may_throw)
1438 	{
1439 	  finally = gimple_eh_else_e_body (eh_else);
1440 	  lower_eh_constructs_1 (state, &finally);
1441 
1442 	  emit_post_landing_pad (&eh_seq, tf->region);
1443 	  gimple_seq_add_seq (&eh_seq, finally);
1444 	  emit_resx (&eh_seq, tf->region);
1445 	}
1446 
1447       finally = gimple_eh_else_n_body (eh_else);
1448     }
1449   else if (tf->may_throw)
1450     {
1451       emit_post_landing_pad (&eh_seq, tf->region);
1452 
1453       x = gimple_build_assign (finally_tmp,
1454 			       build_int_cst (integer_type_node, eh_index));
1455       gimple_seq_add_stmt (&eh_seq, x);
1456 
1457       x = gimple_build_goto (finally_label);
1458       gimple_set_location (x, tf_loc);
1459       gimple_seq_add_stmt (&eh_seq, x);
1460 
1461       tmp = build_int_cst (integer_type_node, eh_index);
1462       last_case = build_case_label (tmp, NULL,
1463 				    create_artificial_label (tf_loc));
1464       case_label_vec.quick_push (last_case);
1465       last_case_index++;
1466 
1467       x = gimple_build_label (CASE_LABEL (last_case));
1468       gimple_seq_add_stmt (&eh_seq, x);
1469       emit_resx (&eh_seq, tf->region);
1470     }
1471 
1472   x = gimple_build_label (finally_label);
1473   gimple_seq_add_stmt (&tf->top_p_seq, x);
1474 
1475   lower_eh_constructs_1 (state, &finally);
1476   gimple_seq_add_seq (&tf->top_p_seq, finally);
1477 
1478   /* Redirect each incoming goto edge.  */
1479   q = tf->goto_queue;
1480   qe = q + tf->goto_queue_active;
1481   j = last_case_index + tf->may_return;
1482   /* Prepare the assignments to finally_tmp that are executed upon the
1483      entrance through a particular edge. */
1484   for (; q < qe; ++q)
1485     {
1486       gimple_seq mod = NULL;
1487       int switch_id;
1488       unsigned int case_index;
1489 
1490       if (q->index < 0)
1491 	{
1492 	  x = gimple_build_assign (finally_tmp,
1493 				   build_int_cst (integer_type_node,
1494 						  return_index));
1495 	  gimple_seq_add_stmt (&mod, x);
1496 	  do_return_redirection (q, finally_label, mod);
1497 	  switch_id = return_index;
1498 	}
1499       else
1500 	{
1501 	  x = gimple_build_assign (finally_tmp,
1502 				   build_int_cst (integer_type_node, q->index));
1503 	  gimple_seq_add_stmt (&mod, x);
1504 	  do_goto_redirection (q, finally_label, mod, tf);
1505 	  switch_id = q->index;
1506 	}
1507 
1508       case_index = j + q->index;
1509       if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1510         {
1511           tree case_lab;
1512 	  tmp = build_int_cst (integer_type_node, switch_id);
1513           case_lab = build_case_label (tmp, NULL,
1514 				       create_artificial_label (tf_loc));
1515           /* We store the cont_stmt in the pointer map, so that we can recover
1516              it in the loop below.  */
1517           if (!cont_map)
1518 	    cont_map = new hash_map<tree, gimple *>;
1519           cont_map->put (case_lab, q->cont_stmt);
1520           case_label_vec.quick_push (case_lab);
1521         }
1522     }
1523   for (j = last_case_index; j < last_case_index + nlabels; j++)
1524     {
1525       gimple *cont_stmt;
1526 
1527       last_case = case_label_vec[j];
1528 
1529       gcc_assert (last_case);
1530       gcc_assert (cont_map);
1531 
1532       cont_stmt = *cont_map->get (last_case);
1533 
1534       x = gimple_build_label (CASE_LABEL (last_case));
1535       gimple_seq_add_stmt (&switch_body, x);
1536       gimple_seq_add_stmt (&switch_body, cont_stmt);
1537       maybe_record_in_goto_queue (state, cont_stmt);
1538     }
1539   if (cont_map)
1540     delete cont_map;
1541 
1542   replace_goto_queue (tf);
1543 
1544   /* Make sure that the last case is the default label, as one is required.
1545      Then sort the labels, which is also required in GIMPLE.  */
1546   CASE_LOW (last_case) = NULL;
1547   tree tem = case_label_vec.pop ();
1548   gcc_assert (tem == last_case);
1549   sort_case_labels (case_label_vec);
1550 
1551   /* Build the switch statement, setting last_case to be the default
1552      label.  */
1553   switch_stmt = gimple_build_switch (finally_tmp, last_case,
1554 				     case_label_vec);
1555   gimple_set_location (switch_stmt, finally_loc);
1556 
1557   /* Need to link SWITCH_STMT after running replace_goto_queue
1558      due to not wanting to process the same goto stmts twice.  */
1559   gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1560   gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1561 }
1562 
1563 /* Decide whether or not we are going to duplicate the finally block.
1564    There are several considerations.
1565 
1566    First, if this is Java, then the finally block contains code
1567    written by the user.  It has line numbers associated with it,
1568    so duplicating the block means it's difficult to set a breakpoint.
1569    Since controlling code generation via -g is verboten, we simply
1570    never duplicate code without optimization.
1571 
1572    Second, we'd like to prevent egregious code growth.  One way to
1573    do this is to estimate the size of the finally block, multiply
1574    that by the number of copies we'd need to make, and compare against
1575    the estimate of the size of the switch machinery we'd have to add.  */
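/* For example, with NDESTS == 3 and a finally block estimated at 10 insns,
   duplication costs roughly (10 + 1) * 3 = 33 while the switch machinery
   costs 10 + 2 * 3 = 16, so when optimizing for size we pick the switch;
   at -O2 copying still wins because 33 < 100.  */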
1576 
1577 static bool
1578 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1579 {
1580   int f_estimate, sw_estimate;
1581   geh_else *eh_else;
1582 
1583   /* If there's an EH_ELSE involved, the exception path is separate
1584      and really doesn't come into play for this computation.  */
1585   eh_else = get_eh_else (finally);
1586   if (eh_else)
1587     {
1588       ndests -= may_throw;
1589       finally = gimple_eh_else_n_body (eh_else);
1590     }
1591 
1592   if (!optimize)
1593     {
1594       gimple_stmt_iterator gsi;
1595 
1596       if (ndests == 1)
1597         return true;
1598 
1599       for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1600 	{
1601 	  /* Duplicate __builtin_stack_restore in the hope of eliminating it
1602 	     on the EH paths and, consequently, useless cleanups.  */
1603 	  gimple *stmt = gsi_stmt (gsi);
1604 	  if (!is_gimple_debug (stmt)
1605 	      && !gimple_clobber_p (stmt)
1606 	      && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1607 	    return false;
1608 	}
1609       return true;
1610     }
1611 
1612   /* Finally estimate N times, plus N gotos.  */
1613   f_estimate = estimate_num_insns_seq (finally, &eni_size_weights);
1614   f_estimate = (f_estimate + 1) * ndests;
1615 
1616   /* Switch statement (cost 10), N variable assignments, N gotos.  */
1617   sw_estimate = 10 + 2 * ndests;
1618 
1619   /* Optimize for size clearly wants our best guess.  */
1620   if (optimize_function_for_size_p (cfun))
1621     return f_estimate < sw_estimate;
1622 
1623   /* ??? These numbers are completely made up so far.  */
1624   if (optimize > 1)
1625     return f_estimate < 100 || f_estimate < sw_estimate * 2;
1626   else
1627     return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1628 }
1629 
1630 /* REG is the enclosing region for a possible cleanup region, or the region
1631    itself.  Returns TRUE if such a region would be unreachable.
1632 
1633    Cleanup regions within a must-not-throw region aren't actually reachable
1634    even if there are throwing stmts within them, because the personality
1635    routine will call terminate before unwinding.  */
1636 
1637 static bool
1638 cleanup_is_dead_in (eh_region reg)
1639 {
1640   while (reg && reg->type == ERT_CLEANUP)
1641     reg = reg->outer;
1642   return (reg && reg->type == ERT_MUST_NOT_THROW);
1643 }
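
/* Illustrative region nesting (hypothetical):

     MUST_NOT_THROW
       CLEANUP        <- cleanup_is_dead_in returns true here
         CLEANUP      <- and here, by walking up through the outer CLEANUP

   whereas a CLEANUP nested only within TRY or ALLOWED_EXCEPTIONS
   regions is considered live.  */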
1644 
1645 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY nodes
1646    to a sequence of labels and blocks, plus the exception region trees
1647    that record all the magic.  This is complicated by the need to
1648    arrange for the FINALLY block to be executed on all exits.  */
1649 
1650 static gimple_seq
1651 lower_try_finally (struct leh_state *state, gtry *tp)
1652 {
1653   struct leh_tf_state this_tf;
1654   struct leh_state this_state;
1655   int ndests;
1656   gimple_seq old_eh_seq;
1657 
1658   /* Process the try block.  */
1659 
1660   memset (&this_tf, 0, sizeof (this_tf));
1661   this_tf.try_finally_expr = tp;
1662   this_tf.top_p = tp;
1663   this_tf.outer = state;
1664   if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
1665     {
1666       this_tf.region = gen_eh_region_cleanup (state->cur_region);
1667       this_state.cur_region = this_tf.region;
1668     }
1669   else
1670     {
1671       this_tf.region = NULL;
1672       this_state.cur_region = state->cur_region;
1673     }
1674 
1675   this_state.ehp_region = state->ehp_region;
1676   this_state.tf = &this_tf;
1677 
1678   old_eh_seq = eh_seq;
1679   eh_seq = NULL;
1680 
1681   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1682 
1683   /* Determine if the try block is escaped through the bottom.  */
1684   this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1685 
1686   /* Determine if any exceptions are possible within the try block.  */
1687   if (this_tf.region)
1688     this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1689   if (this_tf.may_throw)
1690     honor_protect_cleanup_actions (state, &this_state, &this_tf);
1691 
1692   /* Determine how many edges (still) reach the finally block.  Or rather,
1693      how many destinations are reached by the finally block.  Use this to
1694      determine how we process the finally block itself.  */
1695 
1696   ndests = this_tf.dest_array.length ();
1697   ndests += this_tf.may_fallthru;
1698   ndests += this_tf.may_return;
1699   ndests += this_tf.may_throw;
1700 
1701   /* If the FINALLY block is not reachable, dike it out.  */
1702   if (ndests == 0)
1703     {
1704       gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1705       gimple_try_set_cleanup (tp, NULL);
1706     }
1707   /* If the finally block doesn't fall through, then any destination
1708      we might try to impose there isn't reached either.  There may be
1709      some minor amount of cleanup and redirection still needed.  */
1710   else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1711     lower_try_finally_nofallthru (state, &this_tf);
1712 
1713   /* We can easily special-case redirection to a single destination.  */
1714   else if (ndests == 1)
1715     lower_try_finally_onedest (state, &this_tf);
1716   else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1717 				    gimple_try_cleanup (tp)))
1718     lower_try_finally_copy (state, &this_tf);
1719   else
1720     lower_try_finally_switch (state, &this_tf);
1721 
1722   /* If someone requested we add a label at the end of the transformed
1723      block, do so.  */
1724   if (this_tf.fallthru_label)
1725     {
1726       /* This must be reached only if ndests == 0. */
1727       gimple *x = gimple_build_label (this_tf.fallthru_label);
1728       gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1729     }
1730 
1731   this_tf.dest_array.release ();
1732   free (this_tf.goto_queue);
1733   if (this_tf.goto_queue_map)
1734     delete this_tf.goto_queue_map;
1735 
1736   /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1737      If there was no old eh_seq, then the append is trivially already done.  */
1738   if (old_eh_seq)
1739     {
1740       if (eh_seq == NULL)
1741 	eh_seq = old_eh_seq;
1742       else
1743 	{
1744 	  gimple_seq new_eh_seq = eh_seq;
1745 	  eh_seq = old_eh_seq;
1746 	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1747 	}
1748     }
1749 
1750   return this_tf.top_p_seq;
1751 }
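
/* Counting example (hypothetical): a try/finally whose body can fall
   off the end, contains one goto leaving the construct, contains a
   return, and may throw yields ndests = 1 (dest_array) + 1 (fallthru)
   + 1 (return) + 1 (throw) = 4, so decide_copy_try_finally chooses
   between four copies of the finally block and a switch dispatch.  */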
1752 
1753 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
1754    list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1755    exception region trees that record all the magic.  */
1756 
1757 static gimple_seq
1758 lower_catch (struct leh_state *state, gtry *tp)
1759 {
1760   eh_region try_region = NULL;
1761   struct leh_state this_state = *state;
1762   gimple_stmt_iterator gsi;
1763   tree out_label;
1764   gimple_seq new_seq, cleanup;
1765   gimple *x;
1766   location_t try_catch_loc = gimple_location (tp);
1767 
1768   if (flag_exceptions)
1769     {
1770       try_region = gen_eh_region_try (state->cur_region);
1771       this_state.cur_region = try_region;
1772     }
1773 
1774   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1775 
1776   if (!eh_region_may_contain_throw (try_region))
1777     return gimple_try_eval (tp);
1778 
1779   new_seq = NULL;
1780   emit_eh_dispatch (&new_seq, try_region);
1781   emit_resx (&new_seq, try_region);
1782 
1783   this_state.cur_region = state->cur_region;
1784   this_state.ehp_region = try_region;
1785 
1786   /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1787      itself, so that e.g. for coverage purposes the nested cleanups don't
1788      appear before the cleanup body.  See PR64634 for details.  */
1789   gimple_seq old_eh_seq = eh_seq;
1790   eh_seq = NULL;
1791 
1792   out_label = NULL;
1793   cleanup = gimple_try_cleanup (tp);
1794   for (gsi = gsi_start (cleanup);
1795        !gsi_end_p (gsi);
1796        gsi_next (&gsi))
1797     {
1798       eh_catch c;
1799       gcatch *catch_stmt;
1800       gimple_seq handler;
1801 
1802       catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1803       c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1804 
1805       handler = gimple_catch_handler (catch_stmt);
1806       lower_eh_constructs_1 (&this_state, &handler);
1807 
1808       c->label = create_artificial_label (UNKNOWN_LOCATION);
1809       x = gimple_build_label (c->label);
1810       gimple_seq_add_stmt (&new_seq, x);
1811 
1812       gimple_seq_add_seq (&new_seq, handler);
1813 
1814       if (gimple_seq_may_fallthru (new_seq))
1815 	{
1816 	  if (!out_label)
1817 	    out_label = create_artificial_label (try_catch_loc);
1818 
1819 	  x = gimple_build_goto (out_label);
1820 	  gimple_seq_add_stmt (&new_seq, x);
1821 	}
1822       if (!c->type_list)
1823 	break;
1824     }
1825 
1826   gimple_try_set_cleanup (tp, new_seq);
1827 
1828   gimple_seq new_eh_seq = eh_seq;
1829   eh_seq = old_eh_seq;
1830   gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1831   gimple_seq_add_seq (&eh_seq, new_eh_seq);
1832   return ret_seq;
1833 }
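
/* Rough shape of the result (illustrative; labels are invented).  The
   inline code becomes

       <lowered try body>
     out_label:

   while the EH sequence collected at the end of the function gains

     <post landing pad>:
       eh_dispatch <try-region>
       resx <try-region>
     <label for first catch>:
       <handler>
       goto out_label;
     ... one label/handler pair per GIMPLE_CATCH ...  */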
1834 
1835 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
1836    GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1837    region trees that record all the magic.  */
1838 
1839 static gimple_seq
1840 lower_eh_filter (struct leh_state *state, gtry *tp)
1841 {
1842   struct leh_state this_state = *state;
1843   eh_region this_region = NULL;
1844   gimple *inner, *x;
1845   gimple_seq new_seq;
1846 
1847   inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1848 
1849   if (flag_exceptions)
1850     {
1851       this_region = gen_eh_region_allowed (state->cur_region,
1852 				           gimple_eh_filter_types (inner));
1853       this_state.cur_region = this_region;
1854     }
1855 
1856   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1857 
1858   if (!eh_region_may_contain_throw (this_region))
1859     return gimple_try_eval (tp);
1860 
1861   new_seq = NULL;
1862   this_state.cur_region = state->cur_region;
1863   this_state.ehp_region = this_region;
1864 
1865   emit_eh_dispatch (&new_seq, this_region);
1866   emit_resx (&new_seq, this_region);
1867 
1868   this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1869   x = gimple_build_label (this_region->u.allowed.label);
1870   gimple_seq_add_stmt (&new_seq, x);
1871 
1872   lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1873   gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1874 
1875   gimple_try_set_cleanup (tp, new_seq);
1876 
1877   return frob_into_branch_around (tp, this_region, NULL);
1878 }
1879 
1880 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
1881    a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1882    plus the exception region trees that record all the magic.  */
1883 
1884 static gimple_seq
1885 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1886 {
1887   struct leh_state this_state = *state;
1888 
1889   if (flag_exceptions)
1890     {
1891       gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1892       eh_region this_region;
1893 
1894       this_region = gen_eh_region_must_not_throw (state->cur_region);
1895       this_region->u.must_not_throw.failure_decl
1896 	= gimple_eh_must_not_throw_fndecl (
1897 	    as_a <geh_mnt *> (inner));
1898       this_region->u.must_not_throw.failure_loc
1899 	= LOCATION_LOCUS (gimple_location (tp));
1900 
1901       /* In order to get mangling applied to this decl, we must mark it
1902 	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
1903 	 needs to happen.  */
1904       TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1905 
1906       this_state.cur_region = this_region;
1907     }
1908 
1909   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1910 
1911   return gimple_try_eval (tp);
1912 }
1913 
1914 /* Implement a cleanup expression.  This is similar to try-finally,
1915    except that we only execute the cleanup block for exception edges.  */
1916 
1917 static gimple_seq
1918 lower_cleanup (struct leh_state *state, gtry *tp)
1919 {
1920   struct leh_state this_state = *state;
1921   eh_region this_region = NULL;
1922   struct leh_tf_state fake_tf;
1923   gimple_seq result;
1924   bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1925 
1926   if (flag_exceptions && !cleanup_dead)
1927     {
1928       this_region = gen_eh_region_cleanup (state->cur_region);
1929       this_state.cur_region = this_region;
1930     }
1931 
1932   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1933 
1934   if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1935     return gimple_try_eval (tp);
1936 
1937   /* Build enough of a try-finally state so that we can reuse
1938      honor_protect_cleanup_actions.  */
1939   memset (&fake_tf, 0, sizeof (fake_tf));
1940   fake_tf.top_p = fake_tf.try_finally_expr = tp;
1941   fake_tf.outer = state;
1942   fake_tf.region = this_region;
1943   fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1944   fake_tf.may_throw = true;
1945 
1946   honor_protect_cleanup_actions (state, NULL, &fake_tf);
1947 
1948   if (fake_tf.may_throw)
1949     {
1950       /* In this case honor_protect_cleanup_actions had nothing to do,
1951 	 and we should process this normally.  */
1952       lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1953       result = frob_into_branch_around (tp, this_region,
1954                                         fake_tf.fallthru_label);
1955     }
1956   else
1957     {
1958       /* In this case honor_protect_cleanup_actions did nearly all of
1959 	 the work.  All we have left is to append the fallthru_label.  */
1960 
1961       result = gimple_try_eval (tp);
1962       if (fake_tf.fallthru_label)
1963 	{
1964 	  gimple *x = gimple_build_label (fake_tf.fallthru_label);
1965 	  gimple_seq_add_stmt (&result, x);
1966 	}
1967     }
1968   return result;
1969 }
1970 
1971 /* Main loop for lowering eh constructs. Also moves gsi to the next
1972    statement. */
1973 
1974 static void
1975 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1976 {
1977   gimple_seq replace;
1978   gimple *x;
1979   gimple *stmt = gsi_stmt (*gsi);
1980 
1981   switch (gimple_code (stmt))
1982     {
1983     case GIMPLE_CALL:
1984       {
1985 	tree fndecl = gimple_call_fndecl (stmt);
1986 	tree rhs, lhs;
1987 
1988 	if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1989 	  switch (DECL_FUNCTION_CODE (fndecl))
1990 	    {
1991 	    case BUILT_IN_EH_POINTER:
1992 	      /* The front end may have generated a call to
1993 		 __builtin_eh_pointer (0) within a catch region.  Replace
1994 		 this zero argument with the current catch region number.  */
1995 	      if (state->ehp_region)
1996 		{
1997 		  tree nr = build_int_cst (integer_type_node,
1998 					   state->ehp_region->index);
1999 		  gimple_call_set_arg (stmt, 0, nr);
2000 		}
2001 	      else
2002 		{
2003 		  /* The user has done something silly.  Remove it.  */
2004 		  rhs = null_pointer_node;
2005 		  goto do_replace;
2006 		}
2007 	      break;
2008 
2009 	    case BUILT_IN_EH_FILTER:
2010 	      /* ??? This should never appear, but since it's a builtin it
2011 		 is accessible to abuse by users.  Just remove it and
2012 		 replace the use with the arbitrary value zero.  */
2013 	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2014 	    do_replace:
2015 	      lhs = gimple_call_lhs (stmt);
2016 	      x = gimple_build_assign (lhs, rhs);
2017 	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
2018 	      /* FALLTHRU */
2019 
2020 	    case BUILT_IN_EH_COPY_VALUES:
2021 	      /* Likewise this should not appear.  Remove it.  */
2022 	      gsi_remove (gsi, true);
2023 	      return;
2024 
2025 	    default:
2026 	      break;
2027 	    }
2028       }
2029       /* FALLTHRU */
2030 
2031     case GIMPLE_ASSIGN:
2032       /* If the stmt can throw use a new temporary for the assignment
2033          to a LHS.  This makes sure the old value of the LHS is
2034 	 available on the EH edge.  Only do so for statements that
2035 	 potentially fall through (no noreturn calls e.g.), otherwise
2036 	 this new assignment might create fake fallthru regions.  */
2037       if (stmt_could_throw_p (stmt)
2038 	  && gimple_has_lhs (stmt)
2039 	  && gimple_stmt_may_fallthru (stmt)
2040 	  && !tree_could_throw_p (gimple_get_lhs (stmt))
2041 	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2042 	{
2043 	  tree lhs = gimple_get_lhs (stmt);
2044 	  tree tmp = create_tmp_var (TREE_TYPE (lhs));
2045 	  gimple *s = gimple_build_assign (lhs, tmp);
2046 	  gimple_set_location (s, gimple_location (stmt));
2047 	  gimple_set_block (s, gimple_block (stmt));
2048 	  gimple_set_lhs (stmt, tmp);
2049 	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2050 	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2051 	    DECL_GIMPLE_REG_P (tmp) = 1;
2052 	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
2053 	}
2054       /* Look for things that can throw exceptions, and record them.  */
2055       if (state->cur_region && stmt_could_throw_p (stmt))
2056 	{
2057 	  record_stmt_eh_region (state->cur_region, stmt);
2058 	  note_eh_region_may_contain_throw (state->cur_region);
2059 	}
2060       break;
2061 
2062     case GIMPLE_COND:
2063     case GIMPLE_GOTO:
2064     case GIMPLE_RETURN:
2065       maybe_record_in_goto_queue (state, stmt);
2066       break;
2067 
2068     case GIMPLE_SWITCH:
2069       verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2070       break;
2071 
2072     case GIMPLE_TRY:
2073       {
2074 	gtry *try_stmt = as_a <gtry *> (stmt);
2075 	if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2076 	  replace = lower_try_finally (state, try_stmt);
2077 	else
2078 	  {
2079 	    x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2080 	    if (!x)
2081 	      {
2082 		replace = gimple_try_eval (try_stmt);
2083 		lower_eh_constructs_1 (state, &replace);
2084 	      }
2085 	    else
2086 	      switch (gimple_code (x))
2087 		{
2088 		case GIMPLE_CATCH:
2089 		  replace = lower_catch (state, try_stmt);
2090 		  break;
2091 		case GIMPLE_EH_FILTER:
2092 		  replace = lower_eh_filter (state, try_stmt);
2093 		  break;
2094 		case GIMPLE_EH_MUST_NOT_THROW:
2095 		  replace = lower_eh_must_not_throw (state, try_stmt);
2096 		  break;
2097 		case GIMPLE_EH_ELSE:
2098 		  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
2099 		  gcc_unreachable ();
2100 		default:
2101 		  replace = lower_cleanup (state, try_stmt);
2102 		  break;
2103 		}
2104 	  }
2105       }
2106 
2107       /* Remove the old stmt and insert the transformed sequence
2108 	 instead. */
2109       gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2110       gsi_remove (gsi, true);
2111 
2112       /* Return since we don't want gsi_next () */
2113       return;
2114 
2115     case GIMPLE_EH_ELSE:
2116       /* We should be eliminating this in lower_try_finally et al.  */
2117       gcc_unreachable ();
2118 
2119     default:
2120       /* A type, a decl, or some kind of statement that we're not
2121 	 interested in.  Don't walk them.  */
2122       break;
2123     }
2124 
2125   gsi_next (gsi);
2126 }
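
/* Illustration of the LHS-temporary rewrite done for throwing
   assignments in lower_eh_constructs_2 (names invented): with
   -fnon-call-exceptions,

       x = y / z;     <- may trap; x would be clobbered on the EH edge

   becomes

       tmp = y / z;
       x = tmp;       <- so x keeps its old value if the division traps.  */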
2127 
2128 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2129 
2130 static void
2131 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2132 {
2133   gimple_stmt_iterator gsi;
2134   for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2135     lower_eh_constructs_2 (state, &gsi);
2136 }
2137 
2138 namespace {
2139 
2140 const pass_data pass_data_lower_eh =
2141 {
2142   GIMPLE_PASS, /* type */
2143   "eh", /* name */
2144   OPTGROUP_NONE, /* optinfo_flags */
2145   TV_TREE_EH, /* tv_id */
2146   PROP_gimple_lcf, /* properties_required */
2147   PROP_gimple_leh, /* properties_provided */
2148   0, /* properties_destroyed */
2149   0, /* todo_flags_start */
2150   0, /* todo_flags_finish */
2151 };
2152 
2153 class pass_lower_eh : public gimple_opt_pass
2154 {
2155 public:
2156   pass_lower_eh (gcc::context *ctxt)
2157     : gimple_opt_pass (pass_data_lower_eh, ctxt)
2158   {}
2159 
2160   /* opt_pass methods: */
2161   virtual unsigned int execute (function *);
2162 
2163 }; // class pass_lower_eh
2164 
2165 unsigned int
2166 pass_lower_eh::execute (function *fun)
2167 {
2168   struct leh_state null_state;
2169   gimple_seq bodyp;
2170 
2171   bodyp = gimple_body (current_function_decl);
2172   if (bodyp == NULL)
2173     return 0;
2174 
2175   finally_tree = new hash_table<finally_tree_hasher> (31);
2176   eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2177   memset (&null_state, 0, sizeof (null_state));
2178 
2179   collect_finally_tree_1 (bodyp, NULL);
2180   lower_eh_constructs_1 (&null_state, &bodyp);
2181   gimple_set_body (current_function_decl, bodyp);
2182 
2183   /* We assume there's a return statement, or something, at the end of
2184      the function, and thus plopping the EH sequence afterward won't
2185      change anything.  */
2186   gcc_assert (!gimple_seq_may_fallthru (bodyp));
2187   gimple_seq_add_seq (&bodyp, eh_seq);
2188 
2189   /* We assume that since BODYP already existed, adding EH_SEQ to it
2190      didn't change its value, and we don't have to re-set the function.  */
2191   gcc_assert (bodyp == gimple_body (current_function_decl));
2192 
2193   delete finally_tree;
2194   finally_tree = NULL;
2195   BITMAP_FREE (eh_region_may_contain_throw_map);
2196   eh_seq = NULL;
2197 
2198   /* If this function needs a language specific EH personality routine
2199      and the frontend didn't already set one do so now.  */
2200   if (function_needs_eh_personality (fun) == eh_personality_lang
2201       && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2202     DECL_FUNCTION_PERSONALITY (current_function_decl)
2203       = lang_hooks.eh_personality ();
2204 
2205   return 0;
2206 }
2207 
2208 } // anon namespace
2209 
2210 gimple_opt_pass *
2211 make_pass_lower_eh (gcc::context *ctxt)
2212 {
2213   return new pass_lower_eh (ctxt);
2214 }
2215 
2216 /* Create the multiple edges from an EH_DISPATCH statement to all of
2217    the possible handlers for its EH region.  Return true if there's
2218    no fallthru edge; false if there is.  */
2219 
2220 bool
2221 make_eh_dispatch_edges (geh_dispatch *stmt)
2222 {
2223   eh_region r;
2224   eh_catch c;
2225   basic_block src, dst;
2226 
2227   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2228   src = gimple_bb (stmt);
2229 
2230   switch (r->type)
2231     {
2232     case ERT_TRY:
2233       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2234 	{
2235 	  dst = label_to_block (c->label);
2236 	  make_edge (src, dst, 0);
2237 
2238 	  /* A catch-all handler doesn't have a fallthru.  */
2239 	  if (c->type_list == NULL)
2240 	    return false;
2241 	}
2242       break;
2243 
2244     case ERT_ALLOWED_EXCEPTIONS:
2245       dst = label_to_block (r->u.allowed.label);
2246       make_edge (src, dst, 0);
2247       break;
2248 
2249     default:
2250       gcc_unreachable ();
2251     }
2252 
2253   return true;
2254 }
2255 
2256 /* Create the single EH edge from STMT to its nearest landing pad,
2257    if there is such a landing pad within the current function.  */
2258 
2259 void
2260 make_eh_edges (gimple *stmt)
2261 {
2262   basic_block src, dst;
2263   eh_landing_pad lp;
2264   int lp_nr;
2265 
2266   lp_nr = lookup_stmt_eh_lp (stmt);
2267   if (lp_nr <= 0)
2268     return;
2269 
2270   lp = get_eh_landing_pad_from_number (lp_nr);
2271   gcc_assert (lp != NULL);
2272 
2273   src = gimple_bb (stmt);
2274   dst = label_to_block (lp->post_landing_pad);
2275   make_edge (src, dst, EDGE_EH);
2276 }
2277 
2278 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2279    do not actually perform the final edge redirection.
2280 
2281    CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2282    we intend to change the destination EH region as well; this means
2283    EH_LANDING_PAD_NR must already be set on the destination block label.
2284    If false, we're being called from generic cfg manipulation code and we
2285    should preserve our place within the region tree.  */
2286 
2287 static void
2288 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2289 {
2290   eh_landing_pad old_lp, new_lp;
2291   basic_block old_bb;
2292   gimple *throw_stmt;
2293   int old_lp_nr, new_lp_nr;
2294   tree old_label, new_label;
2295   edge_iterator ei;
2296   edge e;
2297 
2298   old_bb = edge_in->dest;
2299   old_label = gimple_block_label (old_bb);
2300   old_lp_nr = EH_LANDING_PAD_NR (old_label);
2301   gcc_assert (old_lp_nr > 0);
2302   old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2303 
2304   throw_stmt = last_stmt (edge_in->src);
2305   gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2306 
2307   new_label = gimple_block_label (new_bb);
2308 
2309   /* Look for an existing region that might be using NEW_BB already.  */
2310   new_lp_nr = EH_LANDING_PAD_NR (new_label);
2311   if (new_lp_nr)
2312     {
2313       new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2314       gcc_assert (new_lp);
2315 
2316       /* Unless CHANGE_REGION is true, the new and old landing pad
2317 	 had better be associated with the same EH region.  */
2318       gcc_assert (change_region || new_lp->region == old_lp->region);
2319     }
2320   else
2321     {
2322       new_lp = NULL;
2323       gcc_assert (!change_region);
2324     }
2325 
2326   /* Notice when we redirect the last EH edge away from OLD_BB.  */
2327   FOR_EACH_EDGE (e, ei, old_bb->preds)
2328     if (e != edge_in && (e->flags & EDGE_EH))
2329       break;
2330 
2331   if (new_lp)
2332     {
2333       /* NEW_LP already exists.  If there are still edges into OLD_LP,
2334 	 there's nothing to do with the EH tree.  If there are no more
2335 	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2336 	 If CHANGE_REGION is true, then our caller is expecting to remove
2337 	 the landing pad.  */
2338       if (e == NULL && !change_region)
2339 	remove_eh_landing_pad (old_lp);
2340     }
2341   else
2342     {
2343       /* No correct landing pad exists.  If there are no more edges
2344 	 into OLD_LP, then we can simply re-use the existing landing pad.
2345 	 Otherwise, we have to create a new landing pad.  */
2346       if (e == NULL)
2347 	{
2348 	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2349 	  new_lp = old_lp;
2350 	}
2351       else
2352 	new_lp = gen_eh_landing_pad (old_lp->region);
2353       new_lp->post_landing_pad = new_label;
2354       EH_LANDING_PAD_NR (new_label) = new_lp->index;
2355     }
2356 
2357   /* Maybe move the throwing statement to the new region.  */
2358   if (old_lp != new_lp)
2359     {
2360       remove_stmt_from_eh_lp (throw_stmt);
2361       add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2362     }
2363 }
2364 
2365 /* Redirect EH edge EDGE_IN to NEW_BB.  */
2366 
2367 edge
2368 redirect_eh_edge (edge edge_in, basic_block new_bb)
2369 {
2370   redirect_eh_edge_1 (edge_in, new_bb, false);
2371   return ssa_redirect_edge (edge_in, new_bb);
2372 }
2373 
2374 /* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
2375    labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2376    The actual edge update will happen in the caller.  */
2377 
2378 void
2379 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2380 {
2381   tree new_lab = gimple_block_label (new_bb);
2382   bool any_changed = false;
2383   basic_block old_bb;
2384   eh_region r;
2385   eh_catch c;
2386 
2387   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2388   switch (r->type)
2389     {
2390     case ERT_TRY:
2391       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2392 	{
2393 	  old_bb = label_to_block (c->label);
2394 	  if (old_bb == e->dest)
2395 	    {
2396 	      c->label = new_lab;
2397 	      any_changed = true;
2398 	    }
2399 	}
2400       break;
2401 
2402     case ERT_ALLOWED_EXCEPTIONS:
2403       old_bb = label_to_block (r->u.allowed.label);
2404       gcc_assert (old_bb == e->dest);
2405       r->u.allowed.label = new_lab;
2406       any_changed = true;
2407       break;
2408 
2409     default:
2410       gcc_unreachable ();
2411     }
2412 
2413   gcc_assert (any_changed);
2414 }
2415 
2416 /* Helper function for operation_could_trap_p and stmt_could_throw_p.  */
2417 
2418 bool
2419 operation_could_trap_helper_p (enum tree_code op,
2420 			       bool fp_operation,
2421 			       bool honor_trapv,
2422 			       bool honor_nans,
2423 			       bool honor_snans,
2424 			       tree divisor,
2425 			       bool *handled)
2426 {
2427   *handled = true;
2428   switch (op)
2429     {
2430     case TRUNC_DIV_EXPR:
2431     case CEIL_DIV_EXPR:
2432     case FLOOR_DIV_EXPR:
2433     case ROUND_DIV_EXPR:
2434     case EXACT_DIV_EXPR:
2435     case CEIL_MOD_EXPR:
2436     case FLOOR_MOD_EXPR:
2437     case ROUND_MOD_EXPR:
2438     case TRUNC_MOD_EXPR:
2439     case RDIV_EXPR:
2440       if (honor_snans || honor_trapv)
2441 	return true;
2442       if (fp_operation)
2443 	return flag_trapping_math;
2444       if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2445         return true;
2446       return false;
2447 
2448     case LT_EXPR:
2449     case LE_EXPR:
2450     case GT_EXPR:
2451     case GE_EXPR:
2452     case LTGT_EXPR:
2453       /* Some floating point comparisons may trap.  */
2454       return honor_nans;
2455 
2456     case EQ_EXPR:
2457     case NE_EXPR:
2458     case UNORDERED_EXPR:
2459     case ORDERED_EXPR:
2460     case UNLT_EXPR:
2461     case UNLE_EXPR:
2462     case UNGT_EXPR:
2463     case UNGE_EXPR:
2464     case UNEQ_EXPR:
2465       return honor_snans;
2466 
2467     case NEGATE_EXPR:
2468     case ABS_EXPR:
2469     case CONJ_EXPR:
2470       /* These operations don't trap with floating point.  */
2471       if (honor_trapv)
2472 	return true;
2473       return false;
2474 
2475     case PLUS_EXPR:
2476     case MINUS_EXPR:
2477     case MULT_EXPR:
2478       /* Any floating arithmetic may trap.  */
2479       if (fp_operation && flag_trapping_math)
2480 	return true;
2481       if (honor_trapv)
2482 	return true;
2483       return false;
2484 
2485     case COMPLEX_EXPR:
2486     case CONSTRUCTOR:
2487       /* Constructing an object cannot trap.  */
2488       return false;
2489 
2490     default:
2491       /* Any floating arithmetic may trap.  */
2492       if (fp_operation && flag_trapping_math)
2493 	return true;
2494 
2495       *handled = false;
2496       return false;
2497     }
2498 }
2499 
2500 /* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
2501    on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
2502    type operands that may trap.  If OP is a division operator, DIVISOR contains
2503    the value of the divisor.  */
2504 
2505 bool
2506 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2507 			tree divisor)
2508 {
2509   bool honor_nans = (fp_operation && flag_trapping_math
2510 		     && !flag_finite_math_only);
2511   bool honor_snans = fp_operation && flag_signaling_nans != 0;
2512   bool handled;
2513 
2514   if (TREE_CODE_CLASS (op) != tcc_comparison
2515       && TREE_CODE_CLASS (op) != tcc_unary
2516       && TREE_CODE_CLASS (op) != tcc_binary
2517       && op != FMA_EXPR)
2518     return false;
2519 
2520   return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2521 					honor_nans, honor_snans, divisor,
2522 					&handled);
2523 }
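
/* Usage sketch (operands hypothetical): for an integer division

     operation_could_trap_p (TRUNC_DIV_EXPR, false, false, divisor)

   returns false only when DIVISOR is a nonzero constant, while for a
   floating-point PLUS_EXPR the answer simply follows
   flag_trapping_math.  */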
2524 
2525 
2526 /* Returns true if it is possible to prove that the index of
2527    an array access REF (an ARRAY_REF expression) falls into the
2528    array bounds.  */
2529 
2530 static bool
2531 in_array_bounds_p (tree ref)
2532 {
2533   tree idx = TREE_OPERAND (ref, 1);
2534   tree min, max;
2535 
2536   if (TREE_CODE (idx) != INTEGER_CST)
2537     return false;
2538 
2539   min = array_ref_low_bound (ref);
2540   max = array_ref_up_bound (ref);
2541   if (!min
2542       || !max
2543       || TREE_CODE (min) != INTEGER_CST
2544       || TREE_CODE (max) != INTEGER_CST)
2545     return false;
2546 
2547   if (tree_int_cst_lt (idx, min)
2548       || tree_int_cst_lt (max, idx))
2549     return false;
2550 
2551   return true;
2552 }
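
/* For example (hypothetical tree): an ARRAY_REF into "int a[10]" with
   constant index 3 has low bound 0 and up bound 9, so it is provably
   in bounds and we return true; a variable index, or an index of 10,
   makes us return false.  */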
2553 
2554 /* Returns true if it is possible to prove that the range of
2555    an array access REF (an ARRAY_RANGE_REF expression) falls
2556    into the array bounds.  */
2557 
2558 static bool
2559 range_in_array_bounds_p (tree ref)
2560 {
2561   tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2562   tree range_min, range_max, min, max;
2563 
2564   range_min = TYPE_MIN_VALUE (domain_type);
2565   range_max = TYPE_MAX_VALUE (domain_type);
2566   if (!range_min
2567       || !range_max
2568       || TREE_CODE (range_min) != INTEGER_CST
2569       || TREE_CODE (range_max) != INTEGER_CST)
2570     return false;
2571 
2572   min = array_ref_low_bound (ref);
2573   max = array_ref_up_bound (ref);
2574   if (!min
2575       || !max
2576       || TREE_CODE (min) != INTEGER_CST
2577       || TREE_CODE (max) != INTEGER_CST)
2578     return false;
2579 
2580   if (tree_int_cst_lt (range_min, min)
2581       || tree_int_cst_lt (max, range_max))
2582     return false;
2583 
2584   return true;
2585 }
2586 
2587 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2588    location or floating point arithmetic.  C.f. the rtl version, may_trap_p.
2589    This routine expects only GIMPLE lhs or rhs input.  */
2590 
2591 bool
2592 tree_could_trap_p (tree expr)
2593 {
2594   enum tree_code code;
2595   bool fp_operation = false;
2596   bool honor_trapv = false;
2597   tree t, base, div = NULL_TREE;
2598 
2599   if (!expr)
2600     return false;
2601 
2602   code = TREE_CODE (expr);
2603   t = TREE_TYPE (expr);
2604 
2605   if (t)
2606     {
2607       if (COMPARISON_CLASS_P (expr))
2608 	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2609       else
2610 	fp_operation = FLOAT_TYPE_P (t);
2611       honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2612     }
2613 
2614   if (TREE_CODE_CLASS (code) == tcc_binary)
2615     div = TREE_OPERAND (expr, 1);
2616   if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2617     return true;
2618 
2619  restart:
2620   switch (code)
2621     {
2622     case COMPONENT_REF:
2623     case REALPART_EXPR:
2624     case IMAGPART_EXPR:
2625     case BIT_FIELD_REF:
2626     case VIEW_CONVERT_EXPR:
2627     case WITH_SIZE_EXPR:
2628       expr = TREE_OPERAND (expr, 0);
2629       code = TREE_CODE (expr);
2630       goto restart;
2631 
2632     case ARRAY_RANGE_REF:
2633       base = TREE_OPERAND (expr, 0);
2634       if (tree_could_trap_p (base))
2635 	return true;
2636       if (TREE_THIS_NOTRAP (expr))
2637 	return false;
2638       return !range_in_array_bounds_p (expr);
2639 
2640     case ARRAY_REF:
2641       base = TREE_OPERAND (expr, 0);
2642       if (tree_could_trap_p (base))
2643 	return true;
2644       if (TREE_THIS_NOTRAP (expr))
2645 	return false;
2646       return !in_array_bounds_p (expr);
2647 
2648     case TARGET_MEM_REF:
2649     case MEM_REF:
2650       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2651 	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2652 	return true;
2653       if (TREE_THIS_NOTRAP (expr))
2654 	return false;
2655       /* We cannot prove that the access is in-bounds when we have
2656          variable-index TARGET_MEM_REFs.  */
2657       if (code == TARGET_MEM_REF
2658 	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2659 	return true;
2660       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2661 	{
2662 	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2663 	  offset_int off = mem_ref_offset (expr);
2664 	  if (wi::neg_p (off, SIGNED))
2665 	    return true;
2666 	  if (TREE_CODE (base) == STRING_CST)
2667 	    return wi::leu_p (TREE_STRING_LENGTH (base), off);
2668 	  else if (DECL_SIZE_UNIT (base) == NULL_TREE
2669 		   || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2670 		   || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
2671 	    return true;
2672 	  /* Now we are sure the first byte of the access is inside
2673 	     the object.  */
2674 	  return false;
2675 	}
2676       return true;
2677 
2678     case INDIRECT_REF:
2679       return !TREE_THIS_NOTRAP (expr);
2680 
2681     case ASM_EXPR:
2682       return TREE_THIS_VOLATILE (expr);
2683 
2684     case CALL_EXPR:
2685       t = get_callee_fndecl (expr);
2686       /* Assume that calls to weak functions may trap.  */
2687       if (!t || !DECL_P (t))
2688 	return true;
2689       if (DECL_WEAK (t))
2690 	return tree_could_trap_p (t);
2691       return false;
2692 
2693     case FUNCTION_DECL:
2694       /* Assume that accesses to weak functions may trap, unless we know
2695 	 they are certainly defined in current TU or in some other
2696 	 LTO partition.  */
2697       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2698 	{
2699 	  cgraph_node *node = cgraph_node::get (expr);
2700 	  if (node)
2701 	    node = node->function_symbol ();
2702 	  return !(node && node->in_other_partition);
2703 	}
2704       return false;
2705 
2706     case VAR_DECL:
2707       /* Assume that accesses to weak vars may trap, unless we know
2708 	 they are certainly defined in current TU or in some other
2709 	 LTO partition.  */
2710       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2711 	{
2712 	  varpool_node *node = varpool_node::get (expr);
2713 	  if (node)
2714 	    node = node->ultimate_alias_target ();
2715 	  return !(node && node->in_other_partition);
2716 	}
2717       return false;
2718 
2719     default:
2720       return false;
2721     }
2722 }
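
/* Worked example for the MEM_REF case above (sizes invented): for
   MEM_REF [&d, off] where d has DECL_SIZE_UNIT of 8 bytes, a negative
   offset or an offset of 8 or more leaves the first byte outside the
   object and the reference could trap; an offset of, say, 4 puts the
   first byte inside the object and we return false.  */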
2723 
2724 
2725 /* Helper for stmt_could_throw_p.  Return true if STMT, an assignment
2726    statement, may throw.  */
2727 
2728 static bool
2729 stmt_could_throw_1_p (gassign *stmt)
2730 {
2731   enum tree_code code = gimple_assign_rhs_code (stmt);
2732   bool honor_nans = false;
2733   bool honor_snans = false;
2734   bool fp_operation = false;
2735   bool honor_trapv = false;
2736   tree t;
2737   size_t i;
2738   bool handled, ret;
2739 
2740   if (TREE_CODE_CLASS (code) == tcc_comparison
2741       || TREE_CODE_CLASS (code) == tcc_unary
2742       || TREE_CODE_CLASS (code) == tcc_binary
2743       || code == FMA_EXPR)
2744     {
2745       if (TREE_CODE_CLASS (code) == tcc_comparison)
2746 	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2747       else
2748 	t = gimple_expr_type (stmt);
2749       fp_operation = FLOAT_TYPE_P (t);
2750       if (fp_operation)
2751 	{
2752 	  honor_nans = flag_trapping_math && !flag_finite_math_only;
2753 	  honor_snans = flag_signaling_nans != 0;
2754 	}
2755       else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2756 	honor_trapv = true;
2757     }
2758 
2759   /* First check the LHS.  */
2760   if (tree_could_trap_p (gimple_assign_lhs (stmt)))
2761     return true;
2762 
2763   /* Check if the main expression may trap.  */
2764   ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2765 				       honor_nans, honor_snans,
2766 				       gimple_assign_rhs2 (stmt),
2767 				       &handled);
2768   if (handled)
2769     return ret;
2770 
2771   /* If the expression does not trap, see if any of the individual operands may
2772      trap.  */
2773   for (i = 1; i < gimple_num_ops (stmt); i++)
2774     if (tree_could_trap_p (gimple_op (stmt, i)))
2775       return true;
2776 
2777   return false;
2778 }
2779 
2780 
2781 /* Return true if statement STMT could throw an exception.  */
2782 
2783 bool
2784 stmt_could_throw_p (gimple *stmt)
2785 {
2786   if (!flag_exceptions)
2787     return false;
2788 
2789   /* The only statements that can throw an exception are assignments,
2790      conditionals, calls, resx, and asms.  */
2791   switch (gimple_code (stmt))
2792     {
2793     case GIMPLE_RESX:
2794       return true;
2795 
2796     case GIMPLE_CALL:
2797       return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2798 
2799     case GIMPLE_COND:
2800       {
2801 	if (!cfun->can_throw_non_call_exceptions)
2802 	  return false;
2803 	gcond *cond = as_a <gcond *> (stmt);
2804 	tree lhs = gimple_cond_lhs (cond);
2805 	return operation_could_trap_p (gimple_cond_code (cond),
2806 				       FLOAT_TYPE_P (TREE_TYPE (lhs)),
2807 				       false, NULL_TREE);
2808       }
2809 
2810     case GIMPLE_ASSIGN:
2811       if (!cfun->can_throw_non_call_exceptions
2812 	  || gimple_clobber_p (stmt))
2813         return false;
2814       return stmt_could_throw_1_p (as_a <gassign *> (stmt));
2815 
2816     case GIMPLE_ASM:
2817       if (!cfun->can_throw_non_call_exceptions)
2818         return false;
2819       return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2820 
2821     default:
2822       return false;
2823     }
2824 }
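
/* For instance, with exceptions enabled a call not marked nothrow is
   always considered throwing here, while a plain load such as
   "x = *p" only counts as potentially throwing when
   cfun->can_throw_non_call_exceptions is set and the dereference
   itself could trap.  */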
2825 
2826 
2827 /* Return true if expression T could throw an exception.  */
2828 
2829 bool
2830 tree_could_throw_p (tree t)
2831 {
2832   if (!flag_exceptions)
2833     return false;
2834   if (TREE_CODE (t) == MODIFY_EXPR)
2835     {
2836       if (cfun->can_throw_non_call_exceptions
2837           && tree_could_trap_p (TREE_OPERAND (t, 0)))
2838         return true;
2839       t = TREE_OPERAND (t, 1);
2840     }
2841 
2842   if (TREE_CODE (t) == WITH_SIZE_EXPR)
2843     t = TREE_OPERAND (t, 0);
2844   if (TREE_CODE (t) == CALL_EXPR)
2845     return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2846   if (cfun->can_throw_non_call_exceptions)
2847     return tree_could_trap_p (t);
2848   return false;
2849 }
2850 
2851 /* Return true if STMT can throw an exception that is not caught within
2852    the current function (CFUN).  */
2853 
2854 bool
2855 stmt_can_throw_external (gimple *stmt)
2856 {
2857   int lp_nr;
2858 
2859   if (!stmt_could_throw_p (stmt))
2860     return false;
2861 
2862   lp_nr = lookup_stmt_eh_lp (stmt);
2863   return lp_nr == 0;
2864 }
2865 
2866 /* Return true if STMT can throw an exception that is caught within
2867    the current function (CFUN).  */
2868 
2869 bool
2870 stmt_can_throw_internal (gimple *stmt)
2871 {
2872   int lp_nr;
2873 
2874   if (!stmt_could_throw_p (stmt))
2875     return false;
2876 
2877   lp_nr = lookup_stmt_eh_lp (stmt);
2878   return lp_nr > 0;
2879 }
2880 
2881 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2882    remove any entry it might have from the EH table.  Return true if
2883    any change was made.  */
2884 
2885 bool
2886 maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt)
2887 {
2888   if (stmt_could_throw_p (stmt))
2889     return false;
2890   return remove_stmt_from_eh_lp_fn (ifun, stmt);
2891 }
2892 
2893 /* Likewise, but always use the current function.  */
2894 
2895 bool
2896 maybe_clean_eh_stmt (gimple *stmt)
2897 {
2898   return maybe_clean_eh_stmt_fn (cfun, stmt);
2899 }
2900 
2901 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2902    OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2903    in the table if it should be in there.  Return TRUE if a replacement was
2904    done that may require an EH edge purge.  */
2905 
2906 bool
2907 maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt)
2908 {
2909   int lp_nr = lookup_stmt_eh_lp (old_stmt);
2910 
2911   if (lp_nr != 0)
2912     {
2913       bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2914 
2915       if (new_stmt == old_stmt && new_stmt_could_throw)
2916 	return false;
2917 
2918       remove_stmt_from_eh_lp (old_stmt);
2919       if (new_stmt_could_throw)
2920 	{
2921 	  add_stmt_to_eh_lp (new_stmt, lp_nr);
2922 	  return false;
2923 	}
2924       else
2925 	return true;
2926     }
2927 
2928   return false;
2929 }
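
/* Typical use (sketch, not a specific caller): after folding replaces
   a possibly-throwing call with something that cannot throw,

     if (maybe_clean_or_replace_eh_stmt (old_stmt, new_stmt))
       ...purge the now-dead EH edges from the statement's block,
          e.g. with gimple_purge_dead_eh_edges...

   A TRUE return only means the statement lost its landing-pad entry;
   the CFG itself is not updated here.  */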
2930 
2931 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2932    in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
2933    operand is the return value of duplicate_eh_regions.  */
2934 
2935 bool
2936 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt,
2937 			    struct function *old_fun, gimple *old_stmt,
2938 			    hash_map<void *, void *> *map,
2939 			    int default_lp_nr)
2940 {
2941   int old_lp_nr, new_lp_nr;
2942 
2943   if (!stmt_could_throw_p (new_stmt))
2944     return false;
2945 
2946   old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2947   if (old_lp_nr == 0)
2948     {
2949       if (default_lp_nr == 0)
2950 	return false;
2951       new_lp_nr = default_lp_nr;
2952     }
2953   else if (old_lp_nr > 0)
2954     {
2955       eh_landing_pad old_lp, new_lp;
2956 
2957       old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2958       new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
2959       new_lp_nr = new_lp->index;
2960     }
2961   else
2962     {
2963       eh_region old_r, new_r;
2964 
2965       old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2966       new_r = static_cast<eh_region> (*map->get (old_r));
2967       new_lp_nr = -new_r->index;
2968     }
2969 
2970   add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2971   return true;
2972 }
2973 
2974 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2975    and thus no remapping is required.  */
2976 
2977 bool
2978 maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt)
2979 {
2980   int lp_nr;
2981 
2982   if (!stmt_could_throw_p (new_stmt))
2983     return false;
2984 
2985   lp_nr = lookup_stmt_eh_lp (old_stmt);
2986   if (lp_nr == 0)
2987     return false;
2988 
2989   add_stmt_to_eh_lp (new_stmt, lp_nr);
2990   return true;
2991 }
2992 
2993 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
2994    GIMPLE_TRY) that are similar enough to be considered the same.  Currently
2995    this only handles handlers consisting of a single call, as that's the
2996    important case for C++: a destructor call for a particular object showing
2997    up in multiple handlers.  */
2998 
2999 static bool
3000 same_handler_p (gimple_seq oneh, gimple_seq twoh)
3001 {
3002   gimple_stmt_iterator gsi;
3003   gimple *ones, *twos;
3004   unsigned int ai;
3005 
3006   gsi = gsi_start (oneh);
3007   if (!gsi_one_before_end_p (gsi))
3008     return false;
3009   ones = gsi_stmt (gsi);
3010 
3011   gsi = gsi_start (twoh);
3012   if (!gsi_one_before_end_p (gsi))
3013     return false;
3014   twos = gsi_stmt (gsi);
3015 
3016   if (!is_gimple_call (ones)
3017       || !is_gimple_call (twos)
3018       || gimple_call_lhs (ones)
3019       || gimple_call_lhs (twos)
3020       || gimple_call_chain (ones)
3021       || gimple_call_chain (twos)
3022       || !gimple_call_same_target_p (ones, twos)
3023       || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3024     return false;
3025 
3026   for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3027     if (!operand_equal_p (gimple_call_arg (ones, ai),
3028                           gimple_call_arg (twos, ai), 0))
3029       return false;
3030 
3031   return true;
3032 }
3033 
3034 /* Optimize
3035     try { A() } finally { try { ~B() } catch { ~A() } }
3036     try { ... } finally { ~A() }
3037    into
3038     try { A() } catch { ~B() }
3039     try { ~B() ... } finally { ~A() }
3040 
3041    This occurs frequently in C++, where A is a local variable and B is a
3042    temporary used in the initializer for A.  */
3043 
3044 static void
3045 optimize_double_finally (gtry *one, gtry *two)
3046 {
3047   gimple *oneh;
3048   gimple_stmt_iterator gsi;
3049   gimple_seq cleanup;
3050 
3051   cleanup = gimple_try_cleanup (one);
3052   gsi = gsi_start (cleanup);
3053   if (!gsi_one_before_end_p (gsi))
3054     return;
3055 
3056   oneh = gsi_stmt (gsi);
3057   if (gimple_code (oneh) != GIMPLE_TRY
3058       || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3059     return;
3060 
3061   if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3062     {
3063       gimple_seq seq = gimple_try_eval (oneh);
3064 
3065       gimple_try_set_cleanup (one, seq);
3066       gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3067       seq = copy_gimple_seq_and_replace_locals (seq);
3068       gimple_seq_add_seq (&seq, gimple_try_eval (two));
3069       gimple_try_set_eval (two, seq);
3070     }
3071 }
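
/* An illustrative C++ source shape that tends to produce the pattern
   handled above is

     A a = B (...);

   where B is a temporary used in A's initializer: ~B() runs in a
   finally whose EH path must also destroy A, while the finally for
   the enclosing scope runs ~A() as well.  */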
3072 
3073 /* Perform EH refactoring optimizations that are simpler to do when code
3074    flow has been lowered but EH structures haven't.  */
3075 
3076 static void
3077 refactor_eh_r (gimple_seq seq)
3078 {
3079   gimple_stmt_iterator gsi;
3080   gimple *one, *two;
3081 
3082   one = NULL;
3083   two = NULL;
3084   gsi = gsi_start (seq);
3085   while (1)
3086     {
3087       one = two;
3088       if (gsi_end_p (gsi))
3089 	two = NULL;
3090       else
3091 	two = gsi_stmt (gsi);
3092       if (one && two)
3093 	if (gtry *try_one = dyn_cast <gtry *> (one))
3094 	  if (gtry *try_two = dyn_cast <gtry *> (two))
3095 	    if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3096 		&& gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3097 	      optimize_double_finally (try_one, try_two);
3098       if (one)
3099 	switch (gimple_code (one))
3100 	  {
3101 	  case GIMPLE_TRY:
3102 	    refactor_eh_r (gimple_try_eval (one));
3103 	    refactor_eh_r (gimple_try_cleanup (one));
3104 	    break;
3105 	  case GIMPLE_CATCH:
3106 	    refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3107 	    break;
3108 	  case GIMPLE_EH_FILTER:
3109 	    refactor_eh_r (gimple_eh_filter_failure (one));
3110 	    break;
3111 	  case GIMPLE_EH_ELSE:
3112 	    {
3113 	      geh_else *eh_else_stmt = as_a <geh_else *> (one);
3114 	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3115 	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3116 	    }
3117 	    break;
3118 	  default:
3119 	    break;
3120 	  }
3121       if (two)
3122 	gsi_next (&gsi);
3123       else
3124 	break;
3125     }
3126 }
3127 
3128 namespace {
3129 
3130 const pass_data pass_data_refactor_eh =
3131 {
3132   GIMPLE_PASS, /* type */
3133   "ehopt", /* name */
3134   OPTGROUP_NONE, /* optinfo_flags */
3135   TV_TREE_EH, /* tv_id */
3136   PROP_gimple_lcf, /* properties_required */
3137   0, /* properties_provided */
3138   0, /* properties_destroyed */
3139   0, /* todo_flags_start */
3140   0, /* todo_flags_finish */
3141 };
3142 
3143 class pass_refactor_eh : public gimple_opt_pass
3144 {
3145 public:
3146   pass_refactor_eh (gcc::context *ctxt)
3147     : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3148   {}
3149 
3150   /* opt_pass methods: */
3151   virtual bool gate (function *) { return flag_exceptions != 0; }
3152   virtual unsigned int execute (function *)
3153     {
3154       refactor_eh_r (gimple_body (current_function_decl));
3155       return 0;
3156     }
3157 
3158 }; // class pass_refactor_eh
3159 
3160 } // anon namespace
3161 
3162 gimple_opt_pass *
3163 make_pass_refactor_eh (gcc::context *ctxt)
3164 {
3165   return new pass_refactor_eh (ctxt);
3166 }
3167 
3168 /* At the end of gimple optimization, we can lower RESX.  */
3169 
3170 static bool
3171 lower_resx (basic_block bb, gresx *stmt,
3172 	    hash_map<eh_region, tree> *mnt_map)
3173 {
3174   int lp_nr;
3175   eh_region src_r, dst_r;
3176   gimple_stmt_iterator gsi;
3177   gimple *x;
3178   tree fn, src_nr;
3179   bool ret = false;
3180 
3181   lp_nr = lookup_stmt_eh_lp (stmt);
3182   if (lp_nr != 0)
3183     dst_r = get_eh_region_from_lp_number (lp_nr);
3184   else
3185     dst_r = NULL;
3186 
3187   src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3188   gsi = gsi_last_bb (bb);
3189 
3190   if (src_r == NULL)
3191     {
3192       /* We can wind up with no source region when pass_cleanup_eh shows
3193 	 that there are no entries into an eh region and deletes it, but
3194 	 then the block that contains the resx isn't removed.  This can
3195 	 happen without optimization when the switch statement created by
3196 	 lower_try_finally_switch isn't simplified to remove the eh case.
3197 
3198 	 Resolve this by expanding the resx node to an abort.  */
3199 
3200       fn = builtin_decl_implicit (BUILT_IN_TRAP);
3201       x = gimple_build_call (fn, 0);
3202       gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3203 
3204       while (EDGE_COUNT (bb->succs) > 0)
3205 	remove_edge (EDGE_SUCC (bb, 0));
3206     }
3207   else if (dst_r)
3208     {
3209       /* When we have a destination region, we resolve this by copying
3210 	 the excptr and filter values into place, and changing the edge
3211 	 to immediately after the landing pad.  */
3212       edge e;
3213 
3214       if (lp_nr < 0)
3215 	{
3216 	  basic_block new_bb;
3217 	  tree lab;
3218 
3219 	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3220 	     the failure decl into a new block, if needed.  */
3221 	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3222 
3223 	  tree *slot = mnt_map->get (dst_r);
3224 	  if (slot == NULL)
3225 	    {
3226 	      gimple_stmt_iterator gsi2;
3227 
3228 	      new_bb = create_empty_bb (bb);
3229 	      add_bb_to_loop (new_bb, bb->loop_father);
3230 	      lab = gimple_block_label (new_bb);
3231 	      gsi2 = gsi_start_bb (new_bb);
3232 
3233 	      fn = dst_r->u.must_not_throw.failure_decl;
3234 	      x = gimple_build_call (fn, 0);
3235 	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3236 	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3237 
3238 	      mnt_map->put (dst_r, lab);
3239 	    }
3240 	  else
3241 	    {
3242 	      lab = *slot;
3243 	      new_bb = label_to_block (lab);
3244 	    }
3245 
3246 	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
3247 	  e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3248 	  e->count = bb->count;
3249 	  e->probability = REG_BR_PROB_BASE;
3250 	}
3251       else
3252 	{
3253 	  edge_iterator ei;
3254 	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3255 
3256 	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3257 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3258 	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
3259 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3260 
3261 	  /* Update the flags for the outgoing edge.  */
3262 	  e = single_succ_edge (bb);
3263 	  gcc_assert (e->flags & EDGE_EH);
3264 	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3265 
3266 	  /* If there are no more EH users of the landing pad, delete it.  */
3267 	  FOR_EACH_EDGE (e, ei, e->dest->preds)
3268 	    if (e->flags & EDGE_EH)
3269 	      break;
3270 	  if (e == NULL)
3271 	    {
3272 	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3273 	      remove_eh_landing_pad (lp);
3274 	    }
3275 	}
3276 
3277       ret = true;
3278     }
3279   else
3280     {
3281       tree var;
3282 
3283       /* When we don't have a destination region, this exception escapes
3284 	 up the call chain.  We resolve this by generating a call to the
3285 	 _Unwind_Resume library function.  */
3286 
3287       /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3288 	 with no arguments for C++ and Java.  Check for that.  */
3289       if (src_r->use_cxa_end_cleanup)
3290 	{
3291 	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3292 	  x = gimple_build_call (fn, 0);
3293 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3294 	}
3295       else
3296 	{
3297 	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3298 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3299 	  x = gimple_build_call (fn, 1, src_nr);
3300 	  var = create_tmp_var (ptr_type_node);
3301 	  var = make_ssa_name (var, x);
3302 	  gimple_call_set_lhs (x, var);
3303 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3304 
3305 	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3306 	  x = gimple_build_call (fn, 1, var);
3307 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3308 	}
3309 
3310       gcc_assert (EDGE_COUNT (bb->succs) == 0);
3311     }
3312 
3313   gsi_remove (&gsi, true);
3314 
3315   return ret;
3316 }
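
/* Illustration of the escaping case above (the SSA name is invented):

       resx 1;

   in a region with no destination within the function becomes roughly

       _1 = __builtin_eh_pointer (1);
       __builtin_unwind_resume (_1);

   or a bare __cxa_end_cleanup () call on ARM EABI targets.  */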
3317 
3318 namespace {
3319 
3320 const pass_data pass_data_lower_resx =
3321 {
3322   GIMPLE_PASS, /* type */
3323   "resx", /* name */
3324   OPTGROUP_NONE, /* optinfo_flags */
3325   TV_TREE_EH, /* tv_id */
3326   PROP_gimple_lcf, /* properties_required */
3327   0, /* properties_provided */
3328   0, /* properties_destroyed */
3329   0, /* todo_flags_start */
3330   0, /* todo_flags_finish */
3331 };
3332 
3333 class pass_lower_resx : public gimple_opt_pass
3334 {
3335 public:
3336   pass_lower_resx (gcc::context *ctxt)
3337     : gimple_opt_pass (pass_data_lower_resx, ctxt)
3338   {}
3339 
3340   /* opt_pass methods: */
3341   virtual bool gate (function *) { return flag_exceptions != 0; }
3342   virtual unsigned int execute (function *);
3343 
3344 }; // class pass_lower_resx
3345 
3346 unsigned
3347 pass_lower_resx::execute (function *fun)
3348 {
3349   basic_block bb;
3350   bool dominance_invalidated = false;
3351   bool any_rewritten = false;
3352 
3353   hash_map<eh_region, tree> mnt_map;
3354 
3355   FOR_EACH_BB_FN (bb, fun)
3356     {
3357       gimple *last = last_stmt (bb);
3358       if (last && is_gimple_resx (last))
3359 	{
3360 	  dominance_invalidated |=
3361 	    lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3362 	  any_rewritten = true;
3363 	}
3364     }
3365 
3366   if (dominance_invalidated)
3367     {
3368       free_dominance_info (CDI_DOMINATORS);
3369       free_dominance_info (CDI_POST_DOMINATORS);
3370     }
3371 
3372   return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3373 }
3374 
3375 } // anon namespace
3376 
3377 gimple_opt_pass *
3378 make_pass_lower_resx (gcc::context *ctxt)
3379 {
3380   return new pass_lower_resx (ctxt);
3381 }
3382 
3383 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3384    external throw.  */
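/* For illustration (a sketch only; names are made up), a block such as

       <bb 5>:
       x = {v} {CLOBBER};
       y = {v} {CLOBBER};
       resx 1;   <-- throws externally

   reached via an EH edge has its clobbers simply deleted, since nothing
   after the externally visible throw can observe them.  */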
3385 
3386 static void
3387 optimize_clobbers (basic_block bb)
3388 {
3389   gimple_stmt_iterator gsi = gsi_last_bb (bb);
3390   bool any_clobbers = false;
3391   bool seen_stack_restore = false;
3392   edge_iterator ei;
3393   edge e;
3394 
3395   /* Only optimize anything if the bb contains at least one clobber,
3396      ends with resx (checked by caller), otherwise contains only
3397      clobbers, debug stmts, labels and at most one __builtin_stack_restore
3398      call, and has an incoming EH edge.  */
3399   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3400     {
3401       gimple *stmt = gsi_stmt (gsi);
3402       if (is_gimple_debug (stmt))
3403 	continue;
3404       if (gimple_clobber_p (stmt))
3405 	{
3406 	  any_clobbers = true;
3407 	  continue;
3408 	}
3409       if (!seen_stack_restore
3410 	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3411 	{
3412 	  seen_stack_restore = true;
3413 	  continue;
3414 	}
3415       if (gimple_code (stmt) == GIMPLE_LABEL)
3416 	break;
3417       return;
3418     }
3419   if (!any_clobbers)
3420     return;
3421   FOR_EACH_EDGE (e, ei, bb->preds)
3422     if (e->flags & EDGE_EH)
3423       break;
3424   if (e == NULL)
3425     return;
3426   gsi = gsi_last_bb (bb);
3427   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3428     {
3429       gimple *stmt = gsi_stmt (gsi);
3430       if (!gimple_clobber_p (stmt))
3431 	continue;
3432       unlink_stmt_vdef (stmt);
3433       gsi_remove (&gsi, true);
3434       release_defs (stmt);
3435     }
3436 }
3437 
3438 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3439    internal throw to successor BB.  */
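/* For illustration (a sketch only): if BB contains

       x = {v} {CLOBBER};
       resx 2;

   and all its predecessor edges and its single successor edge are EH
   edges, the clobber is moved to just after the labels of the successor
   block, with virtual operands fixed up or marked for renaming.  */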
3440 
3441 static int
3442 sink_clobbers (basic_block bb)
3443 {
3444   edge e;
3445   edge_iterator ei;
3446   gimple_stmt_iterator gsi, dgsi;
3447   basic_block succbb;
3448   bool any_clobbers = false;
3449   unsigned todo = 0;
3450 
3451   /* Only optimize if BB has a single EH successor and
3452      all predecessor edges are EH too.  */
3453   if (!single_succ_p (bb)
3454       || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3455     return 0;
3456 
3457   FOR_EACH_EDGE (e, ei, bb->preds)
3458     {
3459       if ((e->flags & EDGE_EH) == 0)
3460 	return 0;
3461     }
3462 
3463   /* And BB contains only CLOBBER stmts before the final
3464      RESX.  */
3465   gsi = gsi_last_bb (bb);
3466   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3467     {
3468       gimple *stmt = gsi_stmt (gsi);
3469       if (is_gimple_debug (stmt))
3470 	continue;
3471       if (gimple_code (stmt) == GIMPLE_LABEL)
3472 	break;
3473       if (!gimple_clobber_p (stmt))
3474 	return 0;
3475       any_clobbers = true;
3476     }
3477   if (!any_clobbers)
3478     return 0;
3479 
3480   edge succe = single_succ_edge (bb);
3481   succbb = succe->dest;
3482 
3483   /* See if there is a virtual PHI node to take an updated virtual
3484      operand from.  */
3485   gphi *vphi = NULL;
3486   tree vuse = NULL_TREE;
3487   for (gphi_iterator gpi = gsi_start_phis (succbb);
3488        !gsi_end_p (gpi); gsi_next (&gpi))
3489     {
3490       tree res = gimple_phi_result (gpi.phi ());
3491       if (virtual_operand_p (res))
3492 	{
3493 	  vphi = gpi.phi ();
3494 	  vuse = res;
3495 	  break;
3496 	}
3497     }
3498 
3499   dgsi = gsi_after_labels (succbb);
3500   gsi = gsi_last_bb (bb);
3501   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3502     {
3503       gimple *stmt = gsi_stmt (gsi);
3504       tree lhs;
3505       if (is_gimple_debug (stmt))
3506 	continue;
3507       if (gimple_code (stmt) == GIMPLE_LABEL)
3508 	break;
3509       lhs = gimple_assign_lhs (stmt);
3510       /* Unfortunately we don't have dominance info updated at this
3511 	 point, so checking whether
3512 	 dominated_by_p (CDI_DOMINATORS, succbb,
3513 			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
3514 	 would be too costly.  Thus, avoid sinking any clobbers that
3515 	 refer to non-(D) SSA_NAMEs.  */
3516       if (TREE_CODE (lhs) == MEM_REF
3517 	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3518 	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3519 	{
3520 	  unlink_stmt_vdef (stmt);
3521 	  gsi_remove (&gsi, true);
3522 	  release_defs (stmt);
3523 	  continue;
3524 	}
3525 
3526       /* As we do not change stmt order when sinking across a
3527          forwarder edge we can keep virtual operands in place.  */
3528       gsi_remove (&gsi, false);
3529       gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3530 
3531       /* But adjust virtual operands if we sunk across a PHI node.  */
3532       if (vuse)
3533 	{
3534 	  gimple *use_stmt;
3535 	  imm_use_iterator iter;
3536 	  use_operand_p use_p;
3537 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3538 	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3539 	      SET_USE (use_p, gimple_vdef (stmt));
3540 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3541 	    {
3542 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3543 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3544 	    }
3545 	  /* Adjust the incoming virtual operand.  */
3546 	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3547 	  SET_USE (gimple_vuse_op (stmt), vuse);
3548 	}
3549       /* If there is no virtual PHI node but the successor block has
3550          multiple predecessors, arrange for virtual operands to be renamed.  */
3551       else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3552 	       && !single_pred_p (succbb))
3553 	{
3554 	  /* In this case there will be no use of the VDEF of this stmt.
3555 	     ???  Unless this is a secondary opportunity and we have not
3556 	     removed unreachable blocks yet, so we cannot assert this.
3557 	     Which also means we will end up renaming too many times.  */
3558 	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3559 	  mark_virtual_operands_for_renaming (cfun);
3560 	  todo |= TODO_update_ssa_only_virtuals;
3561 	}
3562     }
3563 
3564   return todo;
3565 }
3566 
3567 /* At the end of inlining, we can lower EH_DISPATCH.  Return true when
3568    we have found some duplicate labels and removed some edges.  */
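/* As a rough sketch (illustrative only, not literal dump output), for an
   ERT_TRY region the statement

       eh_dispatch <N>

   is replaced by

       filter = __builtin_eh_filter (<N>);
       switch (filter) { case <filter-value>: goto <handler-label>; ...
			 default: goto <fallthru-label>; }

   while for an ERT_ALLOWED_EXCEPTIONS region it becomes a conditional
   jump comparing the filter against the region's allowed value.  */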
3569 
3570 static bool
3571 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3572 {
3573   gimple_stmt_iterator gsi;
3574   int region_nr;
3575   eh_region r;
3576   tree filter, fn;
3577   gimple *x;
3578   bool redirected = false;
3579 
3580   region_nr = gimple_eh_dispatch_region (stmt);
3581   r = get_eh_region_from_number (region_nr);
3582 
3583   gsi = gsi_last_bb (src);
3584 
3585   switch (r->type)
3586     {
3587     case ERT_TRY:
3588       {
3589 	auto_vec<tree> labels;
3590 	tree default_label = NULL;
3591 	eh_catch c;
3592 	edge_iterator ei;
3593 	edge e;
3594 	hash_set<tree> seen_values;
3595 
3596 	/* Collect the labels for a switch.  Zero the post_landing_pad
3597 	   field because we'll no longer have anything keeping these labels
3598 	   in existence and the optimizer will be free to merge these
3599 	   blocks at will.  */
3600 	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3601 	  {
3602 	    tree tp_node, flt_node, lab = c->label;
3603 	    bool have_label = false;
3604 
3605 	    c->label = NULL;
3606 	    tp_node = c->type_list;
3607 	    flt_node = c->filter_list;
3608 
3609 	    if (tp_node == NULL)
3610 	      {
3611 	        default_label = lab;
3612 		break;
3613 	      }
3614 	    do
3615 	      {
3616 		/* Filter out duplicate labels that arise when this handler
3617 		   is shadowed by an earlier one.  When no labels are
3618 		   attached to the handler anymore, we remove
3619 		   the corresponding edge and then we delete unreachable
3620 		   blocks at the end of this pass.  */
3621 		if (! seen_values.contains (TREE_VALUE (flt_node)))
3622 		  {
3623 		    tree t = build_case_label (TREE_VALUE (flt_node),
3624 					       NULL, lab);
3625 		    labels.safe_push (t);
3626 		    seen_values.add (TREE_VALUE (flt_node));
3627 		    have_label = true;
3628 		  }
3629 
3630 		tp_node = TREE_CHAIN (tp_node);
3631 		flt_node = TREE_CHAIN (flt_node);
3632 	      }
3633 	    while (tp_node);
3634 	    if (! have_label)
3635 	      {
3636 	        remove_edge (find_edge (src, label_to_block (lab)));
3637 	        redirected = true;
3638 	      }
3639 	  }
3640 
3641 	/* Clean up the edge flags.  */
3642 	FOR_EACH_EDGE (e, ei, src->succs)
3643 	  {
3644 	    if (e->flags & EDGE_FALLTHRU)
3645 	      {
3646 		/* If there was no catch-all, use the fallthru edge.  */
3647 		if (default_label == NULL)
3648 		  default_label = gimple_block_label (e->dest);
3649 		e->flags &= ~EDGE_FALLTHRU;
3650 	      }
3651 	  }
3652 	gcc_assert (default_label != NULL);
3653 
3654 	/* Don't generate a switch if there's only a default case.
3655 	   This is common in the form of try { A; } catch (...) { B; }.  */
3656 	if (!labels.exists ())
3657 	  {
3658 	    e = single_succ_edge (src);
3659 	    e->flags |= EDGE_FALLTHRU;
3660 	  }
3661 	else
3662 	  {
3663 	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3664 	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3665 							 region_nr));
3666 	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3667 	    filter = make_ssa_name (filter, x);
3668 	    gimple_call_set_lhs (x, filter);
3669 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3670 
3671 	    /* Turn the default label into a default case.  */
3672 	    default_label = build_case_label (NULL, NULL, default_label);
3673 	    sort_case_labels (labels);
3674 
3675 	    x = gimple_build_switch (filter, default_label, labels);
3676 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3677 	  }
3678       }
3679       break;
3680 
3681     case ERT_ALLOWED_EXCEPTIONS:
3682       {
3683 	edge b_e = BRANCH_EDGE (src);
3684 	edge f_e = FALLTHRU_EDGE (src);
3685 
3686 	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3687 	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3688 						     region_nr));
3689 	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3690 	filter = make_ssa_name (filter, x);
3691 	gimple_call_set_lhs (x, filter);
3692 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3693 
3694 	r->u.allowed.label = NULL;
3695 	x = gimple_build_cond (EQ_EXPR, filter,
3696 			       build_int_cst (TREE_TYPE (filter),
3697 					      r->u.allowed.filter),
3698 			       NULL_TREE, NULL_TREE);
3699 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3700 
3701 	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3702         f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3703       }
3704       break;
3705 
3706     default:
3707       gcc_unreachable ();
3708     }
3709 
3710   /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
3711   gsi_remove (&gsi, true);
3712   return redirected;
3713 }
3714 
3715 namespace {
3716 
3717 const pass_data pass_data_lower_eh_dispatch =
3718 {
3719   GIMPLE_PASS, /* type */
3720   "ehdisp", /* name */
3721   OPTGROUP_NONE, /* optinfo_flags */
3722   TV_TREE_EH, /* tv_id */
3723   PROP_gimple_lcf, /* properties_required */
3724   0, /* properties_provided */
3725   0, /* properties_destroyed */
3726   0, /* todo_flags_start */
3727   0, /* todo_flags_finish */
3728 };
3729 
3730 class pass_lower_eh_dispatch : public gimple_opt_pass
3731 {
3732 public:
3733   pass_lower_eh_dispatch (gcc::context *ctxt)
3734     : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3735   {}
3736 
3737   /* opt_pass methods: */
3738   virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3739   virtual unsigned int execute (function *);
3740 
3741 }; // class pass_lower_eh_dispatch
3742 
3743 unsigned
3744 pass_lower_eh_dispatch::execute (function *fun)
3745 {
3746   basic_block bb;
3747   int flags = 0;
3748   bool redirected = false;
3749 
3750   assign_filter_values ();
3751 
3752   FOR_EACH_BB_FN (bb, fun)
3753     {
3754       gimple *last = last_stmt (bb);
3755       if (last == NULL)
3756 	continue;
3757       if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3758 	{
3759 	  redirected |= lower_eh_dispatch (bb,
3760 					   as_a <geh_dispatch *> (last));
3761 	  flags |= TODO_update_ssa_only_virtuals;
3762 	}
3763       else if (gimple_code (last) == GIMPLE_RESX)
3764 	{
3765 	  if (stmt_can_throw_external (last))
3766 	    optimize_clobbers (bb);
3767 	  else
3768 	    flags |= sink_clobbers (bb);
3769 	}
3770     }
3771 
3772   if (redirected)
3773     delete_unreachable_blocks ();
3774   return flags;
3775 }
3776 
3777 } // anon namespace
3778 
3779 gimple_opt_pass *
3780 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3781 {
3782   return new pass_lower_eh_dispatch (ctxt);
3783 }
3784 
3785 /* Walk statements, see what regions and, optionally, landing pads
3786    are really referenced.
3787 
3788    Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3789    and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3790 
3791    Passing NULL for LP_REACHABLEP is valid; in this case only reachable
3792    regions are marked.
3793 
3794    The caller is responsible for freeing the returned sbitmaps.  */
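/* Typical usage (see remove_unreachable_handlers below):

     sbitmap r_reachable, lp_reachable;
     mark_reachable_handlers (&r_reachable, &lp_reachable);
     ... inspect the bitmaps and prune regions / landing pads ...
     sbitmap_free (r_reachable);
     sbitmap_free (lp_reachable);  */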
3795 
3796 static void
3797 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3798 {
3799   sbitmap r_reachable, lp_reachable;
3800   basic_block bb;
3801   bool mark_landing_pads = (lp_reachablep != NULL);
3802   gcc_checking_assert (r_reachablep != NULL);
3803 
3804   r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3805   bitmap_clear (r_reachable);
3806   *r_reachablep = r_reachable;
3807 
3808   if (mark_landing_pads)
3809     {
3810       lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3811       bitmap_clear (lp_reachable);
3812       *lp_reachablep = lp_reachable;
3813     }
3814   else
3815     lp_reachable = NULL;
3816 
3817   FOR_EACH_BB_FN (bb, cfun)
3818     {
3819       gimple_stmt_iterator gsi;
3820 
3821       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3822 	{
3823 	  gimple *stmt = gsi_stmt (gsi);
3824 
3825 	  if (mark_landing_pads)
3826 	    {
3827 	      int lp_nr = lookup_stmt_eh_lp (stmt);
3828 
3829 	      /* Negative LP numbers are MUST_NOT_THROW regions which
3830 		 are not considered BB enders.  */
3831 	      if (lp_nr < 0)
3832 		bitmap_set_bit (r_reachable, -lp_nr);
3833 
3834 	      /* Positive LP numbers are real landing pads, and BB enders.  */
3835 	      else if (lp_nr > 0)
3836 		{
3837 		  gcc_assert (gsi_one_before_end_p (gsi));
3838 		  eh_region region = get_eh_region_from_lp_number (lp_nr);
3839 		  bitmap_set_bit (r_reachable, region->index);
3840 		  bitmap_set_bit (lp_reachable, lp_nr);
3841 		}
3842 	    }
3843 
3844 	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
3845 	  switch (gimple_code (stmt))
3846 	    {
3847 	    case GIMPLE_RESX:
3848 	      bitmap_set_bit (r_reachable,
3849 			      gimple_resx_region (as_a <gresx *> (stmt)));
3850 	      break;
3851 	    case GIMPLE_EH_DISPATCH:
3852 	      bitmap_set_bit (r_reachable,
3853 			      gimple_eh_dispatch_region (
3854                                 as_a <geh_dispatch *> (stmt)));
3855 	      break;
3856 	    case GIMPLE_CALL:
3857 	      if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
3858 		for (int i = 0; i < 2; ++i)
3859 		  {
3860 		    tree rt = gimple_call_arg (stmt, i);
3861 		    HOST_WIDE_INT ri = tree_to_shwi (rt);
3862 
3863 		    gcc_assert (ri == (int)ri);
3864 		    bitmap_set_bit (r_reachable, ri);
3865 		  }
3866 	      break;
3867 	    default:
3868 	      break;
3869 	    }
3870 	}
3871     }
3872 }
3873 
3874 /* Remove unreachable handlers and unreachable landing pads.  */
3875 
3876 static void
3877 remove_unreachable_handlers (void)
3878 {
3879   sbitmap r_reachable, lp_reachable;
3880   eh_region region;
3881   eh_landing_pad lp;
3882   unsigned i;
3883 
3884   mark_reachable_handlers (&r_reachable, &lp_reachable);
3885 
3886   if (dump_file)
3887     {
3888       fprintf (dump_file, "Before removal of unreachable regions:\n");
3889       dump_eh_tree (dump_file, cfun);
3890       fprintf (dump_file, "Reachable regions: ");
3891       dump_bitmap_file (dump_file, r_reachable);
3892       fprintf (dump_file, "Reachable landing pads: ");
3893       dump_bitmap_file (dump_file, lp_reachable);
3894     }
3895 
3896   if (dump_file)
3897     {
3898       FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3899 	if (region && !bitmap_bit_p (r_reachable, region->index))
3900 	  fprintf (dump_file,
3901 		   "Removing unreachable region %d\n",
3902 		   region->index);
3903     }
3904 
3905   remove_unreachable_eh_regions (r_reachable);
3906 
3907   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3908     if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3909       {
3910 	if (dump_file)
3911 	  fprintf (dump_file,
3912 		   "Removing unreachable landing pad %d\n",
3913 		   lp->index);
3914 	remove_eh_landing_pad (lp);
3915       }
3916 
3917   if (dump_file)
3918     {
3919       fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3920       dump_eh_tree (dump_file, cfun);
3921       fprintf (dump_file, "\n\n");
3922     }
3923 
3924   sbitmap_free (r_reachable);
3925   sbitmap_free (lp_reachable);
3926 
3927   if (flag_checking)
3928     verify_eh_tree (cfun);
3929 }
3930 
3931 /* Remove unreachable handlers if any landing pads have been removed after
3932    the last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
3933 
3934 void
3935 maybe_remove_unreachable_handlers (void)
3936 {
3937   eh_landing_pad lp;
3938   unsigned i;
3939 
3940   if (cfun->eh == NULL)
3941     return;
3942 
3943   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3944     if (lp && lp->post_landing_pad)
3945       {
3946 	if (label_to_block (lp->post_landing_pad) == NULL)
3947 	  {
3948 	    remove_unreachable_handlers ();
3949 	    return;
3950 	  }
3951       }
3952 }
3953 
3954 /* Remove regions that do not have landing pads.  This assumes
3955    that remove_unreachable_handlers has already been run, and
3956    that the landing pads have since been manipulated.
3957 
3958    Preserve regions with landing pads and regions that prevent
3959    exceptions from propagating further, even if these regions
3960    are not reachable.  */
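/* This is invoked from execute_cleanup_eh_1 below, after unsplit_all_eh
   and cleanup_all_empty_eh may have removed landing pads.  */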
3961 
3962 static void
3963 remove_unreachable_handlers_no_lp (void)
3964 {
3965   eh_region region;
3966   sbitmap r_reachable;
3967   unsigned i;
3968 
3969   mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3970 
3971   FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3972     {
3973       if (! region)
3974 	continue;
3975 
3976       if (region->landing_pads != NULL
3977 	  || region->type == ERT_MUST_NOT_THROW)
3978 	bitmap_set_bit (r_reachable, region->index);
3979 
3980       if (dump_file
3981 	  && !bitmap_bit_p (r_reachable, region->index))
3982 	fprintf (dump_file,
3983 		 "Removing unreachable region %d\n",
3984 		 region->index);
3985     }
3986 
3987   remove_unreachable_eh_regions (r_reachable);
3988 
3989   sbitmap_free (r_reachable);
3990 }
3991 
3992 /* Undo critical edge splitting on an EH landing pad.  Earlier, we
3993    optimistically split all sorts of edges, including EH edges.  The
3994    optimization passes in between may not have needed them; if not,
3995    we should undo the split.
3996 
3997    Recognize this case by having one EH edge incoming to the BB and
3998    one normal edge outgoing; BB should be empty apart from the
3999    post_landing_pad label.
4000 
4001    Note that this is slightly different from the empty handler case
4002    handled by cleanup_empty_eh, in that the handler itself may still
4003    contain code but the landing pad has been separated from the
4004    handler.  As such, cleanup_empty_eh relies on this transformation
4005    having been done first.  */
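/* Schematically (illustrative only):

       pred --EH--> <post_landing_pad label only> --normal--> dest

   becomes

       pred --EH--> dest

   with the now-unused landing-pad block left for CFG cleanup.  */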
4006 
4007 static bool
4008 unsplit_eh (eh_landing_pad lp)
4009 {
4010   basic_block bb = label_to_block (lp->post_landing_pad);
4011   gimple_stmt_iterator gsi;
4012   edge e_in, e_out;
4013 
4014   /* Quickly check the edge counts on BB for singularity.  */
4015   if (!single_pred_p (bb) || !single_succ_p (bb))
4016     return false;
4017   e_in = single_pred_edge (bb);
4018   e_out = single_succ_edge (bb);
4019 
4020   /* Input edge must be EH and output edge must be normal.  */
4021   if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4022     return false;
4023 
4024   /* The block must be empty except for the labels and debug insns.  */
4025   gsi = gsi_after_labels (bb);
4026   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4027     gsi_next_nondebug (&gsi);
4028   if (!gsi_end_p (gsi))
4029     return false;
4030 
4031   /* The destination block must not already have a landing pad
4032      for a different region.  */
4033   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4034     {
4035       glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4036       tree lab;
4037       int lp_nr;
4038 
4039       if (!label_stmt)
4040 	break;
4041       lab = gimple_label_label (label_stmt);
4042       lp_nr = EH_LANDING_PAD_NR (lab);
4043       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4044 	return false;
4045     }
4046 
4047   /* The new destination block must not already be a destination of
4048      the source block, lest we merge fallthru and EH edges and get
4049      all sorts of confusion.  */
4050   if (find_edge (e_in->src, e_out->dest))
4051     return false;
4052 
4053   /* ??? We can get degenerate phis due to cfg cleanups.  I would have
4054      thought this should have been cleaned up by a phicprop pass, but
4055      that doesn't appear to handle virtuals.  Propagate by hand.  */
4056   if (!gimple_seq_empty_p (phi_nodes (bb)))
4057     {
4058       for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4059 	{
4060 	  gimple *use_stmt;
4061 	  gphi *phi = gpi.phi ();
4062 	  tree lhs = gimple_phi_result (phi);
4063 	  tree rhs = gimple_phi_arg_def (phi, 0);
4064 	  use_operand_p use_p;
4065 	  imm_use_iterator iter;
4066 
4067 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4068 	    {
4069 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4070 		SET_USE (use_p, rhs);
4071 	    }
4072 
4073 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4074 	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4075 
4076 	  remove_phi_node (&gpi, true);
4077 	}
4078     }
4079 
4080   if (dump_file && (dump_flags & TDF_DETAILS))
4081     fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4082 	     lp->index, e_out->dest->index);
4083 
4084   /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
4085      a successor edge, humor it.  But do the real CFG change with the
4086      predecessor of E_OUT in order to preserve the ordering of arguments
4087      to the PHI nodes in E_OUT->DEST.  */
4088   redirect_eh_edge_1 (e_in, e_out->dest, false);
4089   redirect_edge_pred (e_out, e_in->src);
4090   e_out->flags = e_in->flags;
4091   e_out->probability = e_in->probability;
4092   e_out->count = e_in->count;
4093   remove_edge (e_in);
4094 
4095   return true;
4096 }
4097 
4098 /* Examine each landing pad block and see if it matches unsplit_eh.  */
4099 
4100 static bool
4101 unsplit_all_eh (void)
4102 {
4103   bool changed = false;
4104   eh_landing_pad lp;
4105   int i;
4106 
4107   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4108     if (lp)
4109       changed |= unsplit_eh (lp);
4110 
4111   return changed;
4112 }
4113 
4114 /* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
4115    to OLD_BB to NEW_BB; return true on success, false on failure.
4116 
4117    OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4118    PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4119    Virtual PHIs may be deleted and marked for renaming.  */
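/* A sketch of the PHI handling (illustrative SSA names): if NEW_BB has

       nres = PHI <..., nop(OLD_BB_OUT)>

   and OLD_BB has

       nop = PHI <oop1(e1), oop2(e2)>

   then once e1 and e2 are redirected to NEW_BB, the arguments oop1 and
   oop2 feed NRES directly on those edges, and the PHI in OLD_BB dies
   with the block.  */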
4120 
4121 static bool
4122 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4123 			     edge old_bb_out, bool change_region)
4124 {
4125   gphi_iterator ngsi, ogsi;
4126   edge_iterator ei;
4127   edge e;
4128   bitmap ophi_handled;
4129 
4130   /* The destination block must not be a regular successor for any
4131      of the preds of the landing pad.  Thus, avoid turning
4132         <..>
4133 	 |  \ EH
4134 	 |  <..>
4135 	 |  /
4136 	<..>
4137      into
4138         <..>
4139 	|  | EH
4140 	<..>
4141      which CFG verification would choke on.  See PR45172 and PR51089.  */
4142   FOR_EACH_EDGE (e, ei, old_bb->preds)
4143     if (find_edge (e->src, new_bb))
4144       return false;
4145 
4146   FOR_EACH_EDGE (e, ei, old_bb->preds)
4147     redirect_edge_var_map_clear (e);
4148 
4149   ophi_handled = BITMAP_ALLOC (NULL);
4150 
4151   /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4152      for the edges we're going to move.  */
4153   for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4154     {
4155       gphi *ophi, *nphi = ngsi.phi ();
4156       tree nresult, nop;
4157 
4158       nresult = gimple_phi_result (nphi);
4159       nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4160 
4161       /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4162 	 the source ssa_name.  */
4163       ophi = NULL;
4164       for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4165 	{
4166 	  ophi = ogsi.phi ();
4167 	  if (gimple_phi_result (ophi) == nop)
4168 	    break;
4169 	  ophi = NULL;
4170 	}
4171 
4172       /* If we did find the corresponding PHI, copy those inputs.  */
4173       if (ophi)
4174 	{
4175 	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
4176 	  if (!has_single_use (nop))
4177 	    {
4178 	      imm_use_iterator imm_iter;
4179 	      use_operand_p use_p;
4180 
4181 	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4182 		{
4183 		  if (!gimple_debug_bind_p (USE_STMT (use_p))
4184 		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4185 			  || gimple_bb (USE_STMT (use_p)) != new_bb))
4186 		    goto fail;
4187 		}
4188 	    }
4189 	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4190 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4191 	    {
4192 	      location_t oloc;
4193 	      tree oop;
4194 
4195 	      if ((e->flags & EDGE_EH) == 0)
4196 		continue;
4197 	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
4198 	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4199 	      redirect_edge_var_map_add (e, nresult, oop, oloc);
4200 	    }
4201 	}
4202       /* If we didn't find the PHI, then whether it's a real variable or a VOP,
4203 	 we know from the fact that OLD_BB is tree_empty_eh_handler_p that the
4204 	 variable is unchanged from input to the block and we can simply
4205 	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
4206       else
4207 	{
4208 	  location_t nloc
4209 	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4210 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4211 	    redirect_edge_var_map_add (e, nresult, nop, nloc);
4212 	}
4213     }
4214 
4215   /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
4216      we don't know what values from the other edges into NEW_BB to use.  */
4217   for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4218     {
4219       gphi *ophi = ogsi.phi ();
4220       tree oresult = gimple_phi_result (ophi);
4221       if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4222 	goto fail;
4223     }
4224 
4225   /* Finally, move the edges and update the PHIs.  */
4226   for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4227     if (e->flags & EDGE_EH)
4228       {
4229 	/* ???  CFG manipulation routines do not try to update loop
4230 	   form on edge redirection.  Do so manually here for now.  */
4231 	/* If we redirect a loop entry or latch edge that will either create
4232 	   a multiple entry loop or rotate the loop.  If the loops merge
4233 	   we may have created a loop with multiple latches.
4234 	   All of this isn't easily fixed thus cancel the affected loop
4235 	   and mark the other loop as possibly having multiple latches.  */
4236 	if (e->dest == e->dest->loop_father->header)
4237 	  {
4238 	    mark_loop_for_removal (e->dest->loop_father);
4239 	    new_bb->loop_father->latch = NULL;
4240 	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4241 	  }
4242 	redirect_eh_edge_1 (e, new_bb, change_region);
4243 	redirect_edge_succ (e, new_bb);
4244 	flush_pending_stmts (e);
4245       }
4246     else
4247       ei_next (&ei);
4248 
4249   BITMAP_FREE (ophi_handled);
4250   return true;
4251 
4252  fail:
4253   FOR_EACH_EDGE (e, ei, old_bb->preds)
4254     redirect_edge_var_map_clear (e);
4255   BITMAP_FREE (ophi_handled);
4256   return false;
4257 }
4258 
4259 /* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
4260    old region to NEW_REGION at BB.  */
4261 
4262 static void
4263 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4264 			  eh_landing_pad lp, eh_region new_region)
4265 {
4266   gimple_stmt_iterator gsi;
4267   eh_landing_pad *pp;
4268 
4269   for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4270     continue;
4271   *pp = lp->next_lp;
4272 
4273   lp->region = new_region;
4274   lp->next_lp = new_region->landing_pads;
4275   new_region->landing_pads = lp;
4276 
4277   /* Delete the RESX that was matched within the empty handler block.  */
4278   gsi = gsi_last_bb (bb);
4279   unlink_stmt_vdef (gsi_stmt (gsi));
4280   gsi_remove (&gsi, true);
4281 
4282   /* Clean up E_OUT for the fallthru.  */
4283   e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4284   e_out->probability = REG_BR_PROB_BASE;
4285 }
4286 
4287 /* A subroutine of cleanup_empty_eh.  Handle more complex cases of
4288    unsplitting than unsplit_eh was prepared to handle, e.g. when
4289    multiple incoming edges and phis are involved.  */
4290 
4291 static bool
4292 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4293 {
4294   gimple_stmt_iterator gsi;
4295   tree lab;
4296 
4297   /* We really ought not have totally lost everything following
4298      a landing pad label.  Given that BB is empty, there had better
4299      be a successor.  */
4300   gcc_assert (e_out != NULL);
4301 
4302   /* The destination block must not already have a landing pad
4303      for a different region.  */
4304   lab = NULL;
4305   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4306     {
4307       glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4308       int lp_nr;
4309 
4310       if (!stmt)
4311 	break;
4312       lab = gimple_label_label (stmt);
4313       lp_nr = EH_LANDING_PAD_NR (lab);
4314       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4315 	return false;
4316     }
4317 
4318   /* Attempt to move the PHIs into the successor block.  */
4319   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4320     {
4321       if (dump_file && (dump_flags & TDF_DETAILS))
4322 	fprintf (dump_file,
4323 		 "Unsplit EH landing pad %d to block %i "
4324 		 "(via cleanup_empty_eh).\n",
4325 		 lp->index, e_out->dest->index);
4326       return true;
4327     }
4328 
4329   return false;
4330 }
4331 
4332 /* Return true if edge E_FIRST is part of an empty infinite loop
4333    or leads to such a loop through a series of single successor
4334    empty bbs.  */
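/* E.g. (illustrative) a cycle of empty single-successor blocks

       bb1 -> bb2 -> bb3 -> bb1

   entered through E_FIRST is recognized via the aux markers set below.  */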
4335 
4336 static bool
4337 infinite_empty_loop_p (edge e_first)
4338 {
4339   bool inf_loop = false;
4340   edge e;
4341 
4342   if (e_first->dest == e_first->src)
4343     return true;
4344 
4345   e_first->src->aux = (void *) 1;
4346   for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4347     {
4348       gimple_stmt_iterator gsi;
4349       if (e->dest->aux)
4350 	{
4351 	  inf_loop = true;
4352 	  break;
4353 	}
4354       e->dest->aux = (void *) 1;
4355       gsi = gsi_after_labels (e->dest);
4356       if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4357 	gsi_next_nondebug (&gsi);
4358       if (!gsi_end_p (gsi))
4359 	break;
4360     }
4361   e_first->src->aux = NULL;
4362   for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4363     e->dest->aux = NULL;
4364 
4365   return inf_loop;
4366 }
4367 
4368 /* Examine the block associated with LP to determine if it's an empty
4369    handler for its EH region.  If so, attempt to redirect EH edges to
4370    an outer region.  Return true if the CFG was updated in any way.  This
4371    is similar to jump forwarding, just across EH edges.  */
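/* In outline, the cases handled below (a summary of the code, not extra
   functionality):
     - no destination region: drop the EH edges and statements from the
       EH tables and let BB become unreachable;
     - destination is MUST_NOT_THROW: move the throwing statements to that
       region and likewise let BB become unreachable;
     - otherwise: merge the PHIs into the outer landing pad block, or, if
       every predecessor edge is an EH edge, simply move the landing pad
       to the outer region.  */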
4372 
4373 static bool
4374 cleanup_empty_eh (eh_landing_pad lp)
4375 {
4376   basic_block bb = label_to_block (lp->post_landing_pad);
4377   gimple_stmt_iterator gsi;
4378   gimple *resx;
4379   eh_region new_region;
4380   edge_iterator ei;
4381   edge e, e_out;
4382   bool has_non_eh_pred;
4383   bool ret = false;
4384   int new_lp_nr;
4385 
4386   /* There can be zero or one edges out of BB.  This is the quickest test.  */
4387   switch (EDGE_COUNT (bb->succs))
4388     {
4389     case 0:
4390       e_out = NULL;
4391       break;
4392     case 1:
4393       e_out = single_succ_edge (bb);
4394       break;
4395     default:
4396       return false;
4397     }
4398 
4399   resx = last_stmt (bb);
4400   if (resx && is_gimple_resx (resx))
4401     {
4402       if (stmt_can_throw_external (resx))
4403 	optimize_clobbers (bb);
4404       else if (sink_clobbers (bb))
4405 	ret = true;
4406     }
4407 
4408   gsi = gsi_after_labels (bb);
4409 
4410   /* Make sure to skip debug statements.  */
4411   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4412     gsi_next_nondebug (&gsi);
4413 
4414   /* If the block is totally empty, look for more unsplitting cases.  */
4415   if (gsi_end_p (gsi))
4416     {
4417       /* For the degenerate case of an infinite loop bail out.
4418 	 If bb has no successors and is totally empty, which can happen e.g.
4419 	 because of an incorrect noreturn attribute, bail out too.  */
4420       if (e_out == NULL
4421 	  || infinite_empty_loop_p (e_out))
4422 	return ret;
4423 
4424       return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4425     }
4426 
4427   /* The block should consist only of a single RESX statement, modulo a
4428      preceding call to __builtin_stack_restore if there is no outgoing
4429      edge, since the call can be eliminated in this case.  */
4430   resx = gsi_stmt (gsi);
4431   if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4432     {
4433       gsi_next (&gsi);
4434       resx = gsi_stmt (gsi);
4435     }
4436   if (!is_gimple_resx (resx))
4437     return ret;
4438   gcc_assert (gsi_one_before_end_p (gsi));
4439 
4440   /* Determine if there are non-EH edges, or resx edges into the handler.  */
4441   has_non_eh_pred = false;
4442   FOR_EACH_EDGE (e, ei, bb->preds)
4443     if (!(e->flags & EDGE_EH))
4444       has_non_eh_pred = true;
4445 
4446   /* Find the handler that's outer of the empty handler by looking at
4447      where the RESX instruction was vectored.  */
4448   new_lp_nr = lookup_stmt_eh_lp (resx);
4449   new_region = get_eh_region_from_lp_number (new_lp_nr);
4450 
4451   /* If there's no destination region within the current function,
4452      redirection is trivial via removing the throwing statements from
4453      the EH region, removing the EH edges, and allowing the block
4454      to go unreachable.  */
4455   if (new_region == NULL)
4456     {
4457       gcc_assert (e_out == NULL);
4458       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4459 	if (e->flags & EDGE_EH)
4460 	  {
4461 	    gimple *stmt = last_stmt (e->src);
4462 	    remove_stmt_from_eh_lp (stmt);
4463 	    remove_edge (e);
4464 	  }
4465 	else
4466 	  ei_next (&ei);
4467       goto succeed;
4468     }
4469 
4470   /* If the destination region is a MUST_NOT_THROW, allow the runtime
4471      to handle the abort and allow the blocks to go unreachable.  */
4472   if (new_region->type == ERT_MUST_NOT_THROW)
4473     {
4474       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4475 	if (e->flags & EDGE_EH)
4476 	  {
4477 	    gimple *stmt = last_stmt (e->src);
4478 	    remove_stmt_from_eh_lp (stmt);
4479 	    add_stmt_to_eh_lp (stmt, new_lp_nr);
4480 	    remove_edge (e);
4481 	  }
4482 	else
4483 	  ei_next (&ei);
4484       goto succeed;
4485     }
4486 
4487   /* Try to redirect the EH edges and merge the PHIs into the destination
4488      landing pad block.  If the merge succeeds, we'll already have redirected
4489      all the EH edges.  The handler itself will go unreachable if there were
4490      no normal edges.  */
4491   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4492     goto succeed;
4493 
4494   /* Finally, if all input edges are EH edges, then we can (potentially)
4495      reduce the number of transfers from the runtime by moving the landing
4496      pad from the original region to the new region.  This is a win when
4497      we remove the last CLEANUP region along a particular exception
4498      propagation path.  Since nothing changes except for the region with
4499      which the landing pad is associated, the PHI nodes do not need to be
4500      adjusted at all.  */
4501   if (!has_non_eh_pred)
4502     {
4503       cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4504       if (dump_file && (dump_flags & TDF_DETAILS))
4505 	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4506 		 lp->index, new_region->index);
4507 
4508       /* ??? The CFG didn't change, but we may have rendered the
4509 	 old EH region unreachable.  Trigger a cleanup there.  */
4510       return true;
4511     }
4512 
4513   return ret;
4514 
4515  succeed:
4516   if (dump_file && (dump_flags & TDF_DETAILS))
4517     fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4518   remove_eh_landing_pad (lp);
4519   return true;
4520 }
4521 
4522 /* Do a post-order traversal of the EH region tree.  Examine each
4523    post_landing_pad block and see if we can eliminate it as empty.  */
4524 
4525 static bool
4526 cleanup_all_empty_eh (void)
4527 {
4528   bool changed = false;
4529   eh_landing_pad lp;
4530   int i;
4531 
4532   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4533     if (lp)
4534       changed |= cleanup_empty_eh (lp);
4535 
4536   return changed;
4537 }
4538 
4539 /* Perform cleanups and lowering of exception handling
4540     1) Cleanup regions with handlers doing nothing are optimized out
4541     2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
4542     3) Info about regions containing instructions, and regions
4543        reachable via local EH edges, is collected
4544     4) The EH tree is pruned of regions that are no longer necessary.
4545 
4546    TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4547 	 Unify those that have the same failure decl and locus.
4548 */
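/* For instance (an illustrative C++ fragment), in

     struct S { ~S () { } };
     void f () { S s; g (); }

   the cleanup region guarding 's' has a handler that does nothing once
   the destructor is inlined, so step 1) can remove it and the remaining
   EH machinery can then be pruned.  */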
4549 
4550 static unsigned int
4551 execute_cleanup_eh_1 (void)
4552 {
4553   /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4554      looking up unreachable landing pads.  */
4555   remove_unreachable_handlers ();
4556 
4557   /* Watch out for the region tree vanishing due to everything being unreachable.  */
4558   if (cfun->eh->region_tree)
4559     {
4560       bool changed = false;
4561 
4562       if (optimize)
4563 	changed |= unsplit_all_eh ();
4564       changed |= cleanup_all_empty_eh ();
4565 
4566       if (changed)
4567 	{
4568 	  free_dominance_info (CDI_DOMINATORS);
4569 	  free_dominance_info (CDI_POST_DOMINATORS);
4570 
4571           /* We delayed all basic block deletion, as we may have performed
4572 	     cleanups on EH edges while non-EH edges were still present.  */
4573 	  delete_unreachable_blocks ();
4574 
4575 	  /* We manipulated the landing pads.  Remove any region that no
4576 	     longer has a landing pad.  */
4577 	  remove_unreachable_handlers_no_lp ();
4578 
4579 	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4580 	}
4581     }
4582 
4583   return 0;
4584 }
4585 
4586 namespace {
4587 
4588 const pass_data pass_data_cleanup_eh =
4589 {
4590   GIMPLE_PASS, /* type */
4591   "ehcleanup", /* name */
4592   OPTGROUP_NONE, /* optinfo_flags */
4593   TV_TREE_EH, /* tv_id */
4594   PROP_gimple_lcf, /* properties_required */
4595   0, /* properties_provided */
4596   0, /* properties_destroyed */
4597   0, /* todo_flags_start */
4598   0, /* todo_flags_finish */
4599 };
4600 
4601 class pass_cleanup_eh : public gimple_opt_pass
4602 {
4603 public:
4604   pass_cleanup_eh (gcc::context *ctxt)
4605     : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4606   {}
4607 
4608   /* opt_pass methods: */
4609   opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4610   virtual bool gate (function *fun)
4611     {
4612       return fun->eh != NULL && fun->eh->region_tree != NULL;
4613     }
4614 
4615   virtual unsigned int execute (function *);
4616 
4617 }; // class pass_cleanup_eh
4618 
4619 unsigned int
4620 pass_cleanup_eh::execute (function *fun)
4621 {
4622   int ret = execute_cleanup_eh_1 ();
4623 
4624   /* If the function no longer needs an EH personality routine
4625      clear it.  This exposes cross-language inlining opportunities
4626      and avoids references to a never defined personality routine.  */
4627   if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4628       && function_needs_eh_personality (fun) != eh_personality_lang)
4629     DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4630 
4631   return ret;
4632 }
4633 
4634 } // anon namespace
4635 
4636 gimple_opt_pass *
4637 make_pass_cleanup_eh (gcc::context *ctxt)
4638 {
4639   return new pass_cleanup_eh (ctxt);
4640 }
4641 
4642 /* Verify that BB, which contains STMT as its last statement, has precisely
4643    the edges that make_eh_edges would create.  */
4644 
4645 DEBUG_FUNCTION bool
4646 verify_eh_edges (gimple *stmt)
4647 {
4648   basic_block bb = gimple_bb (stmt);
4649   eh_landing_pad lp = NULL;
4650   int lp_nr;
4651   edge_iterator ei;
4652   edge e, eh_edge;
4653 
4654   lp_nr = lookup_stmt_eh_lp (stmt);
4655   if (lp_nr > 0)
4656     lp = get_eh_landing_pad_from_number (lp_nr);
4657 
4658   eh_edge = NULL;
4659   FOR_EACH_EDGE (e, ei, bb->succs)
4660     {
4661       if (e->flags & EDGE_EH)
4662 	{
4663 	  if (eh_edge)
4664 	    {
4665 	      error ("BB %i has multiple EH edges", bb->index);
4666 	      return true;
4667 	    }
4668 	  else
4669 	    eh_edge = e;
4670 	}
4671     }
4672 
4673   if (lp == NULL)
4674     {
4675       if (eh_edge)
4676 	{
4677 	  error ("BB %i can not throw but has an EH edge", bb->index);
4678 	  return true;
4679 	}
4680       return false;
4681     }
4682 
4683   if (!stmt_could_throw_p (stmt))
4684     {
4685       error ("BB %i last statement has incorrectly set lp", bb->index);
4686       return true;
4687     }
4688 
4689   if (eh_edge == NULL)
4690     {
4691       error ("BB %i is missing an EH edge", bb->index);
4692       return true;
4693     }
4694 
4695   if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4696     {
4697       error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4698       return true;
4699     }
4700 
4701   return false;
4702 }
4703 
4704 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */
4705 
4706 DEBUG_FUNCTION bool
4707 verify_eh_dispatch_edge (geh_dispatch *stmt)
4708 {
4709   eh_region r;
4710   eh_catch c;
4711   basic_block src, dst;
4712   bool want_fallthru = true;
4713   edge_iterator ei;
4714   edge e, fall_edge;
4715 
4716   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4717   src = gimple_bb (stmt);
4718 
4719   FOR_EACH_EDGE (e, ei, src->succs)
4720     gcc_assert (e->aux == NULL);
4721 
4722   switch (r->type)
4723     {
4724     case ERT_TRY:
4725       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4726 	{
4727 	  dst = label_to_block (c->label);
4728 	  e = find_edge (src, dst);
4729 	  if (e == NULL)
4730 	    {
4731 	      error ("BB %i is missing an edge", src->index);
4732 	      return true;
4733 	    }
4734 	  e->aux = (void *)e;
4735 
4736 	  /* A catch-all handler doesn't have a fallthru.  */
4737 	  if (c->type_list == NULL)
4738 	    {
4739 	      want_fallthru = false;
4740 	      break;
4741 	    }
4742 	}
4743       break;
4744 
4745     case ERT_ALLOWED_EXCEPTIONS:
4746       dst = label_to_block (r->u.allowed.label);
4747       e = find_edge (src, dst);
4748       if (e == NULL)
4749 	{
4750 	  error ("BB %i is missing an edge", src->index);
4751 	  return true;
4752 	}
4753       e->aux = (void *)e;
4754       break;
4755 
4756     default:
4757       gcc_unreachable ();
4758     }
4759 
4760   fall_edge = NULL;
4761   FOR_EACH_EDGE (e, ei, src->succs)
4762     {
4763       if (e->flags & EDGE_FALLTHRU)
4764 	{
4765 	  if (fall_edge != NULL)
4766 	    {
4767 	      error ("BB %i too many fallthru edges", src->index);
4768 	      return true;
4769 	    }
4770 	  fall_edge = e;
4771 	}
4772       else if (e->aux)
4773 	e->aux = NULL;
4774       else
4775 	{
4776 	  error ("BB %i has incorrect edge", src->index);
4777 	  return true;
4778 	}
4779     }
4780   if ((fall_edge != NULL) ^ want_fallthru)
4781     {
4782       error ("BB %i has incorrect fallthru edge", src->index);
4783       return true;
4784     }
4785 
4786   return false;
4787 }
4788