1 /* Exception handling semantics and decomposition for trees.
2    Copyright (C) 2003-2014 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10 
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "hash-table.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "expr.h"
27 #include "calls.h"
28 #include "flags.h"
29 #include "function.h"
30 #include "except.h"
31 #include "pointer-set.h"
32 #include "basic-block.h"
33 #include "tree-ssa-alias.h"
34 #include "internal-fn.h"
35 #include "tree-eh.h"
36 #include "gimple-expr.h"
37 #include "is-a.h"
38 #include "gimple.h"
39 #include "gimple-iterator.h"
40 #include "gimple-ssa.h"
41 #include "cgraph.h"
42 #include "tree-cfg.h"
43 #include "tree-phinodes.h"
44 #include "ssa-iterators.h"
45 #include "stringpool.h"
46 #include "tree-ssanames.h"
47 #include "tree-into-ssa.h"
48 #include "tree-ssa.h"
49 #include "tree-inline.h"
50 #include "tree-pass.h"
51 #include "langhooks.h"
52 #include "diagnostic-core.h"
53 #include "target.h"
54 #include "cfgloop.h"
55 #include "gimple-low.h"
56 
57 /* In some instances a tree and a gimple need to be stored in the same table,
58    i.e. in hash tables.  This union makes that possible.  */
59 typedef union {tree *tp; tree t; gimple g;} treemple;
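
/* For instance, collect_finally_tree below stores the LABEL_DECL of a
   GIMPLE_LABEL through the T field and a GIMPLE_TRY statement through
   the G field of the same hash table entry type.  */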
60 
61 /* Misc functions used in this file.  */
62 
63 /* Remember and lookup EH landing pad data for arbitrary statements.
64    Really this means any statement that could_throw_p.  We could
65    stuff this information into the stmt_ann data structure, but:
66 
67    (1) We absolutely rely on this information being kept until
68    we get to rtl.  Once we're done with lowering here, if we lose
69    the information there's no way to recover it!
70 
71    (2) There are many more statements that *cannot* throw as
72    compared to those that can.  We should be saving some amount
73    of space by only allocating memory for those that can throw.  */
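
/* For example (with illustrative values), a statement recorded via
   add_stmt_to_eh_lp (stmt, 2) later yields 2 from
   lookup_stmt_eh_lp (stmt), while a statement inside a MUST_NOT_THROW
   region is recorded with a negative region index; see
   record_stmt_eh_region below.  */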
74 
75 /* Add statement T in function IFUN to landing pad NUM.  */
76 
77 static void
78 add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
79 {
80   struct throw_stmt_node *n;
81   void **slot;
82 
83   gcc_assert (num != 0);
84 
85   n = ggc_alloc_throw_stmt_node ();
86   n->stmt = t;
87   n->lp_nr = num;
88 
89   if (!get_eh_throw_stmt_table (ifun))
90     set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
91 						    struct_ptr_eq,
92 						    ggc_free));
93 
94   slot = htab_find_slot (get_eh_throw_stmt_table (ifun), n, INSERT);
95   gcc_assert (!*slot);
96   *slot = n;
97 }
98 
99 /* Add statement T in the current function (cfun) to EH landing pad NUM.  */
100 
101 void
102 add_stmt_to_eh_lp (gimple t, int num)
103 {
104   add_stmt_to_eh_lp_fn (cfun, t, num);
105 }
106 
107 /* Add statement T to the single EH landing pad in REGION.  */
108 
109 static void
110 record_stmt_eh_region (eh_region region, gimple t)
111 {
112   if (region == NULL)
113     return;
114   if (region->type == ERT_MUST_NOT_THROW)
115     add_stmt_to_eh_lp_fn (cfun, t, -region->index);
116   else
117     {
118       eh_landing_pad lp = region->landing_pads;
119       if (lp == NULL)
120 	lp = gen_eh_landing_pad (region);
121       else
122 	gcc_assert (lp->next_lp == NULL);
123       add_stmt_to_eh_lp_fn (cfun, t, lp->index);
124     }
125 }
126 
127 
128 /* Remove statement T in function IFUN from its EH landing pad.  */
129 
130 bool
131 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
132 {
133   struct throw_stmt_node dummy;
134   void **slot;
135 
136   if (!get_eh_throw_stmt_table (ifun))
137     return false;
138 
139   dummy.stmt = t;
140   slot = htab_find_slot (get_eh_throw_stmt_table (ifun), &dummy,
141                         NO_INSERT);
142   if (slot)
143     {
144       htab_clear_slot (get_eh_throw_stmt_table (ifun), slot);
145       return true;
146     }
147   else
148     return false;
149 }
150 
151 
152 /* Remove statement T in the current function (cfun) from its
153    EH landing pad.  */
154 
155 bool
156 remove_stmt_from_eh_lp (gimple t)
157 {
158   return remove_stmt_from_eh_lp_fn (cfun, t);
159 }
160 
161 /* Determine if statement T is inside an EH region in function IFUN.
162    Positive numbers indicate a landing pad index; negative numbers
163    indicate a MUST_NOT_THROW region index; zero indicates that the
164    statement is not recorded in the region table.  */
165 
166 int
167 lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
168 {
169   struct throw_stmt_node *p, n;
170 
171   if (ifun->eh->throw_stmt_table == NULL)
172     return 0;
173 
174   n.stmt = t;
175   p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
176   return p ? p->lp_nr : 0;
177 }
178 
179 /* Likewise, but always use the current function.  */
180 
181 int
182 lookup_stmt_eh_lp (gimple t)
183 {
184   /* We can get called from initialized data when -fnon-call-exceptions
185      is on; prevent crash.  */
186   if (!cfun)
187     return 0;
188   return lookup_stmt_eh_lp_fn (cfun, t);
189 }
190 
191 /* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
192    nodes and LABEL_DECL nodes.  We will use this during the second phase to
193    determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */
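
/* For example, given

	try { L1: body; } finally { L2: cleanup; }

   the finally tree maps L1 to the GIMPLE_TRY_FINALLY node, and maps L2
   and the GIMPLE_TRY_FINALLY itself to the enclosing region, so
   outside_finally_tree can walk parent links to decide whether a goto
   escapes the try body.  */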
194 
195 struct finally_tree_node
196 {
197   /* When storing a GIMPLE_TRY, we have to record a gimple.  However
198      when deciding whether a GOTO to a certain LABEL_DECL (which is a
199      tree) leaves the TRY block, it's necessary to record a tree in
200      this field.  Thus a treemple is used. */
201   treemple child;
202   gimple parent;
203 };
204 
205 /* Hashtable helpers.  */
206 
207 struct finally_tree_hasher : typed_free_remove <finally_tree_node>
208 {
209   typedef finally_tree_node value_type;
210   typedef finally_tree_node compare_type;
211   static inline hashval_t hash (const value_type *);
212   static inline bool equal (const value_type *, const compare_type *);
213 };
214 
215 inline hashval_t
216 finally_tree_hasher::hash (const value_type *v)
217 {
218   return (intptr_t)v->child.t >> 4;
219 }
220 
221 inline bool
222 finally_tree_hasher::equal (const value_type *v, const compare_type *c)
223 {
224   return v->child.t == c->child.t;
225 }
226 
227 /* Note that this table is *not* marked GTY.  It is short-lived.  */
228 static hash_table <finally_tree_hasher> finally_tree;
229 
230 static void
231 record_in_finally_tree (treemple child, gimple parent)
232 {
233   struct finally_tree_node *n;
234   finally_tree_node **slot;
235 
236   n = XNEW (struct finally_tree_node);
237   n->child = child;
238   n->parent = parent;
239 
240   slot = finally_tree.find_slot (n, INSERT);
241   gcc_assert (!*slot);
242   *slot = n;
243 }
244 
245 static void
246 collect_finally_tree (gimple stmt, gimple region);
247 
248 /* Go through the gimple sequence.  Works with collect_finally_tree to
249    record all GIMPLE_LABEL and GIMPLE_TRY statements. */
250 
251 static void
252 collect_finally_tree_1 (gimple_seq seq, gimple region)
253 {
254   gimple_stmt_iterator gsi;
255 
256   for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
257     collect_finally_tree (gsi_stmt (gsi), region);
258 }
259 
260 static void
261 collect_finally_tree (gimple stmt, gimple region)
262 {
263   treemple temp;
264 
265   switch (gimple_code (stmt))
266     {
267     case GIMPLE_LABEL:
268       temp.t = gimple_label_label (stmt);
269       record_in_finally_tree (temp, region);
270       break;
271 
272     case GIMPLE_TRY:
273       if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
274         {
275           temp.g = stmt;
276           record_in_finally_tree (temp, region);
277           collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
278 	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
279         }
280       else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
281         {
282           collect_finally_tree_1 (gimple_try_eval (stmt), region);
283           collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
284         }
285       break;
286 
287     case GIMPLE_CATCH:
288       collect_finally_tree_1 (gimple_catch_handler (stmt), region);
289       break;
290 
291     case GIMPLE_EH_FILTER:
292       collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
293       break;
294 
295     case GIMPLE_EH_ELSE:
296       collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
297       collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
298       break;
299 
300     default:
301       /* A type, a decl, or some kind of statement that we're not
302 	 interested in.  Don't walk them.  */
303       break;
304     }
305 }
306 
307 
308 /* Use the finally tree to determine if a jump from START to TARGET
309    would leave the try_finally node that START lives in.  */
310 
311 static bool
312 outside_finally_tree (treemple start, gimple target)
313 {
314   struct finally_tree_node n, *p;
315 
316   do
317     {
318       n.child = start;
319       p = finally_tree.find (&n);
320       if (!p)
321 	return true;
322       start.g = p->parent;
323     }
324   while (start.g != target);
325 
326   return false;
327 }
328 
329 /* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
330    nodes into a set of gotos, magic labels, and eh regions.
331    The eh region creation is straight-forward, but frobbing all the gotos
332    and such into shape isn't.  */
333 
334 /* The sequence into which we record all EH stuff.  This will be
335    placed at the end of the function when we're all done.  */
336 static gimple_seq eh_seq;
337 
338 /* Record whether an EH region contains something that can throw,
339    indexed by EH region number.  */
340 static bitmap eh_region_may_contain_throw_map;
341 
342 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
343    statements that are seen to escape this GIMPLE_TRY_FINALLY node.
344    The idea is to record a gimple statement for everything except for
345    the conditionals, which get their labels recorded. Since labels are
346    of type 'tree', we need this node to store both gimple and tree
347    objects.  REPL_STMT is the sequence used to replace the goto/return
348    statement.  CONT_STMT is used to store the statement that allows
349    the return/goto to jump to the original destination. */
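
/* For example, for a "return x;" that escapes a try/finally, REPL_STMT
   ends up holding the statements that replace the return inside the
   try body (typically a goto to the finally code), while CONT_STMT
   holds the original return so it can be re-emitted after the finally
   code; see do_return_redirection.  */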
350 
351 struct goto_queue_node
352 {
353   treemple stmt;
354   location_t location;
355   gimple_seq repl_stmt;
356   gimple cont_stmt;
357   int index;
358   /* This is used when index >= 0 to indicate that stmt is a label (as
359      opposed to a goto stmt).  */
360   int is_label;
361 };
362 
363 /* State of the world while lowering.  */
364 
365 struct leh_state
366 {
367   /* What's "current" while constructing the eh region tree.  These
368      correspond to variables of the same name in cfun->eh, which we
369      don't have easy access to.  */
370   eh_region cur_region;
371 
372   /* What's "current" for the purposes of __builtin_eh_pointer.  For
373      a CATCH, this is the associated TRY.  For an EH_FILTER, this is
374      the associated ALLOWED_EXCEPTIONS, etc.  */
375   eh_region ehp_region;
376 
377   /* Processing of TRY_FINALLY requires a bit more state.  This is
378      split out into a separate structure so that we don't have to
379      copy so much when processing other nodes.  */
380   struct leh_tf_state *tf;
381 };
382 
383 struct leh_tf_state
384 {
385   /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
386      try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
387      this so that outside_finally_tree can reliably reference the tree used
388      in the collect_finally_tree data structures.  */
389   gimple try_finally_expr;
390   gimple top_p;
391 
392   /* While lowering a top_p, it is usually expanded into multiple statements,
393      thus we need the following field to store them. */
394   gimple_seq top_p_seq;
395 
396   /* The state outside this try_finally node.  */
397   struct leh_state *outer;
398 
399   /* The exception region created for it.  */
400   eh_region region;
401 
402   /* The goto queue.  */
403   struct goto_queue_node *goto_queue;
404   size_t goto_queue_size;
405   size_t goto_queue_active;
406 
407   /* Pointer map to help in searching goto_queue when it is large.  */
408   struct pointer_map_t *goto_queue_map;
409 
410   /* The set of unique labels seen as entries in the goto queue.  */
411   vec<tree> dest_array;
412 
413   /* A label to be added at the end of the completed transformed
414      sequence.  It will be set if may_fallthru was true *at one time*,
415      though subsequent transformations may have cleared that flag.  */
416   tree fallthru_label;
417 
418   /* True if it is possible to fall out the bottom of the try block.
419      Cleared if the fallthru is converted to a goto.  */
420   bool may_fallthru;
421 
422   /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
423   bool may_return;
424 
425   /* True if the finally block can receive an exception edge.
426      Cleared if the exception case is handled by code duplication.  */
427   bool may_throw;
428 };
429 
430 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);
431 
432 /* Search for STMT in the goto queue.  Return the replacement,
433    or null if the statement isn't in the queue.  */
434 
435 #define LARGE_GOTO_QUEUE 20
436 
437 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
438 
439 static gimple_seq
440 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
441 {
442   unsigned int i;
443   void **slot;
444 
445   if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
446     {
447       for (i = 0; i < tf->goto_queue_active; i++)
448 	if ( tf->goto_queue[i].stmt.g == stmt.g)
449 	  return tf->goto_queue[i].repl_stmt;
450       return NULL;
451     }
452 
453   /* If we have a large number of entries in the goto_queue, create a
454      pointer map and use that for searching.  */
455 
456   if (!tf->goto_queue_map)
457     {
458       tf->goto_queue_map = pointer_map_create ();
459       for (i = 0; i < tf->goto_queue_active; i++)
460 	{
461 	  slot = pointer_map_insert (tf->goto_queue_map,
462                                      tf->goto_queue[i].stmt.g);
463           gcc_assert (*slot == NULL);
464 	  *slot = &tf->goto_queue[i];
465 	}
466     }
467 
468   slot = pointer_map_contains (tf->goto_queue_map, stmt.g);
469   if (slot != NULL)
470     return (((struct goto_queue_node *) *slot)->repl_stmt);
471 
472   return NULL;
473 }
474 
475 /* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
476    lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
477    then we can just splat it in, otherwise we add the new stmts immediately
478    after the GIMPLE_COND and redirect.  */
479 
480 static void
481 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
482 				gimple_stmt_iterator *gsi)
483 {
484   tree label;
485   gimple_seq new_seq;
486   treemple temp;
487   location_t loc = gimple_location (gsi_stmt (*gsi));
488 
489   temp.tp = tp;
490   new_seq = find_goto_replacement (tf, temp);
491   if (!new_seq)
492     return;
493 
494   if (gimple_seq_singleton_p (new_seq)
495       && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
496     {
497       *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
498       return;
499     }
500 
501   label = create_artificial_label (loc);
502   /* Set the new label for the GIMPLE_COND */
503   *tp = label;
504 
505   gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
506   gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
507 }
508 
509 /* The real work of replace_goto_queue.  Returns with GSI updated to
510    point to the next statement.  */
511 
512 static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
513 
514 static void
515 replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
516 		      gimple_stmt_iterator *gsi)
517 {
518   gimple_seq seq;
519   treemple temp;
520   temp.g = NULL;
521 
522   switch (gimple_code (stmt))
523     {
524     case GIMPLE_GOTO:
525     case GIMPLE_RETURN:
526       temp.g = stmt;
527       seq = find_goto_replacement (tf, temp);
528       if (seq)
529 	{
530 	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
531 	  gsi_remove (gsi, false);
532 	  return;
533 	}
534       break;
535 
536     case GIMPLE_COND:
537       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
538       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
539       break;
540 
541     case GIMPLE_TRY:
542       replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
543       replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
544       break;
545     case GIMPLE_CATCH:
546       replace_goto_queue_stmt_list (gimple_catch_handler_ptr (stmt), tf);
547       break;
548     case GIMPLE_EH_FILTER:
549       replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
550       break;
551     case GIMPLE_EH_ELSE:
552       replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (stmt), tf);
553       replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (stmt), tf);
554       break;
555 
556     default:
557       /* These won't have gotos in them.  */
558       break;
559     }
560 
561   gsi_next (gsi);
562 }
563 
564 /* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */
565 
566 static void
567 replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
568 {
569   gimple_stmt_iterator gsi = gsi_start (*seq);
570 
571   while (!gsi_end_p (gsi))
572     replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
573 }
574 
575 /* Replace all goto queue members.  */
576 
577 static void
578 replace_goto_queue (struct leh_tf_state *tf)
579 {
580   if (tf->goto_queue_active == 0)
581     return;
582   replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
583   replace_goto_queue_stmt_list (&eh_seq, tf);
584 }
585 
586 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
587    data to be added, IS_LABEL indicates whether NEW_STMT is a label or
588    a gimple return. */
589 
590 static void
591 record_in_goto_queue (struct leh_tf_state *tf,
592                       treemple new_stmt,
593                       int index,
594                       bool is_label,
595 		      location_t location)
596 {
597   size_t active, size;
598   struct goto_queue_node *q;
599 
600   gcc_assert (!tf->goto_queue_map);
601 
602   active = tf->goto_queue_active;
603   size = tf->goto_queue_size;
604   if (active >= size)
605     {
606       size = (size ? size * 2 : 32);
607       tf->goto_queue_size = size;
608       tf->goto_queue
609          = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
610     }
611 
612   q = &tf->goto_queue[active];
613   tf->goto_queue_active = active + 1;
614 
615   memset (q, 0, sizeof (*q));
616   q->stmt = new_stmt;
617   q->index = index;
618   q->location = location;
619   q->is_label = is_label;
620 }
621 
622 /* Record the LABEL label in the goto queue contained in TF.
623    TF is not null.  */
624 
625 static void
626 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
627 			    location_t location)
628 {
629   int index;
630   treemple temp, new_stmt;
631 
632   if (!label)
633     return;
634 
635   /* Computed and non-local gotos do not get processed.  Given
636      their nature we can neither tell whether we've escaped the
637      finally block nor redirect them if we knew.  */
638   if (TREE_CODE (label) != LABEL_DECL)
639     return;
640 
641   /* No need to record gotos that don't leave the try block.  */
642   temp.t = label;
643   if (!outside_finally_tree (temp, tf->try_finally_expr))
644     return;
645 
646   if (! tf->dest_array.exists ())
647     {
648       tf->dest_array.create (10);
649       tf->dest_array.quick_push (label);
650       index = 0;
651     }
652   else
653     {
654       int n = tf->dest_array.length ();
655       for (index = 0; index < n; ++index)
656         if (tf->dest_array[index] == label)
657           break;
658       if (index == n)
659         tf->dest_array.safe_push (label);
660     }
661 
662   /* In the case of a GOTO we want to record the destination label,
663      since with a GIMPLE_COND we have easy access to the then/else
664      labels. */
665   new_stmt = stmt;
666   record_in_goto_queue (tf, new_stmt, index, true, location);
667 }
668 
669 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
670    node, and if so record that fact in the goto queue associated with that
671    try_finally node.  */
672 
673 static void
674 maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
675 {
676   struct leh_tf_state *tf = state->tf;
677   treemple new_stmt;
678 
679   if (!tf)
680     return;
681 
682   switch (gimple_code (stmt))
683     {
684     case GIMPLE_COND:
685       new_stmt.tp = gimple_op_ptr (stmt, 2);
686       record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt),
687 				  EXPR_LOCATION (*new_stmt.tp));
688       new_stmt.tp = gimple_op_ptr (stmt, 3);
689       record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt),
690 				  EXPR_LOCATION (*new_stmt.tp));
691       break;
692     case GIMPLE_GOTO:
693       new_stmt.g = stmt;
694       record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
695 				  gimple_location (stmt));
696       break;
697 
698     case GIMPLE_RETURN:
699       tf->may_return = true;
700       new_stmt.g = stmt;
701       record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
702       break;
703 
704     default:
705       gcc_unreachable ();
706     }
707 }
708 
709 
710 #ifdef ENABLE_CHECKING
711 /* We do not process GIMPLE_SWITCHes for now.  As long as the original source
712    was in fact structured, and we've not yet done jump threading, then none
713    of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this.  */
714 
715 static void
716 verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
717 {
718   struct leh_tf_state *tf = state->tf;
719   size_t i, n;
720 
721   if (!tf)
722     return;
723 
724   n = gimple_switch_num_labels (switch_expr);
725 
726   for (i = 0; i < n; ++i)
727     {
728       treemple temp;
729       tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
730       temp.t = lab;
731       gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
732     }
733 }
734 #else
735 #define verify_norecord_switch_expr(state, switch_expr)
736 #endif
737 
738 /* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
739    non-null, insert it before the new branch.  */
740 
741 static void
742 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
743 {
744   gimple x;
745 
746   /* In the case of a return, the queue node must be a gimple statement.  */
747   gcc_assert (!q->is_label);
748 
749   /* Note that the return value may have already been computed, e.g.,
750 
751 	int x;
752 	int foo (void)
753 	{
754 	  x = 0;
755 	  try {
756 	    return x;
757 	  } finally {
758 	    x++;
759 	  }
760 	}
761 
762      should return 0, not 1.  We don't have to do anything to make
763      this happen because the return value has been placed in the
764      RESULT_DECL already.  */
765 
766   q->cont_stmt = q->stmt.g;
767 
768   if (mod)
769     gimple_seq_add_seq (&q->repl_stmt, mod);
770 
771   x = gimple_build_goto (finlab);
772   gimple_set_location (x, q->location);
773   gimple_seq_add_stmt (&q->repl_stmt, x);
774 }
775 
776 /* Similar, but easier, for GIMPLE_GOTO.  */
777 
778 static void
779 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
780 		     struct leh_tf_state *tf)
781 {
782   gimple x;
783 
784   gcc_assert (q->is_label);
785 
786   q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
787 
788   if (mod)
789     gimple_seq_add_seq (&q->repl_stmt, mod);
790 
791   x = gimple_build_goto (finlab);
792   gimple_set_location (x, q->location);
793   gimple_seq_add_stmt (&q->repl_stmt, x);
794 }
795 
796 /* Emit a standard landing pad sequence into SEQ for REGION.  */
797 
798 static void
799 emit_post_landing_pad (gimple_seq *seq, eh_region region)
800 {
801   eh_landing_pad lp = region->landing_pads;
802   gimple x;
803 
804   if (lp == NULL)
805     lp = gen_eh_landing_pad (region);
806 
807   lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
808   EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
809 
810   x = gimple_build_label (lp->post_landing_pad);
811   gimple_seq_add_stmt (seq, x);
812 }
813 
814 /* Emit a RESX statement into SEQ for REGION.  */
815 
816 static void
817 emit_resx (gimple_seq *seq, eh_region region)
818 {
819   gimple x = gimple_build_resx (region->index);
820   gimple_seq_add_stmt (seq, x);
821   if (region->outer)
822     record_stmt_eh_region (region->outer, x);
823 }
824 
825 /* Emit an EH_DISPATCH statement into SEQ for REGION.  */
826 
827 static void
828 emit_eh_dispatch (gimple_seq *seq, eh_region region)
829 {
830   gimple x = gimple_build_eh_dispatch (region->index);
831   gimple_seq_add_stmt (seq, x);
832 }
833 
834 /* Note that the current EH region may contain a throw, or a
835    call to a function which itself may contain a throw.  */
836 
837 static void
838 note_eh_region_may_contain_throw (eh_region region)
839 {
840   while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
841     {
842       if (region->type == ERT_MUST_NOT_THROW)
843 	break;
844       region = region->outer;
845       if (region == NULL)
846 	break;
847     }
848 }
849 
850 /* Check if REGION has been marked as containing a throw.  If REGION is
851    NULL, this predicate is false.  */
852 
853 static inline bool
854 eh_region_may_contain_throw (eh_region r)
855 {
856   return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
857 }
858 
859 /* We want to transform
860 	try { body; } catch { stuff; }
861    to
862 	normal_sequence:
863 	  body;
864 	  over:
865 	eh_sequence:
866 	  landing_pad:
867 	  stuff;
868 	  goto over;
869 
870    TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
871    should be placed before the second operand, or NULL.  OVER is
872    an existing label that should be put at the exit, or NULL.  */
873 
874 static gimple_seq
875 frob_into_branch_around (gimple tp, eh_region region, tree over)
876 {
877   gimple x;
878   gimple_seq cleanup, result;
879   location_t loc = gimple_location (tp);
880 
881   cleanup = gimple_try_cleanup (tp);
882   result = gimple_try_eval (tp);
883 
884   if (region)
885     emit_post_landing_pad (&eh_seq, region);
886 
887   if (gimple_seq_may_fallthru (cleanup))
888     {
889       if (!over)
890 	over = create_artificial_label (loc);
891       x = gimple_build_goto (over);
892       gimple_set_location (x, loc);
893       gimple_seq_add_stmt (&cleanup, x);
894     }
895   gimple_seq_add_seq (&eh_seq, cleanup);
896 
897   if (over)
898     {
899       x = gimple_build_label (over);
900       gimple_seq_add_stmt (&result, x);
901     }
902   return result;
903 }
904 
905 /* A subroutine of lower_try_finally.  Duplicate the statement sequence SEQ.
906    Make sure to record all new labels found.  */
907 
908 static gimple_seq
909 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
910 			     location_t loc)
911 {
912   gimple region = NULL;
913   gimple_seq new_seq;
914   gimple_stmt_iterator gsi;
915 
916   new_seq = copy_gimple_seq_and_replace_locals (seq);
917 
918   for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
919     {
920       gimple stmt = gsi_stmt (gsi);
921       if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
922 	{
923 	  tree block = gimple_block (stmt);
924 	  gimple_set_location (stmt, loc);
925 	  gimple_set_block (stmt, block);
926 	}
927     }
928 
929   if (outer_state->tf)
930     region = outer_state->tf->try_finally_expr;
931   collect_finally_tree_1 (new_seq, region);
932 
933   return new_seq;
934 }
935 
936 /* A subroutine of lower_try_finally.  Create a fallthru label for
937    the given try_finally state.  The only tricky bit here is that
938    we have to make sure to record the label in our outer context.  */
939 
940 static tree
941 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
942 {
943   tree label = tf->fallthru_label;
944   treemple temp;
945 
946   if (!label)
947     {
948       label = create_artificial_label (gimple_location (tf->try_finally_expr));
949       tf->fallthru_label = label;
950       if (tf->outer->tf)
951         {
952           temp.t = label;
953           record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
954         }
955     }
956   return label;
957 }
958 
959 /* A subroutine of lower_try_finally.  If FINALLY consists of a
960    GIMPLE_EH_ELSE node, return it.  */
961 
962 static inline gimple
963 get_eh_else (gimple_seq finally)
964 {
965   gimple x = gimple_seq_first_stmt (finally);
966   if (gimple_code (x) == GIMPLE_EH_ELSE)
967     {
968       gcc_assert (gimple_seq_singleton_p (finally));
969       return x;
970     }
971   return NULL;
972 }
973 
974 /* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
975    langhook returns non-null, then the language requires that the exception
976    path out of a try_finally be treated specially.  To wit: the code within
977    the finally block may not itself throw an exception.  We have two choices
978    here. First we can duplicate the finally block and wrap it in a
979    must_not_throw region.  Second, we can generate code like
980 
981 	try {
982 	  finally_block;
983 	} catch {
984 	  if (fintmp == eh_edge)
985 	    protect_cleanup_actions;
986 	}
987 
988    where "fintmp" is the temporary used in the switch statement generation
989    alternative considered below.  For the nonce, we always choose the first
990    option.
991 
992    THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */
993 
994 static void
995 honor_protect_cleanup_actions (struct leh_state *outer_state,
996 			       struct leh_state *this_state,
997 			       struct leh_tf_state *tf)
998 {
999   tree protect_cleanup_actions;
1000   gimple_stmt_iterator gsi;
1001   bool finally_may_fallthru;
1002   gimple_seq finally;
1003   gimple x, eh_else;
1004 
1005   /* First check for nothing to do.  */
1006   if (lang_hooks.eh_protect_cleanup_actions == NULL)
1007     return;
1008   protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
1009   if (protect_cleanup_actions == NULL)
1010     return;
1011 
1012   finally = gimple_try_cleanup (tf->top_p);
1013   eh_else = get_eh_else (finally);
1014 
1015   /* Duplicate the FINALLY block.  Only need to do this for try-finally,
1016      and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
1017   if (eh_else)
1018     {
1019       finally = gimple_eh_else_e_body (eh_else);
1020       gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
1021     }
1022   else if (this_state)
1023     finally = lower_try_finally_dup_block (finally, outer_state,
1024 	gimple_location (tf->try_finally_expr));
1025   finally_may_fallthru = gimple_seq_may_fallthru (finally);
1026 
1027   /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1028      set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1029      to be in an enclosing scope, but needs to be implemented at this level
1030      to avoid a nesting violation (see wrap_temporary_cleanups in
1031      cp/decl.c).  Since it's logically at an outer level, we should call
1032      terminate before we get to it, so strip it away before adding the
1033      MUST_NOT_THROW filter.  */
1034   gsi = gsi_start (finally);
1035   x = gsi_stmt (gsi);
1036   if (gimple_code (x) == GIMPLE_TRY
1037       && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1038       && gimple_try_catch_is_cleanup (x))
1039     {
1040       gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1041       gsi_remove (&gsi, false);
1042     }
1043 
1044   /* Wrap the block with protect_cleanup_actions as the action.  */
1045   x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
1046   x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
1047 			GIMPLE_TRY_CATCH);
1048   finally = lower_eh_must_not_throw (outer_state, x);
1049 
1050   /* Drop all of this into the exception sequence.  */
1051   emit_post_landing_pad (&eh_seq, tf->region);
1052   gimple_seq_add_seq (&eh_seq, finally);
1053   if (finally_may_fallthru)
1054     emit_resx (&eh_seq, tf->region);
1055 
1056   /* Having now been handled, EH isn't to be considered with
1057      the rest of the outgoing edges.  */
1058   tf->may_throw = false;
1059 }
1060 
1061 /* A subroutine of lower_try_finally.  We have determined that there is
1062    no fallthru edge out of the finally block.  This means that there is
1063    no outgoing edge corresponding to any incoming edge.  Restructure the
1064    try_finally node for this special case.  */
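
/* A sketch of the resulting shape (labels are illustrative):

	<try body>
      lab:
	<finally code>		<-- does not fall through

   with every goto/return in the queue redirected to LAB and, if the
   region may throw, the EH path receiving either the EH_ELSE body or a
   goto back to LAB.  */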
1065 
1066 static void
1067 lower_try_finally_nofallthru (struct leh_state *state,
1068 			      struct leh_tf_state *tf)
1069 {
1070   tree lab;
1071   gimple x, eh_else;
1072   gimple_seq finally;
1073   struct goto_queue_node *q, *qe;
1074 
1075   lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1076 
1077   /* We expect that tf->top_p is a GIMPLE_TRY. */
1078   finally = gimple_try_cleanup (tf->top_p);
1079   tf->top_p_seq = gimple_try_eval (tf->top_p);
1080 
1081   x = gimple_build_label (lab);
1082   gimple_seq_add_stmt (&tf->top_p_seq, x);
1083 
1084   q = tf->goto_queue;
1085   qe = q + tf->goto_queue_active;
1086   for (; q < qe; ++q)
1087     if (q->index < 0)
1088       do_return_redirection (q, lab, NULL);
1089     else
1090       do_goto_redirection (q, lab, NULL, tf);
1091 
1092   replace_goto_queue (tf);
1093 
1094   /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
1095   eh_else = get_eh_else (finally);
1096   if (eh_else)
1097     {
1098       finally = gimple_eh_else_n_body (eh_else);
1099       lower_eh_constructs_1 (state, &finally);
1100       gimple_seq_add_seq (&tf->top_p_seq, finally);
1101 
1102       if (tf->may_throw)
1103 	{
1104 	  finally = gimple_eh_else_e_body (eh_else);
1105 	  lower_eh_constructs_1 (state, &finally);
1106 
1107 	  emit_post_landing_pad (&eh_seq, tf->region);
1108 	  gimple_seq_add_seq (&eh_seq, finally);
1109 	}
1110     }
1111   else
1112     {
1113       lower_eh_constructs_1 (state, &finally);
1114       gimple_seq_add_seq (&tf->top_p_seq, finally);
1115 
1116       if (tf->may_throw)
1117 	{
1118 	  emit_post_landing_pad (&eh_seq, tf->region);
1119 
1120 	  x = gimple_build_goto (lab);
1121 	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
1122 	  gimple_seq_add_stmt (&eh_seq, x);
1123 	}
1124     }
1125 }
1126 
1127 /* A subroutine of lower_try_finally.  We have determined that there is
1128    exactly one destination of the finally block.  Restructure the
1129    try_finally node for this special case.  */
1130 
1131 static void
1132 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1133 {
1134   struct goto_queue_node *q, *qe;
1135   gimple x;
1136   gimple_seq finally;
1137   gimple_stmt_iterator gsi;
1138   tree finally_label;
1139   location_t loc = gimple_location (tf->try_finally_expr);
1140 
1141   finally = gimple_try_cleanup (tf->top_p);
1142   tf->top_p_seq = gimple_try_eval (tf->top_p);
1143 
1144   /* Since there's only one destination, and the destination edge can only
1145      either be EH or non-EH, that implies that all of our incoming edges
1146      are of the same type.  Therefore we can lower EH_ELSE immediately.  */
1147   x = get_eh_else (finally);
1148   if (x)
1149     {
1150       if (tf->may_throw)
1151 	finally = gimple_eh_else_e_body (x);
1152       else
1153 	finally = gimple_eh_else_n_body (x);
1154     }
1155 
1156   lower_eh_constructs_1 (state, &finally);
1157 
1158   for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1159     {
1160       gimple stmt = gsi_stmt (gsi);
1161       if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1162 	{
1163 	  tree block = gimple_block (stmt);
1164 	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1165 	  gimple_set_block (stmt, block);
1166 	}
1167     }
1168 
1169   if (tf->may_throw)
1170     {
1171       /* Only reachable via the exception edge.  Add the given label to
1172          the head of the FINALLY block.  Append a RESX at the end.  */
1173       emit_post_landing_pad (&eh_seq, tf->region);
1174       gimple_seq_add_seq (&eh_seq, finally);
1175       emit_resx (&eh_seq, tf->region);
1176       return;
1177     }
1178 
1179   if (tf->may_fallthru)
1180     {
1181       /* Only reachable via the fallthru edge.  Do nothing but let
1182 	 the two blocks run together; we'll fall out the bottom.  */
1183       gimple_seq_add_seq (&tf->top_p_seq, finally);
1184       return;
1185     }
1186 
1187   finally_label = create_artificial_label (loc);
1188   x = gimple_build_label (finally_label);
1189   gimple_seq_add_stmt (&tf->top_p_seq, x);
1190 
1191   gimple_seq_add_seq (&tf->top_p_seq, finally);
1192 
1193   q = tf->goto_queue;
1194   qe = q + tf->goto_queue_active;
1195 
1196   if (tf->may_return)
1197     {
1198       /* Reachable by return expressions only.  Redirect them.  */
1199       for (; q < qe; ++q)
1200 	do_return_redirection (q, finally_label, NULL);
1201       replace_goto_queue (tf);
1202     }
1203   else
1204     {
1205       /* Reachable by goto expressions only.  Redirect them.  */
1206       for (; q < qe; ++q)
1207 	do_goto_redirection (q, finally_label, NULL, tf);
1208       replace_goto_queue (tf);
1209 
1210       if (tf->dest_array[0] == tf->fallthru_label)
1211 	{
1212 	  /* Reachable by goto to fallthru label only.  Redirect it
1213 	     to the new label (already created, sadly), and do not
1214 	     emit the final branch out, or the fallthru label.  */
1215 	  tf->fallthru_label = NULL;
1216 	  return;
1217 	}
1218     }
1219 
1220   /* Place the original return/goto to the original destination
1221      immediately after the finally block. */
1222   x = tf->goto_queue[0].cont_stmt;
1223   gimple_seq_add_stmt (&tf->top_p_seq, x);
1224   maybe_record_in_goto_queue (state, x);
1225 }
1226 
1227 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1228    and outgoing from the finally block.  Implement this by duplicating the
1229    finally block for every destination.  */
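
/* Roughly, the result has this shape, with one copy per destination:

	<try body>
	<copy of finally>		<-- fallthru path
	goto fallthru_label;
      <label for destination D>:
	<copy of finally>
	<original goto/return to D>

   plus, if the region may throw, one more copy on the EH path followed
   by a RESX.  */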
1230 
1231 static void
1232 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1233 {
1234   gimple_seq finally;
1235   gimple_seq new_stmt;
1236   gimple_seq seq;
1237   gimple x, eh_else;
1238   tree tmp;
1239   location_t tf_loc = gimple_location (tf->try_finally_expr);
1240 
1241   finally = gimple_try_cleanup (tf->top_p);
1242 
1243   /* Notice EH_ELSE, and simplify some of the remaining code
1244      by considering FINALLY to be the normal return path only.  */
1245   eh_else = get_eh_else (finally);
1246   if (eh_else)
1247     finally = gimple_eh_else_n_body (eh_else);
1248 
1249   tf->top_p_seq = gimple_try_eval (tf->top_p);
1250   new_stmt = NULL;
1251 
1252   if (tf->may_fallthru)
1253     {
1254       seq = lower_try_finally_dup_block (finally, state, tf_loc);
1255       lower_eh_constructs_1 (state, &seq);
1256       gimple_seq_add_seq (&new_stmt, seq);
1257 
1258       tmp = lower_try_finally_fallthru_label (tf);
1259       x = gimple_build_goto (tmp);
1260       gimple_set_location (x, tf_loc);
1261       gimple_seq_add_stmt (&new_stmt, x);
1262     }
1263 
1264   if (tf->may_throw)
1265     {
1266       /* We don't need to copy the EH path of EH_ELSE,
1267 	 since it is only emitted once.  */
1268       if (eh_else)
1269 	seq = gimple_eh_else_e_body (eh_else);
1270       else
1271 	seq = lower_try_finally_dup_block (finally, state, tf_loc);
1272       lower_eh_constructs_1 (state, &seq);
1273 
1274       emit_post_landing_pad (&eh_seq, tf->region);
1275       gimple_seq_add_seq (&eh_seq, seq);
1276       emit_resx (&eh_seq, tf->region);
1277     }
1278 
1279   if (tf->goto_queue)
1280     {
1281       struct goto_queue_node *q, *qe;
1282       int return_index, index;
1283       struct labels_s
1284       {
1285 	struct goto_queue_node *q;
1286 	tree label;
1287       } *labels;
1288 
1289       return_index = tf->dest_array.length ();
1290       labels = XCNEWVEC (struct labels_s, return_index + 1);
1291 
1292       q = tf->goto_queue;
1293       qe = q + tf->goto_queue_active;
1294       for (; q < qe; q++)
1295 	{
1296 	  index = q->index < 0 ? return_index : q->index;
1297 
1298 	  if (!labels[index].q)
1299 	    labels[index].q = q;
1300 	}
1301 
1302       for (index = 0; index < return_index + 1; index++)
1303 	{
1304 	  tree lab;
1305 
1306 	  q = labels[index].q;
1307 	  if (! q)
1308 	    continue;
1309 
1310 	  lab = labels[index].label
1311 	    = create_artificial_label (tf_loc);
1312 
1313 	  if (index == return_index)
1314 	    do_return_redirection (q, lab, NULL);
1315 	  else
1316 	    do_goto_redirection (q, lab, NULL, tf);
1317 
1318 	  x = gimple_build_label (lab);
1319           gimple_seq_add_stmt (&new_stmt, x);
1320 
1321 	  seq = lower_try_finally_dup_block (finally, state, q->location);
1322 	  lower_eh_constructs_1 (state, &seq);
1323           gimple_seq_add_seq (&new_stmt, seq);
1324 
1325           gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1326 	  maybe_record_in_goto_queue (state, q->cont_stmt);
1327 	}
1328 
1329       for (q = tf->goto_queue; q < qe; q++)
1330 	{
1331 	  tree lab;
1332 
1333 	  index = q->index < 0 ? return_index : q->index;
1334 
1335 	  if (labels[index].q == q)
1336 	    continue;
1337 
1338 	  lab = labels[index].label;
1339 
1340 	  if (index == return_index)
1341 	    do_return_redirection (q, lab, NULL);
1342 	  else
1343 	    do_goto_redirection (q, lab, NULL, tf);
1344 	}
1345 
1346       replace_goto_queue (tf);
1347       free (labels);
1348     }
1349 
1350   /* Need to link new stmts after running replace_goto_queue due
1351      to not wanting to process the same goto stmts twice.  */
1352   gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1353 }
1354 
1355 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1356    and outgoing from the finally block.  Implement this by instrumenting
1357    each incoming edge and creating a switch statement at the end of the
1358    finally block that branches to the appropriate destination.  */
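
/* Roughly, each incoming edge is rewritten to set a temporary and jump
   to the finally code, which then dispatches on that temporary:

	finally_tmp = <index of this edge>;
	goto finally_label;
	...
      finally_label:
	<finally code>
	switch (finally_tmp)
	  {
	  case 0: goto fallthru_label;
	  case 1: <original goto/return>;
	  ...
	  }

   This is only a sketch; the case for the EH edge ends in a RESX
   rather than a branch.  */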
1359 
1360 static void
1361 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1362 {
1363   struct goto_queue_node *q, *qe;
1364   tree finally_tmp, finally_label;
1365   int return_index, eh_index, fallthru_index;
1366   int nlabels, ndests, j, last_case_index;
1367   tree last_case;
1368   vec<tree> case_label_vec;
1369   gimple_seq switch_body = NULL;
1370   gimple x, eh_else;
1371   tree tmp;
1372   gimple switch_stmt;
1373   gimple_seq finally;
1374   struct pointer_map_t *cont_map = NULL;
1375   /* The location of the TRY_FINALLY stmt.  */
1376   location_t tf_loc = gimple_location (tf->try_finally_expr);
1377   /* The location of the finally block.  */
1378   location_t finally_loc;
1379 
1380   finally = gimple_try_cleanup (tf->top_p);
1381   eh_else = get_eh_else (finally);
1382 
1383   /* Mash the TRY block to the head of the chain.  */
1384   tf->top_p_seq = gimple_try_eval (tf->top_p);
1385 
1386   /* The location of the finally is either the last stmt in the finally
1387      block or the location of the TRY_FINALLY itself.  */
1388   x = gimple_seq_last_stmt (finally);
1389   finally_loc = x ? gimple_location (x) : tf_loc;
1390 
1391   /* Prepare for switch statement generation.  */
1392   nlabels = tf->dest_array.length ();
1393   return_index = nlabels;
1394   eh_index = return_index + tf->may_return;
1395   fallthru_index = eh_index + (tf->may_throw && !eh_else);
1396   ndests = fallthru_index + tf->may_fallthru;
1397 
1398   finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1399   finally_label = create_artificial_label (finally_loc);
1400 
1401   /* We use vec::quick_push on case_label_vec throughout this function,
1402      since we know the size in advance and allocate precisely as much
1403      space as needed.  */
1404   case_label_vec.create (ndests);
1405   last_case = NULL;
1406   last_case_index = 0;
1407 
1408   /* Begin inserting code for getting to the finally block.  Things
1409      are done in this order to correspond to the sequence the code is
1410      laid out.  */
1411 
1412   if (tf->may_fallthru)
1413     {
1414       x = gimple_build_assign (finally_tmp,
1415 			       build_int_cst (integer_type_node,
1416 					      fallthru_index));
1417       gimple_seq_add_stmt (&tf->top_p_seq, x);
1418 
1419       tmp = build_int_cst (integer_type_node, fallthru_index);
1420       last_case = build_case_label (tmp, NULL,
1421 				    create_artificial_label (tf_loc));
1422       case_label_vec.quick_push (last_case);
1423       last_case_index++;
1424 
1425       x = gimple_build_label (CASE_LABEL (last_case));
1426       gimple_seq_add_stmt (&switch_body, x);
1427 
1428       tmp = lower_try_finally_fallthru_label (tf);
1429       x = gimple_build_goto (tmp);
1430       gimple_set_location (x, tf_loc);
1431       gimple_seq_add_stmt (&switch_body, x);
1432     }
1433 
1434   /* For EH_ELSE, emit the exception path (plus resx) now, then
1435      subsequently we only need consider the normal path.  */
1436   if (eh_else)
1437     {
1438       if (tf->may_throw)
1439 	{
1440 	  finally = gimple_eh_else_e_body (eh_else);
1441 	  lower_eh_constructs_1 (state, &finally);
1442 
1443 	  emit_post_landing_pad (&eh_seq, tf->region);
1444 	  gimple_seq_add_seq (&eh_seq, finally);
1445 	  emit_resx (&eh_seq, tf->region);
1446 	}
1447 
1448       finally = gimple_eh_else_n_body (eh_else);
1449     }
1450   else if (tf->may_throw)
1451     {
1452       emit_post_landing_pad (&eh_seq, tf->region);
1453 
1454       x = gimple_build_assign (finally_tmp,
1455 			       build_int_cst (integer_type_node, eh_index));
1456       gimple_seq_add_stmt (&eh_seq, x);
1457 
1458       x = gimple_build_goto (finally_label);
1459       gimple_set_location (x, tf_loc);
1460       gimple_seq_add_stmt (&eh_seq, x);
1461 
1462       tmp = build_int_cst (integer_type_node, eh_index);
1463       last_case = build_case_label (tmp, NULL,
1464 				    create_artificial_label (tf_loc));
1465       case_label_vec.quick_push (last_case);
1466       last_case_index++;
1467 
1468       x = gimple_build_label (CASE_LABEL (last_case));
1469       gimple_seq_add_stmt (&eh_seq, x);
1470       emit_resx (&eh_seq, tf->region);
1471     }
1472 
1473   x = gimple_build_label (finally_label);
1474   gimple_seq_add_stmt (&tf->top_p_seq, x);
1475 
1476   lower_eh_constructs_1 (state, &finally);
1477   gimple_seq_add_seq (&tf->top_p_seq, finally);
1478 
1479   /* Redirect each incoming goto edge.  */
1480   q = tf->goto_queue;
1481   qe = q + tf->goto_queue_active;
1482   j = last_case_index + tf->may_return;
1483   /* Prepare the assignments to finally_tmp that are executed upon the
1484      entrance through a particular edge. */
1485   for (; q < qe; ++q)
1486     {
1487       gimple_seq mod = NULL;
1488       int switch_id;
1489       unsigned int case_index;
1490 
1491       if (q->index < 0)
1492 	{
1493 	  x = gimple_build_assign (finally_tmp,
1494 				   build_int_cst (integer_type_node,
1495 						  return_index));
1496 	  gimple_seq_add_stmt (&mod, x);
1497 	  do_return_redirection (q, finally_label, mod);
1498 	  switch_id = return_index;
1499 	}
1500       else
1501 	{
1502 	  x = gimple_build_assign (finally_tmp,
1503 				   build_int_cst (integer_type_node, q->index));
1504 	  gimple_seq_add_stmt (&mod, x);
1505 	  do_goto_redirection (q, finally_label, mod, tf);
1506 	  switch_id = q->index;
1507 	}
1508 
1509       case_index = j + q->index;
1510       if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1511         {
1512           tree case_lab;
1513           void **slot;
1514 	  tmp = build_int_cst (integer_type_node, switch_id);
1515           case_lab = build_case_label (tmp, NULL,
1516 				       create_artificial_label (tf_loc));
1517           /* We store the cont_stmt in the pointer map, so that we can recover
1518              it in the loop below.  */
1519           if (!cont_map)
1520             cont_map = pointer_map_create ();
1521           slot = pointer_map_insert (cont_map, case_lab);
1522           *slot = q->cont_stmt;
1523           case_label_vec.quick_push (case_lab);
1524         }
1525     }
1526   for (j = last_case_index; j < last_case_index + nlabels; j++)
1527     {
1528       gimple cont_stmt;
1529       void **slot;
1530 
1531       last_case = case_label_vec[j];
1532 
1533       gcc_assert (last_case);
1534       gcc_assert (cont_map);
1535 
1536       slot = pointer_map_contains (cont_map, last_case);
1537       gcc_assert (slot);
1538       cont_stmt = *(gimple *) slot;
1539 
1540       x = gimple_build_label (CASE_LABEL (last_case));
1541       gimple_seq_add_stmt (&switch_body, x);
1542       gimple_seq_add_stmt (&switch_body, cont_stmt);
1543       maybe_record_in_goto_queue (state, cont_stmt);
1544     }
1545   if (cont_map)
1546     pointer_map_destroy (cont_map);
1547 
1548   replace_goto_queue (tf);
1549 
1550   /* Make sure that the last case is the default label, as one is required.
1551      Then sort the labels, which is also required in GIMPLE.  */
1552   CASE_LOW (last_case) = NULL;
1553   sort_case_labels (case_label_vec);
1554 
1555   /* Build the switch statement, setting last_case to be the default
1556      label.  */
1557   switch_stmt = gimple_build_switch (finally_tmp, last_case,
1558 				     case_label_vec);
1559   gimple_set_location (switch_stmt, finally_loc);
1560 
1561   /* Need to link SWITCH_STMT after running replace_goto_queue
1562      due to not wanting to process the same goto stmts twice.  */
1563   gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1564   gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1565 }
1566 
1567 /* Decide whether or not we are going to duplicate the finally block.
1568    There are several considerations.
1569 
1570    First, if this is Java, then the finally block contains code
1571    written by the user.  It has line numbers associated with it,
1572    so duplicating the block means it's difficult to set a breakpoint.
1573    Since controlling code generation via -g is verboten, we simply
1574    never duplicate code without optimization.
1575 
1576    Second, we'd like to prevent egregious code growth.  One way to
1577    do this is to estimate the size of the finally block, multiply
1578    that by the number of copies we'd need to make, and compare against
1579    the estimate of the size of the switch machinery we'd have to add.  */
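
/* As a rough illustration of the -O2 heuristic below: with 3
   destinations and a finally block estimated at 20 insns, the copy
   estimate is (20 + 1) * 3 = 63 and the switch estimate is
   10 + 2 * 3 = 16, so copying is still chosen because 63 < 100.  */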
1580 
1581 static bool
1582 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1583 {
1584   int f_estimate, sw_estimate;
1585   gimple eh_else;
1586 
1587   /* If there's an EH_ELSE involved, the exception path is separate
1588      and really doesn't come into play for this computation.  */
1589   eh_else = get_eh_else (finally);
1590   if (eh_else)
1591     {
1592       ndests -= may_throw;
1593       finally = gimple_eh_else_n_body (eh_else);
1594     }
1595 
1596   if (!optimize)
1597     {
1598       gimple_stmt_iterator gsi;
1599 
1600       if (ndests == 1)
1601         return true;
1602 
1603       for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1604 	{
1605 	  gimple stmt = gsi_stmt (gsi);
1606 	  if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
1607 	    return false;
1608 	}
1609       return true;
1610     }
1611 
1612   /* Finally estimate N times, plus N gotos.  */
1613   f_estimate = count_insns_seq (finally, &eni_size_weights);
1614   f_estimate = (f_estimate + 1) * ndests;
1615 
1616   /* Switch statement (cost 10), N variable assignments, N gotos.  */
1617   sw_estimate = 10 + 2 * ndests;
1618 
1619   /* Optimize for size clearly wants our best guess.  */
1620   if (optimize_function_for_size_p (cfun))
1621     return f_estimate < sw_estimate;
1622 
1623   /* ??? These numbers are completely made up so far.  */
1624   if (optimize > 1)
1625     return f_estimate < 100 || f_estimate < sw_estimate * 2;
1626   else
1627     return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1628 }
1629 
1630 /* REG is the enclosing region for a possible cleanup region, or the region
1631    itself.  Returns TRUE if such a region would be unreachable.
1632 
1633    Cleanup regions within a must-not-throw region aren't actually reachable
1634    even if there are throwing stmts within them, because the personality
1635    routine will call terminate before unwinding.  */
1636 
1637 static bool
1638 cleanup_is_dead_in (eh_region reg)
1639 {
1640   while (reg && reg->type == ERT_CLEANUP)
1641     reg = reg->outer;
1642   return (reg && reg->type == ERT_MUST_NOT_THROW);
1643 }
1644 
1645 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
1646    to a sequence of labels and blocks, plus the exception region trees
1647    that record all the magic.  This is complicated by the need to
1648    arrange for the FINALLY block to be executed on all exits.  */
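/* Roughly speaking (illustration only), a construct such as

     try { body } finally { cleanup }

   is lowered either by replicating CLEANUP on every path out of BODY
   (fallthru, each goto/return destination, and the EH path), or by
   funnelling all exits through a single copy of CLEANUP followed by a
   switch on a temporary recording which exit was taken; the choice is
   made by decide_copy_try_finally below.  */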
1649 
1650 static gimple_seq
1651 lower_try_finally (struct leh_state *state, gimple tp)
1652 {
1653   struct leh_tf_state this_tf;
1654   struct leh_state this_state;
1655   int ndests;
1656   gimple_seq old_eh_seq;
1657 
1658   /* Process the try block.  */
1659 
1660   memset (&this_tf, 0, sizeof (this_tf));
1661   this_tf.try_finally_expr = tp;
1662   this_tf.top_p = tp;
1663   this_tf.outer = state;
1664   if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
1665     {
1666       this_tf.region = gen_eh_region_cleanup (state->cur_region);
1667       this_state.cur_region = this_tf.region;
1668     }
1669   else
1670     {
1671       this_tf.region = NULL;
1672       this_state.cur_region = state->cur_region;
1673     }
1674 
1675   this_state.ehp_region = state->ehp_region;
1676   this_state.tf = &this_tf;
1677 
1678   old_eh_seq = eh_seq;
1679   eh_seq = NULL;
1680 
1681   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1682 
1683   /* Determine if the try block is escaped through the bottom.  */
1684   this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1685 
1686   /* Determine if any exceptions are possible within the try block.  */
1687   if (this_tf.region)
1688     this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1689   if (this_tf.may_throw)
1690     honor_protect_cleanup_actions (state, &this_state, &this_tf);
1691 
1692   /* Determine how many edges (still) reach the finally block.  Or rather,
1693      how many destinations are reached by the finally block.  Use this to
1694      determine how we process the finally block itself.  */
1695 
1696   ndests = this_tf.dest_array.length ();
1697   ndests += this_tf.may_fallthru;
1698   ndests += this_tf.may_return;
1699   ndests += this_tf.may_throw;
1700 
1701   /* If the FINALLY block is not reachable, dike it out.  */
1702   if (ndests == 0)
1703     {
1704       gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1705       gimple_try_set_cleanup (tp, NULL);
1706     }
1707   /* If the finally block doesn't fall through, then any destination
1708      we might try to impose there isn't reached either.  There may be
1709      some minor amount of cleanup and redirection still needed.  */
1710   else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1711     lower_try_finally_nofallthru (state, &this_tf);
1712 
1713   /* We can easily special-case redirection to a single destination.  */
1714   else if (ndests == 1)
1715     lower_try_finally_onedest (state, &this_tf);
1716   else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1717 				    gimple_try_cleanup (tp)))
1718     lower_try_finally_copy (state, &this_tf);
1719   else
1720     lower_try_finally_switch (state, &this_tf);
1721 
1722   /* If someone requested we add a label at the end of the transformed
1723      block, do so.  */
1724   if (this_tf.fallthru_label)
1725     {
1726       /* This must be reached only if ndests == 0. */
1727       gimple x = gimple_build_label (this_tf.fallthru_label);
1728       gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1729     }
1730 
1731   this_tf.dest_array.release ();
1732   free (this_tf.goto_queue);
1733   if (this_tf.goto_queue_map)
1734     pointer_map_destroy (this_tf.goto_queue_map);
1735 
1736   /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1737      If there was no old eh_seq, then the append is trivially already done.  */
1738   if (old_eh_seq)
1739     {
1740       if (eh_seq == NULL)
1741 	eh_seq = old_eh_seq;
1742       else
1743 	{
1744 	  gimple_seq new_eh_seq = eh_seq;
1745 	  eh_seq = old_eh_seq;
1746 	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1747 	}
1748     }
1749 
1750   return this_tf.top_p_seq;
1751 }
1752 
1753 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
1754    list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1755    exception region trees that record all the magic.  */
1756 
1757 static gimple_seq
1758 lower_catch (struct leh_state *state, gimple tp)
1759 {
1760   eh_region try_region = NULL;
1761   struct leh_state this_state = *state;
1762   gimple_stmt_iterator gsi;
1763   tree out_label;
1764   gimple_seq new_seq, cleanup;
1765   gimple x;
1766   location_t try_catch_loc = gimple_location (tp);
1767 
1768   if (flag_exceptions)
1769     {
1770       try_region = gen_eh_region_try (state->cur_region);
1771       this_state.cur_region = try_region;
1772     }
1773 
1774   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1775 
1776   if (!eh_region_may_contain_throw (try_region))
1777     return gimple_try_eval (tp);
1778 
1779   new_seq = NULL;
1780   emit_eh_dispatch (&new_seq, try_region);
1781   emit_resx (&new_seq, try_region);
1782 
1783   this_state.cur_region = state->cur_region;
1784   this_state.ehp_region = try_region;
1785 
1786   out_label = NULL;
1787   cleanup = gimple_try_cleanup (tp);
1788   for (gsi = gsi_start (cleanup);
1789        !gsi_end_p (gsi);
1790        gsi_next (&gsi))
1791     {
1792       eh_catch c;
1793       gimple gcatch;
1794       gimple_seq handler;
1795 
1796       gcatch = gsi_stmt (gsi);
1797       c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));
1798 
1799       handler = gimple_catch_handler (gcatch);
1800       lower_eh_constructs_1 (&this_state, &handler);
1801 
1802       c->label = create_artificial_label (UNKNOWN_LOCATION);
1803       x = gimple_build_label (c->label);
1804       gimple_seq_add_stmt (&new_seq, x);
1805 
1806       gimple_seq_add_seq (&new_seq, handler);
1807 
1808       if (gimple_seq_may_fallthru (new_seq))
1809 	{
1810 	  if (!out_label)
1811 	    out_label = create_artificial_label (try_catch_loc);
1812 
1813 	  x = gimple_build_goto (out_label);
1814 	  gimple_seq_add_stmt (&new_seq, x);
1815 	}
1816       if (!c->type_list)
1817 	break;
1818     }
1819 
1820   gimple_try_set_cleanup (tp, new_seq);
1821 
1822   return frob_into_branch_around (tp, try_region, out_label);
1823 }
1824 
1825 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
1826    GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1827    region trees that record all the magic.  */
1828 
1829 static gimple_seq
1830 lower_eh_filter (struct leh_state *state, gimple tp)
1831 {
1832   struct leh_state this_state = *state;
1833   eh_region this_region = NULL;
1834   gimple inner, x;
1835   gimple_seq new_seq;
1836 
1837   inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1838 
1839   if (flag_exceptions)
1840     {
1841       this_region = gen_eh_region_allowed (state->cur_region,
1842 				           gimple_eh_filter_types (inner));
1843       this_state.cur_region = this_region;
1844     }
1845 
1846   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1847 
1848   if (!eh_region_may_contain_throw (this_region))
1849     return gimple_try_eval (tp);
1850 
1851   new_seq = NULL;
1852   this_state.cur_region = state->cur_region;
1853   this_state.ehp_region = this_region;
1854 
1855   emit_eh_dispatch (&new_seq, this_region);
1856   emit_resx (&new_seq, this_region);
1857 
1858   this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1859   x = gimple_build_label (this_region->u.allowed.label);
1860   gimple_seq_add_stmt (&new_seq, x);
1861 
1862   lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1863   gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1864 
1865   gimple_try_set_cleanup (tp, new_seq);
1866 
1867   return frob_into_branch_around (tp, this_region, NULL);
1868 }
1869 
1870 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
1871    a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1872    plus the exception region trees that record all the magic.  */
1873 
1874 static gimple_seq
1875 lower_eh_must_not_throw (struct leh_state *state, gimple tp)
1876 {
1877   struct leh_state this_state = *state;
1878 
1879   if (flag_exceptions)
1880     {
1881       gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1882       eh_region this_region;
1883 
1884       this_region = gen_eh_region_must_not_throw (state->cur_region);
1885       this_region->u.must_not_throw.failure_decl
1886 	= gimple_eh_must_not_throw_fndecl (inner);
1887       this_region->u.must_not_throw.failure_loc
1888 	= LOCATION_LOCUS (gimple_location (tp));
1889 
1890       /* In order to get mangling applied to this decl, we must mark it
1891 	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
1892 	 needs to happen.  */
1893       TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1894 
1895       this_state.cur_region = this_region;
1896     }
1897 
1898   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1899 
1900   return gimple_try_eval (tp);
1901 }
1902 
1903 /* Implement a cleanup expression.  This is similar to try-finally,
1904    except that we only execute the cleanup block for exception edges.  */
1905 
1906 static gimple_seq
1907 lower_cleanup (struct leh_state *state, gimple tp)
1908 {
1909   struct leh_state this_state = *state;
1910   eh_region this_region = NULL;
1911   struct leh_tf_state fake_tf;
1912   gimple_seq result;
1913   bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1914 
1915   if (flag_exceptions && !cleanup_dead)
1916     {
1917       this_region = gen_eh_region_cleanup (state->cur_region);
1918       this_state.cur_region = this_region;
1919     }
1920 
1921   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1922 
1923   if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1924     return gimple_try_eval (tp);
1925 
1926   /* Build enough of a try-finally state so that we can reuse
1927      honor_protect_cleanup_actions.  */
1928   memset (&fake_tf, 0, sizeof (fake_tf));
1929   fake_tf.top_p = fake_tf.try_finally_expr = tp;
1930   fake_tf.outer = state;
1931   fake_tf.region = this_region;
1932   fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1933   fake_tf.may_throw = true;
1934 
1935   honor_protect_cleanup_actions (state, NULL, &fake_tf);
1936 
1937   if (fake_tf.may_throw)
1938     {
1939       /* In this case honor_protect_cleanup_actions had nothing to do,
1940 	 and we should process this normally.  */
1941       lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1942       result = frob_into_branch_around (tp, this_region,
1943                                         fake_tf.fallthru_label);
1944     }
1945   else
1946     {
1947       /* In this case honor_protect_cleanup_actions did nearly all of
1948 	 the work.  All we have left is to append the fallthru_label.  */
1949 
1950       result = gimple_try_eval (tp);
1951       if (fake_tf.fallthru_label)
1952 	{
1953 	  gimple x = gimple_build_label (fake_tf.fallthru_label);
1954 	  gimple_seq_add_stmt (&result, x);
1955 	}
1956     }
1957   return result;
1958 }
1959 
1960 /* Main loop for lowering eh constructs. Also moves gsi to the next
1961    statement. */
1962 
1963 static void
1964 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1965 {
1966   gimple_seq replace;
1967   gimple x;
1968   gimple stmt = gsi_stmt (*gsi);
1969 
1970   switch (gimple_code (stmt))
1971     {
1972     case GIMPLE_CALL:
1973       {
1974 	tree fndecl = gimple_call_fndecl (stmt);
1975 	tree rhs, lhs;
1976 
1977 	if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1978 	  switch (DECL_FUNCTION_CODE (fndecl))
1979 	    {
1980 	    case BUILT_IN_EH_POINTER:
1981 	      /* The front end may have generated a call to
1982 		 __builtin_eh_pointer (0) within a catch region.  Replace
1983 		 this zero argument with the current catch region number.  */
1984 	      if (state->ehp_region)
1985 		{
1986 		  tree nr = build_int_cst (integer_type_node,
1987 					   state->ehp_region->index);
1988 		  gimple_call_set_arg (stmt, 0, nr);
1989 		}
1990 	      else
1991 		{
1992 		  /* The user has done something silly.  Remove it.  */
1993 		  rhs = null_pointer_node;
1994 		  goto do_replace;
1995 		}
1996 	      break;
1997 
1998 	    case BUILT_IN_EH_FILTER:
1999 	      /* ??? This should never appear, but since it's a builtin it
2000 		 is accessible to abuse by users.  Just remove it and
2001 		 replace the use with the arbitrary value zero.  */
2002 	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2003 	    do_replace:
2004 	      lhs = gimple_call_lhs (stmt);
2005 	      x = gimple_build_assign (lhs, rhs);
2006 	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
2007 	      /* FALLTHRU */
2008 
2009 	    case BUILT_IN_EH_COPY_VALUES:
2010 	      /* Likewise this should not appear.  Remove it.  */
2011 	      gsi_remove (gsi, true);
2012 	      return;
2013 
2014 	    default:
2015 	      break;
2016 	    }
2017       }
2018       /* FALLTHRU */
2019 
2020     case GIMPLE_ASSIGN:
2021       /* If the stmt can throw, use a new temporary for the assignment
2022          to a LHS.  This makes sure the old value of the LHS is
2023 	 available on the EH edge.  Only do so for statements that
2024 	 potentially fall through (e.g. no noreturn calls), otherwise
2025 	 this new assignment might create fake fallthru regions.  */
2026       if (stmt_could_throw_p (stmt)
2027 	  && gimple_has_lhs (stmt)
2028 	  && gimple_stmt_may_fallthru (stmt)
2029 	  && !tree_could_throw_p (gimple_get_lhs (stmt))
2030 	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2031 	{
2032 	  tree lhs = gimple_get_lhs (stmt);
2033 	  tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
2034 	  gimple s = gimple_build_assign (lhs, tmp);
2035 	  gimple_set_location (s, gimple_location (stmt));
2036 	  gimple_set_block (s, gimple_block (stmt));
2037 	  gimple_set_lhs (stmt, tmp);
2038 	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2039 	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2040 	    DECL_GIMPLE_REG_P (tmp) = 1;
2041 	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
2042 	}
2043       /* Look for things that can throw exceptions, and record them.  */
2044       if (state->cur_region && stmt_could_throw_p (stmt))
2045 	{
2046 	  record_stmt_eh_region (state->cur_region, stmt);
2047 	  note_eh_region_may_contain_throw (state->cur_region);
2048 	}
2049       break;
2050 
2051     case GIMPLE_COND:
2052     case GIMPLE_GOTO:
2053     case GIMPLE_RETURN:
2054       maybe_record_in_goto_queue (state, stmt);
2055       break;
2056 
2057     case GIMPLE_SWITCH:
2058       verify_norecord_switch_expr (state, stmt);
2059       break;
2060 
2061     case GIMPLE_TRY:
2062       if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
2063 	replace = lower_try_finally (state, stmt);
2064       else
2065 	{
2066 	  x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
2067 	  if (!x)
2068 	    {
2069 	      replace = gimple_try_eval (stmt);
2070 	      lower_eh_constructs_1 (state, &replace);
2071 	    }
2072 	  else
2073 	    switch (gimple_code (x))
2074 	      {
2075 		case GIMPLE_CATCH:
2076 		    replace = lower_catch (state, stmt);
2077 		    break;
2078 		case GIMPLE_EH_FILTER:
2079 		    replace = lower_eh_filter (state, stmt);
2080 		    break;
2081 		case GIMPLE_EH_MUST_NOT_THROW:
2082 		    replace = lower_eh_must_not_throw (state, stmt);
2083 		    break;
2084 		case GIMPLE_EH_ELSE:
2085 		    /* This code is only valid with GIMPLE_TRY_FINALLY.  */
2086 		    gcc_unreachable ();
2087 		default:
2088 		    replace = lower_cleanup (state, stmt);
2089 		    break;
2090 	      }
2091 	}
2092 
2093       /* Remove the old stmt and insert the transformed sequence
2094 	 instead. */
2095       gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2096       gsi_remove (gsi, true);
2097 
2098       /* Return since we don't want gsi_next () */
2099       return;
2100 
2101     case GIMPLE_EH_ELSE:
2102       /* We should be eliminating this in lower_try_finally et al.  */
2103       gcc_unreachable ();
2104 
2105     default:
2106       /* A type, a decl, or some kind of statement that we're not
2107 	 interested in.  Don't walk them.  */
2108       break;
2109     }
2110 
2111   gsi_next (gsi);
2112 }
2113 
2114 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2115 
2116 static void
2117 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2118 {
2119   gimple_stmt_iterator gsi;
2120   for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2121     lower_eh_constructs_2 (state, &gsi);
2122 }
2123 
2124 static unsigned int
2125 lower_eh_constructs (void)
2126 {
2127   struct leh_state null_state;
2128   gimple_seq bodyp;
2129 
2130   bodyp = gimple_body (current_function_decl);
2131   if (bodyp == NULL)
2132     return 0;
2133 
2134   finally_tree.create (31);
2135   eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2136   memset (&null_state, 0, sizeof (null_state));
2137 
2138   collect_finally_tree_1 (bodyp, NULL);
2139   lower_eh_constructs_1 (&null_state, &bodyp);
2140   gimple_set_body (current_function_decl, bodyp);
2141 
2142   /* We assume there's a return statement, or something, at the end of
2143      the function, and thus plopping the EH sequence afterward won't
2144      change anything.  */
2145   gcc_assert (!gimple_seq_may_fallthru (bodyp));
2146   gimple_seq_add_seq (&bodyp, eh_seq);
2147 
2148   /* We assume that since BODYP already existed, adding EH_SEQ to it
2149      didn't change its value, and we don't have to re-set the function.  */
2150   gcc_assert (bodyp == gimple_body (current_function_decl));
2151 
2152   finally_tree.dispose ();
2153   BITMAP_FREE (eh_region_may_contain_throw_map);
2154   eh_seq = NULL;
2155 
2156   /* If this function needs a language specific EH personality routine
2157      and the frontend didn't already set one do so now.  */
2158   if (function_needs_eh_personality (cfun) == eh_personality_lang
2159       && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2160     DECL_FUNCTION_PERSONALITY (current_function_decl)
2161       = lang_hooks.eh_personality ();
2162 
2163   return 0;
2164 }
2165 
2166 namespace {
2167 
2168 const pass_data pass_data_lower_eh =
2169 {
2170   GIMPLE_PASS, /* type */
2171   "eh", /* name */
2172   OPTGROUP_NONE, /* optinfo_flags */
2173   false, /* has_gate */
2174   true, /* has_execute */
2175   TV_TREE_EH, /* tv_id */
2176   PROP_gimple_lcf, /* properties_required */
2177   PROP_gimple_leh, /* properties_provided */
2178   0, /* properties_destroyed */
2179   0, /* todo_flags_start */
2180   0, /* todo_flags_finish */
2181 };
2182 
2183 class pass_lower_eh : public gimple_opt_pass
2184 {
2185 public:
2186   pass_lower_eh (gcc::context *ctxt)
2187     : gimple_opt_pass (pass_data_lower_eh, ctxt)
2188   {}
2189 
2190   /* opt_pass methods: */
2191   unsigned int execute () { return lower_eh_constructs (); }
2192 
2193 }; // class pass_lower_eh
2194 
2195 } // anon namespace
2196 
2197 gimple_opt_pass *
2198 make_pass_lower_eh (gcc::context *ctxt)
2199 {
2200   return new pass_lower_eh (ctxt);
2201 }
2202 
2203 /* Create the multiple edges from an EH_DISPATCH statement to all of
2204    the possible handlers for its EH region.  Return true if there's
2205    no fallthru edge; false if there is.  */
2206 
2207 bool
2208 make_eh_dispatch_edges (gimple stmt)
2209 {
2210   eh_region r;
2211   eh_catch c;
2212   basic_block src, dst;
2213 
2214   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2215   src = gimple_bb (stmt);
2216 
2217   switch (r->type)
2218     {
2219     case ERT_TRY:
2220       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2221 	{
2222 	  dst = label_to_block (c->label);
2223 	  make_edge (src, dst, 0);
2224 
2225 	  /* A catch-all handler doesn't have a fallthru.  */
2226 	  if (c->type_list == NULL)
2227 	    return false;
2228 	}
2229       break;
2230 
2231     case ERT_ALLOWED_EXCEPTIONS:
2232       dst = label_to_block (r->u.allowed.label);
2233       make_edge (src, dst, 0);
2234       break;
2235 
2236     default:
2237       gcc_unreachable ();
2238     }
2239 
2240   return true;
2241 }
2242 
2243 /* Create the single EH edge from STMT to its nearest landing pad,
2244    if there is such a landing pad within the current function.  */
2245 
2246 void
2247 make_eh_edges (gimple stmt)
2248 {
2249   basic_block src, dst;
2250   eh_landing_pad lp;
2251   int lp_nr;
2252 
2253   lp_nr = lookup_stmt_eh_lp (stmt);
2254   if (lp_nr <= 0)
2255     return;
2256 
2257   lp = get_eh_landing_pad_from_number (lp_nr);
2258   gcc_assert (lp != NULL);
2259 
2260   src = gimple_bb (stmt);
2261   dst = label_to_block (lp->post_landing_pad);
2262   make_edge (src, dst, EDGE_EH);
2263 }
2264 
2265 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2266    do not actually perform the final edge redirection.
2267 
2268    CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2269    we intend to change the destination EH region as well; this means
2270    EH_LANDING_PAD_NR must already be set on the destination block label.
2271    If false, we're being called from generic cfg manipulation code and we
2272    should preserve our place within the region tree.  */
2273 
2274 static void
2275 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2276 {
2277   eh_landing_pad old_lp, new_lp;
2278   basic_block old_bb;
2279   gimple throw_stmt;
2280   int old_lp_nr, new_lp_nr;
2281   tree old_label, new_label;
2282   edge_iterator ei;
2283   edge e;
2284 
2285   old_bb = edge_in->dest;
2286   old_label = gimple_block_label (old_bb);
2287   old_lp_nr = EH_LANDING_PAD_NR (old_label);
2288   gcc_assert (old_lp_nr > 0);
2289   old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2290 
2291   throw_stmt = last_stmt (edge_in->src);
2292   gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2293 
2294   new_label = gimple_block_label (new_bb);
2295 
2296   /* Look for an existing region that might be using NEW_BB already.  */
2297   new_lp_nr = EH_LANDING_PAD_NR (new_label);
2298   if (new_lp_nr)
2299     {
2300       new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2301       gcc_assert (new_lp);
2302 
2303       /* Unless CHANGE_REGION is true, the new and old landing pad
2304 	 had better be associated with the same EH region.  */
2305       gcc_assert (change_region || new_lp->region == old_lp->region);
2306     }
2307   else
2308     {
2309       new_lp = NULL;
2310       gcc_assert (!change_region);
2311     }
2312 
2313   /* Notice when we redirect the last EH edge away from OLD_BB.  */
2314   FOR_EACH_EDGE (e, ei, old_bb->preds)
2315     if (e != edge_in && (e->flags & EDGE_EH))
2316       break;
2317 
2318   if (new_lp)
2319     {
2320       /* NEW_LP already exists.  If there are still edges into OLD_LP,
2321 	 there's nothing to do with the EH tree.  If there are no more
2322 	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2323 	 If CHANGE_REGION is true, then our caller is expecting to remove
2324 	 the landing pad.  */
2325       if (e == NULL && !change_region)
2326 	remove_eh_landing_pad (old_lp);
2327     }
2328   else
2329     {
2330       /* No correct landing pad exists.  If there are no more edges
2331 	 into OLD_LP, then we can simply re-use the existing landing pad.
2332 	 Otherwise, we have to create a new landing pad.  */
2333       if (e == NULL)
2334 	{
2335 	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2336 	  new_lp = old_lp;
2337 	}
2338       else
2339 	new_lp = gen_eh_landing_pad (old_lp->region);
2340       new_lp->post_landing_pad = new_label;
2341       EH_LANDING_PAD_NR (new_label) = new_lp->index;
2342     }
2343 
2344   /* Maybe move the throwing statement to the new region.  */
2345   if (old_lp != new_lp)
2346     {
2347       remove_stmt_from_eh_lp (throw_stmt);
2348       add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2349     }
2350 }
2351 
2352 /* Redirect EH edge EDGE_IN to NEW_BB.  */
2353 
2354 edge
2355 redirect_eh_edge (edge edge_in, basic_block new_bb)
2356 {
2357   redirect_eh_edge_1 (edge_in, new_bb, false);
2358   return ssa_redirect_edge (edge_in, new_bb);
2359 }
2360 
2361 /* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
2362    labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2363    The actual edge update will happen in the caller.  */
2364 
2365 void
2366 redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
2367 {
2368   tree new_lab = gimple_block_label (new_bb);
2369   bool any_changed = false;
2370   basic_block old_bb;
2371   eh_region r;
2372   eh_catch c;
2373 
2374   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2375   switch (r->type)
2376     {
2377     case ERT_TRY:
2378       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2379 	{
2380 	  old_bb = label_to_block (c->label);
2381 	  if (old_bb == e->dest)
2382 	    {
2383 	      c->label = new_lab;
2384 	      any_changed = true;
2385 	    }
2386 	}
2387       break;
2388 
2389     case ERT_ALLOWED_EXCEPTIONS:
2390       old_bb = label_to_block (r->u.allowed.label);
2391       gcc_assert (old_bb == e->dest);
2392       r->u.allowed.label = new_lab;
2393       any_changed = true;
2394       break;
2395 
2396     default:
2397       gcc_unreachable ();
2398     }
2399 
2400   gcc_assert (any_changed);
2401 }
2402 
2403 /* Helper function for operation_could_trap_p and stmt_could_throw_p.  */
2404 
2405 bool
2406 operation_could_trap_helper_p (enum tree_code op,
2407 			       bool fp_operation,
2408 			       bool honor_trapv,
2409 			       bool honor_nans,
2410 			       bool honor_snans,
2411 			       tree divisor,
2412 			       bool *handled)
2413 {
2414   *handled = true;
2415   switch (op)
2416     {
2417     case TRUNC_DIV_EXPR:
2418     case CEIL_DIV_EXPR:
2419     case FLOOR_DIV_EXPR:
2420     case ROUND_DIV_EXPR:
2421     case EXACT_DIV_EXPR:
2422     case CEIL_MOD_EXPR:
2423     case FLOOR_MOD_EXPR:
2424     case ROUND_MOD_EXPR:
2425     case TRUNC_MOD_EXPR:
2426     case RDIV_EXPR:
2427       if (honor_snans || honor_trapv)
2428 	return true;
2429       if (fp_operation)
2430 	return flag_trapping_math;
2431       if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2432         return true;
2433       return false;
2434 
2435     case LT_EXPR:
2436     case LE_EXPR:
2437     case GT_EXPR:
2438     case GE_EXPR:
2439     case LTGT_EXPR:
2440       /* Some floating point comparisons may trap.  */
2441       return honor_nans;
2442 
2443     case EQ_EXPR:
2444     case NE_EXPR:
2445     case UNORDERED_EXPR:
2446     case ORDERED_EXPR:
2447     case UNLT_EXPR:
2448     case UNLE_EXPR:
2449     case UNGT_EXPR:
2450     case UNGE_EXPR:
2451     case UNEQ_EXPR:
2452       return honor_snans;
2453 
2454     case CONVERT_EXPR:
2455     case FIX_TRUNC_EXPR:
2456       /* Conversion of floating point might trap.  */
2457       return honor_nans;
2458 
2459     case NEGATE_EXPR:
2460     case ABS_EXPR:
2461     case CONJ_EXPR:
2462       /* These operations don't trap with floating point.  */
2463       if (honor_trapv)
2464 	return true;
2465       return false;
2466 
2467     case PLUS_EXPR:
2468     case MINUS_EXPR:
2469     case MULT_EXPR:
2470       /* Any floating arithmetic may trap.  */
2471       if (fp_operation && flag_trapping_math)
2472 	return true;
2473       if (honor_trapv)
2474 	return true;
2475       return false;
2476 
2477     case COMPLEX_EXPR:
2478     case CONSTRUCTOR:
2479       /* Constructing an object cannot trap.  */
2480       return false;
2481 
2482     default:
2483       /* Any floating arithmetic may trap.  */
2484       if (fp_operation && flag_trapping_math)
2485 	return true;
2486 
2487       *handled = false;
2488       return false;
2489     }
2490 }
2491 
2492 /* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
2493    on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
2494    type operands that may trap.  If OP is a division operator, DIVISOR contains
2495    the value of the divisor.  */
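/* For instance (illustrative only), operation_could_trap_p
   (TRUNC_DIV_EXPR, false, false, integer_zero_node) is true since
   integer division by a constant zero traps, while PLUS_EXPR on
   ordinary integers with neither trapping overflow nor a
   floating-point type involved is not considered trapping.  */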
2496 
2497 bool
2498 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2499 			tree divisor)
2500 {
2501   bool honor_nans = (fp_operation && flag_trapping_math
2502 		     && !flag_finite_math_only);
2503   bool honor_snans = fp_operation && flag_signaling_nans != 0;
2504   bool handled;
2505 
2506   if (TREE_CODE_CLASS (op) != tcc_comparison
2507       && TREE_CODE_CLASS (op) != tcc_unary
2508       && TREE_CODE_CLASS (op) != tcc_binary)
2509     return false;
2510 
2511   return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2512 					honor_nans, honor_snans, divisor,
2513 					&handled);
2514 }
2515 
2516 
2517 /* Returns true if it is possible to prove that the index of
2518    an array access REF (an ARRAY_REF expression) falls into the
2519    array bounds.  */
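/* For example, given a declaration like int a[10], an access a[12]
   has a constant index above the upper bound 9, so this returns false
   and the reference is treated as possibly trapping.  */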
2520 
2521 static bool
2522 in_array_bounds_p (tree ref)
2523 {
2524   tree idx = TREE_OPERAND (ref, 1);
2525   tree min, max;
2526 
2527   if (TREE_CODE (idx) != INTEGER_CST)
2528     return false;
2529 
2530   min = array_ref_low_bound (ref);
2531   max = array_ref_up_bound (ref);
2532   if (!min
2533       || !max
2534       || TREE_CODE (min) != INTEGER_CST
2535       || TREE_CODE (max) != INTEGER_CST)
2536     return false;
2537 
2538   if (tree_int_cst_lt (idx, min)
2539       || tree_int_cst_lt (max, idx))
2540     return false;
2541 
2542   return true;
2543 }
2544 
2545 /* Returns true if it is possible to prove that the range of
2546    an array access REF (an ARRAY_RANGE_REF expression) falls
2547    into the array bounds.  */
2548 
2549 static bool
2550 range_in_array_bounds_p (tree ref)
2551 {
2552   tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2553   tree range_min, range_max, min, max;
2554 
2555   range_min = TYPE_MIN_VALUE (domain_type);
2556   range_max = TYPE_MAX_VALUE (domain_type);
2557   if (!range_min
2558       || !range_max
2559       || TREE_CODE (range_min) != INTEGER_CST
2560       || TREE_CODE (range_max) != INTEGER_CST)
2561     return false;
2562 
2563   min = array_ref_low_bound (ref);
2564   max = array_ref_up_bound (ref);
2565   if (!min
2566       || !max
2567       || TREE_CODE (min) != INTEGER_CST
2568       || TREE_CODE (max) != INTEGER_CST)
2569     return false;
2570 
2571   if (tree_int_cst_lt (range_min, min)
2572       || tree_int_cst_lt (max, range_max))
2573     return false;
2574 
2575   return true;
2576 }
2577 
2578 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2579    location or floating point arithmetic.  C.f. the rtl version, may_trap_p.
2580    This routine expects only GIMPLE lhs or rhs input.  */
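/* A few illustrative cases: a dereference *p of a pointer not marked
   TREE_THIS_NOTRAP can trap; an ARRAY_REF with a non-constant index
   can trap; an access to a weak, undefined decl can trap; a plain
   local VAR_DECL or a CONSTRUCTOR cannot.  */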
2581 
2582 bool
2583 tree_could_trap_p (tree expr)
2584 {
2585   enum tree_code code;
2586   bool fp_operation = false;
2587   bool honor_trapv = false;
2588   tree t, base, div = NULL_TREE;
2589 
2590   if (!expr)
2591     return false;
2592 
2593   code = TREE_CODE (expr);
2594   t = TREE_TYPE (expr);
2595 
2596   if (t)
2597     {
2598       if (COMPARISON_CLASS_P (expr))
2599 	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2600       else
2601 	fp_operation = FLOAT_TYPE_P (t);
2602       honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2603     }
2604 
2605   if (TREE_CODE_CLASS (code) == tcc_binary)
2606     div = TREE_OPERAND (expr, 1);
2607   if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2608     return true;
2609 
2610  restart:
2611   switch (code)
2612     {
2613     case COMPONENT_REF:
2614     case REALPART_EXPR:
2615     case IMAGPART_EXPR:
2616     case BIT_FIELD_REF:
2617     case VIEW_CONVERT_EXPR:
2618     case WITH_SIZE_EXPR:
2619       expr = TREE_OPERAND (expr, 0);
2620       code = TREE_CODE (expr);
2621       goto restart;
2622 
2623     case ARRAY_RANGE_REF:
2624       base = TREE_OPERAND (expr, 0);
2625       if (tree_could_trap_p (base))
2626 	return true;
2627       if (TREE_THIS_NOTRAP (expr))
2628 	return false;
2629       return !range_in_array_bounds_p (expr);
2630 
2631     case ARRAY_REF:
2632       base = TREE_OPERAND (expr, 0);
2633       if (tree_could_trap_p (base))
2634 	return true;
2635       if (TREE_THIS_NOTRAP (expr))
2636 	return false;
2637       return !in_array_bounds_p (expr);
2638 
2639     case TARGET_MEM_REF:
2640     case MEM_REF:
2641       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2642 	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2643 	return true;
2644       if (TREE_THIS_NOTRAP (expr))
2645 	return false;
2646       /* We cannot prove that the access is in-bounds when we have
2647          variable-index TARGET_MEM_REFs.  */
2648       if (code == TARGET_MEM_REF
2649 	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2650 	return true;
2651       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2652 	{
2653 	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2654 	  double_int off = mem_ref_offset (expr);
2655 	  if (off.is_negative ())
2656 	    return true;
2657 	  if (TREE_CODE (base) == STRING_CST)
2658 	    return double_int::from_uhwi (TREE_STRING_LENGTH (base)).ule (off);
2659 	  else if (DECL_SIZE_UNIT (base) == NULL_TREE
2660 		   || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2661 		   || tree_to_double_int (DECL_SIZE_UNIT (base)).ule (off))
2662 	    return true;
2663 	  /* Now we are sure the first byte of the access is inside
2664 	     the object.  */
2665 	  return false;
2666 	}
2667       return true;
2668 
2669     case INDIRECT_REF:
2670       return !TREE_THIS_NOTRAP (expr);
2671 
2672     case ASM_EXPR:
2673       return TREE_THIS_VOLATILE (expr);
2674 
2675     case CALL_EXPR:
2676       t = get_callee_fndecl (expr);
2677       /* Assume that calls to weak functions may trap.  */
2678       if (!t || !DECL_P (t))
2679 	return true;
2680       if (DECL_WEAK (t))
2681 	return tree_could_trap_p (t);
2682       return false;
2683 
2684     case FUNCTION_DECL:
2685       /* Assume that accesses to weak functions may trap, unless we know
2686 	 they are certainly defined in current TU or in some other
2687 	 LTO partition.  */
2688       if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
2689 	{
2690 	  struct cgraph_node *node;
2691 	  if (!DECL_EXTERNAL (expr))
2692 	    return false;
2693 	  node = cgraph_function_node (cgraph_get_node (expr), NULL);
2694 	  if (node && node->in_other_partition)
2695 	    return false;
2696 	  return true;
2697 	}
2698       return false;
2699 
2700     case VAR_DECL:
2701       /* Assume that accesses to weak vars may trap, unless we know
2702 	 they are certainly defined in current TU or in some other
2703 	 LTO partition.  */
2704       if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
2705 	{
2706 	  varpool_node *node;
2707 	  if (!DECL_EXTERNAL (expr))
2708 	    return false;
2709 	  node = varpool_variable_node (varpool_get_node (expr), NULL);
2710 	  if (node && node->in_other_partition)
2711 	    return false;
2712 	  return true;
2713 	}
2714       return false;
2715 
2716     default:
2717       return false;
2718     }
2719 }
2720 
2721 
2722 /* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
2723    an assignment or a conditional) may throw.  */
2724 
2725 static bool
2726 stmt_could_throw_1_p (gimple stmt)
2727 {
2728   enum tree_code code = gimple_expr_code (stmt);
2729   bool honor_nans = false;
2730   bool honor_snans = false;
2731   bool fp_operation = false;
2732   bool honor_trapv = false;
2733   tree t;
2734   size_t i;
2735   bool handled, ret;
2736 
2737   if (TREE_CODE_CLASS (code) == tcc_comparison
2738       || TREE_CODE_CLASS (code) == tcc_unary
2739       || TREE_CODE_CLASS (code) == tcc_binary)
2740     {
2741       if (is_gimple_assign (stmt)
2742 	  && TREE_CODE_CLASS (code) == tcc_comparison)
2743 	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2744       else if (gimple_code (stmt) == GIMPLE_COND)
2745 	t = TREE_TYPE (gimple_cond_lhs (stmt));
2746       else
2747 	t = gimple_expr_type (stmt);
2748       fp_operation = FLOAT_TYPE_P (t);
2749       if (fp_operation)
2750 	{
2751 	  honor_nans = flag_trapping_math && !flag_finite_math_only;
2752 	  honor_snans = flag_signaling_nans != 0;
2753 	}
2754       else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2755 	honor_trapv = true;
2756     }
2757 
2758   /* Check if the main expression may trap.  */
2759   t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
2760   ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2761 				       honor_nans, honor_snans, t,
2762 				       &handled);
2763   if (handled)
2764     return ret;
2765 
2766   /* If the expression does not trap, see if any of the individual operands may
2767      trap.  */
2768   for (i = 0; i < gimple_num_ops (stmt); i++)
2769     if (tree_could_trap_p (gimple_op (stmt, i)))
2770       return true;
2771 
2772   return false;
2773 }
2774 
2775 
2776 /* Return true if statement STMT could throw an exception.  */
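/* For example, without -fnon-call-exceptions only calls that are not
   nothrow and GIMPLE_RESX statements are considered throwing; with it,
   trapping assignments, conditionals and volatile asms may throw too.  */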
2777 
2778 bool
2779 stmt_could_throw_p (gimple stmt)
2780 {
2781   if (!flag_exceptions)
2782     return false;
2783 
2784   /* The only statements that can throw an exception are assignments,
2785      conditionals, calls, resx, and asms.  */
2786   switch (gimple_code (stmt))
2787     {
2788     case GIMPLE_RESX:
2789       return true;
2790 
2791     case GIMPLE_CALL:
2792       return !gimple_call_nothrow_p (stmt);
2793 
2794     case GIMPLE_ASSIGN:
2795     case GIMPLE_COND:
2796       if (!cfun->can_throw_non_call_exceptions)
2797         return false;
2798       return stmt_could_throw_1_p (stmt);
2799 
2800     case GIMPLE_ASM:
2801       if (!cfun->can_throw_non_call_exceptions)
2802         return false;
2803       return gimple_asm_volatile_p (stmt);
2804 
2805     default:
2806       return false;
2807     }
2808 }
2809 
2810 
2811 /* Return true if expression T could throw an exception.  */
2812 
2813 bool
2814 tree_could_throw_p (tree t)
2815 {
2816   if (!flag_exceptions)
2817     return false;
2818   if (TREE_CODE (t) == MODIFY_EXPR)
2819     {
2820       if (cfun->can_throw_non_call_exceptions
2821           && tree_could_trap_p (TREE_OPERAND (t, 0)))
2822         return true;
2823       t = TREE_OPERAND (t, 1);
2824     }
2825 
2826   if (TREE_CODE (t) == WITH_SIZE_EXPR)
2827     t = TREE_OPERAND (t, 0);
2828   if (TREE_CODE (t) == CALL_EXPR)
2829     return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2830   if (cfun->can_throw_non_call_exceptions)
2831     return tree_could_trap_p (t);
2832   return false;
2833 }
2834 
2835 /* Return true if STMT can throw an exception that is not caught within
2836    the current function (CFUN).  */
2837 
2838 bool
2839 stmt_can_throw_external (gimple stmt)
2840 {
2841   int lp_nr;
2842 
2843   if (!stmt_could_throw_p (stmt))
2844     return false;
2845 
2846   lp_nr = lookup_stmt_eh_lp (stmt);
2847   return lp_nr == 0;
2848 }
2849 
2850 /* Return true if STMT can throw an exception that is caught within
2851    the current function (CFUN).  */
2852 
2853 bool
2854 stmt_can_throw_internal (gimple stmt)
2855 {
2856   int lp_nr;
2857 
2858   if (!stmt_could_throw_p (stmt))
2859     return false;
2860 
2861   lp_nr = lookup_stmt_eh_lp (stmt);
2862   return lp_nr > 0;
2863 }
2864 
2865 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2866    remove any entry it might have from the EH table.  Return true if
2867    any change was made.  */
2868 
2869 bool
2870 maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
2871 {
2872   if (stmt_could_throw_p (stmt))
2873     return false;
2874   return remove_stmt_from_eh_lp_fn (ifun, stmt);
2875 }
2876 
2877 /* Likewise, but always use the current function.  */
2878 
2879 bool
2880 maybe_clean_eh_stmt (gimple stmt)
2881 {
2882   return maybe_clean_eh_stmt_fn (cfun, stmt);
2883 }
2884 
2885 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2886    OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2887    in the table if it should be in there.  Return TRUE if a replacement was
2888    done that may require an EH edge purge.  */
2889 
2890 bool
2891 maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
2892 {
2893   int lp_nr = lookup_stmt_eh_lp (old_stmt);
2894 
2895   if (lp_nr != 0)
2896     {
2897       bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
2898 
2899       if (new_stmt == old_stmt && new_stmt_could_throw)
2900 	return false;
2901 
2902       remove_stmt_from_eh_lp (old_stmt);
2903       if (new_stmt_could_throw)
2904 	{
2905 	  add_stmt_to_eh_lp (new_stmt, lp_nr);
2906 	  return false;
2907 	}
2908       else
2909 	return true;
2910     }
2911 
2912   return false;
2913 }
2914 
2915 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
2916    in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
2917    operand is the return value of duplicate_eh_regions.  */
2918 
2919 bool
2920 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
2921 			    struct function *old_fun, gimple old_stmt,
2922 			    struct pointer_map_t *map, int default_lp_nr)
2923 {
2924   int old_lp_nr, new_lp_nr;
2925   void **slot;
2926 
2927   if (!stmt_could_throw_p (new_stmt))
2928     return false;
2929 
2930   old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
2931   if (old_lp_nr == 0)
2932     {
2933       if (default_lp_nr == 0)
2934 	return false;
2935       new_lp_nr = default_lp_nr;
2936     }
2937   else if (old_lp_nr > 0)
2938     {
2939       eh_landing_pad old_lp, new_lp;
2940 
2941       old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
2942       slot = pointer_map_contains (map, old_lp);
2943       new_lp = (eh_landing_pad) *slot;
2944       new_lp_nr = new_lp->index;
2945     }
2946   else
2947     {
2948       eh_region old_r, new_r;
2949 
2950       old_r = (*old_fun->eh->region_array)[-old_lp_nr];
2951       slot = pointer_map_contains (map, old_r);
2952       new_r = (eh_region) *slot;
2953       new_lp_nr = -new_r->index;
2954     }
2955 
2956   add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
2957   return true;
2958 }
2959 
2960 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
2961    and thus no remapping is required.  */
2962 
2963 bool
2964 maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
2965 {
2966   int lp_nr;
2967 
2968   if (!stmt_could_throw_p (new_stmt))
2969     return false;
2970 
2971   lp_nr = lookup_stmt_eh_lp (old_stmt);
2972   if (lp_nr == 0)
2973     return false;
2974 
2975   add_stmt_to_eh_lp (new_stmt, lp_nr);
2976   return true;
2977 }
2978 
2979 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
2980    GIMPLE_TRY) that are similar enough to be considered the same.  Currently
2981    this only handles handlers consisting of a single call, as that's the
2982    important case for C++: a destructor call for a particular object showing
2983    up in multiple handlers.  */
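/* E.g. two handlers each consisting of the single call A::~A (&a)
   -- same callee, same argument, no LHS and no static chain --
   compare equal; that is the repeated-destructor case this is
   intended to catch.  */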
2984 
2985 static bool
2986 same_handler_p (gimple_seq oneh, gimple_seq twoh)
2987 {
2988   gimple_stmt_iterator gsi;
2989   gimple ones, twos;
2990   unsigned int ai;
2991 
2992   gsi = gsi_start (oneh);
2993   if (!gsi_one_before_end_p (gsi))
2994     return false;
2995   ones = gsi_stmt (gsi);
2996 
2997   gsi = gsi_start (twoh);
2998   if (!gsi_one_before_end_p (gsi))
2999     return false;
3000   twos = gsi_stmt (gsi);
3001 
3002   if (!is_gimple_call (ones)
3003       || !is_gimple_call (twos)
3004       || gimple_call_lhs (ones)
3005       || gimple_call_lhs (twos)
3006       || gimple_call_chain (ones)
3007       || gimple_call_chain (twos)
3008       || !gimple_call_same_target_p (ones, twos)
3009       || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3010     return false;
3011 
3012   for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3013     if (!operand_equal_p (gimple_call_arg (ones, ai),
3014                           gimple_call_arg (twos, ai), 0))
3015       return false;
3016 
3017   return true;
3018 }
3019 
3020 /* Optimize
3021     try { A() } finally { try { ~B() } catch { ~A() } }
3022     try { ... } finally { ~A() }
3023    into
3024     try { A() } catch { ~B() }
3025     try { ~B() ... } finally { ~A() }
3026 
3027    This occurs frequently in C++, where A is a local variable and B is a
3028    temporary used in the initializer for A.  */
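/* A hypothetical C++ fragment giving rise to this shape (names purely
   illustrative):

       A a = f (B ());   // B is a temporary used in A's initializer

   ~B must run once A's initialization is complete and ~A at the end
   of the enclosing scope, which yields the nested pattern above.  */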
3029 
3030 static void
3031 optimize_double_finally (gimple one, gimple two)
3032 {
3033   gimple oneh;
3034   gimple_stmt_iterator gsi;
3035   gimple_seq cleanup;
3036 
3037   cleanup = gimple_try_cleanup (one);
3038   gsi = gsi_start (cleanup);
3039   if (!gsi_one_before_end_p (gsi))
3040     return;
3041 
3042   oneh = gsi_stmt (gsi);
3043   if (gimple_code (oneh) != GIMPLE_TRY
3044       || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3045     return;
3046 
3047   if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3048     {
3049       gimple_seq seq = gimple_try_eval (oneh);
3050 
3051       gimple_try_set_cleanup (one, seq);
3052       gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3053       seq = copy_gimple_seq_and_replace_locals (seq);
3054       gimple_seq_add_seq (&seq, gimple_try_eval (two));
3055       gimple_try_set_eval (two, seq);
3056     }
3057 }
3058 
3059 /* Perform EH refactoring optimizations that are simpler to do when code
3060    flow has been lowered but EH structures haven't.  */
3061 
3062 static void
3063 refactor_eh_r (gimple_seq seq)
3064 {
3065   gimple_stmt_iterator gsi;
3066   gimple one, two;
3067 
3068   one = NULL;
3069   two = NULL;
3070   gsi = gsi_start (seq);
3071   while (1)
3072     {
3073       one = two;
3074       if (gsi_end_p (gsi))
3075 	two = NULL;
3076       else
3077 	two = gsi_stmt (gsi);
3078       if (one
3079 	  && two
3080 	  && gimple_code (one) == GIMPLE_TRY
3081 	  && gimple_code (two) == GIMPLE_TRY
3082 	  && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
3083 	  && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
3084 	optimize_double_finally (one, two);
3085       if (one)
3086 	switch (gimple_code (one))
3087 	  {
3088 	  case GIMPLE_TRY:
3089 	    refactor_eh_r (gimple_try_eval (one));
3090 	    refactor_eh_r (gimple_try_cleanup (one));
3091 	    break;
3092 	  case GIMPLE_CATCH:
3093 	    refactor_eh_r (gimple_catch_handler (one));
3094 	    break;
3095 	  case GIMPLE_EH_FILTER:
3096 	    refactor_eh_r (gimple_eh_filter_failure (one));
3097 	    break;
3098 	  case GIMPLE_EH_ELSE:
3099 	    refactor_eh_r (gimple_eh_else_n_body (one));
3100 	    refactor_eh_r (gimple_eh_else_e_body (one));
3101 	    break;
3102 	  default:
3103 	    break;
3104 	  }
3105       if (two)
3106 	gsi_next (&gsi);
3107       else
3108 	break;
3109     }
3110 }
3111 
3112 static unsigned
3113 refactor_eh (void)
3114 {
3115   refactor_eh_r (gimple_body (current_function_decl));
3116   return 0;
3117 }
3118 
3119 static bool
3120 gate_refactor_eh (void)
3121 {
3122   return flag_exceptions != 0;
3123 }
3124 
3125 namespace {
3126 
3127 const pass_data pass_data_refactor_eh =
3128 {
3129   GIMPLE_PASS, /* type */
3130   "ehopt", /* name */
3131   OPTGROUP_NONE, /* optinfo_flags */
3132   true, /* has_gate */
3133   true, /* has_execute */
3134   TV_TREE_EH, /* tv_id */
3135   PROP_gimple_lcf, /* properties_required */
3136   0, /* properties_provided */
3137   0, /* properties_destroyed */
3138   0, /* todo_flags_start */
3139   0, /* todo_flags_finish */
3140 };
3141 
3142 class pass_refactor_eh : public gimple_opt_pass
3143 {
3144 public:
3145   pass_refactor_eh (gcc::context *ctxt)
3146     : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3147   {}
3148 
3149   /* opt_pass methods: */
3150   bool gate () { return gate_refactor_eh (); }
3151   unsigned int execute () { return refactor_eh (); }
3152 
3153 }; // class pass_refactor_eh
3154 
3155 } // anon namespace
3156 
3157 gimple_opt_pass *
3158 make_pass_refactor_eh (gcc::context *ctxt)
3159 {
3160   return new pass_refactor_eh (ctxt);
3161 }
3162 
3163 /* At the end of gimple optimization, we can lower RESX.  */
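/* Sketch of the cases handled below: a RESX with no source region left
   is expanded to a trap; one that resumes into a landing pad within
   this function copies the EH values via __builtin_eh_copy_values (or
   calls the must-not-throw failure routine) and turns the EH edge into
   a fallthru; otherwise the exception escapes and we call
   _Unwind_Resume (or __cxa_end_cleanup on ARM EABI).  */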
3164 
3165 static bool
3166 lower_resx (basic_block bb, gimple stmt, struct pointer_map_t *mnt_map)
3167 {
3168   int lp_nr;
3169   eh_region src_r, dst_r;
3170   gimple_stmt_iterator gsi;
3171   gimple x;
3172   tree fn, src_nr;
3173   bool ret = false;
3174 
3175   lp_nr = lookup_stmt_eh_lp (stmt);
3176   if (lp_nr != 0)
3177     dst_r = get_eh_region_from_lp_number (lp_nr);
3178   else
3179     dst_r = NULL;
3180 
3181   src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3182   gsi = gsi_last_bb (bb);
3183 
3184   if (src_r == NULL)
3185     {
3186       /* We can wind up with no source region when pass_cleanup_eh shows
3187 	 that there are no entries into an eh region and deletes it, but
3188 	 then the block that contains the resx isn't removed.  This can
3189 	 happen without optimization when the switch statement created by
3190 	 lower_try_finally_switch isn't simplified to remove the eh case.
3191 
3192 	 Resolve this by expanding the resx node to an abort.  */
3193 
3194       fn = builtin_decl_implicit (BUILT_IN_TRAP);
3195       x = gimple_build_call (fn, 0);
3196       gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3197 
3198       while (EDGE_COUNT (bb->succs) > 0)
3199 	remove_edge (EDGE_SUCC (bb, 0));
3200     }
3201   else if (dst_r)
3202     {
3203       /* When we have a destination region, we resolve this by copying
3204 	 the excptr and filter values into place, and changing the edge
3205 	 to immediately after the landing pad.  */
3206       edge e;
3207 
3208       if (lp_nr < 0)
3209 	{
3210 	  basic_block new_bb;
3211 	  void **slot;
3212 	  tree lab;
3213 
3214 	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3215 	     the failure decl into a new block, if needed.  */
3216 	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3217 
3218 	  slot = pointer_map_contains (mnt_map, dst_r);
3219 	  if (slot == NULL)
3220 	    {
3221 	      gimple_stmt_iterator gsi2;
3222 
3223 	      new_bb = create_empty_bb (bb);
3224 	      if (current_loops)
3225 		add_bb_to_loop (new_bb, bb->loop_father);
3226 	      lab = gimple_block_label (new_bb);
3227 	      gsi2 = gsi_start_bb (new_bb);
3228 
3229 	      fn = dst_r->u.must_not_throw.failure_decl;
3230 	      x = gimple_build_call (fn, 0);
3231 	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3232 	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3233 
3234 	      slot = pointer_map_insert (mnt_map, dst_r);
3235 	      *slot = lab;
3236 	    }
3237 	  else
3238 	    {
3239 	      lab = (tree) *slot;
3240 	      new_bb = label_to_block (lab);
3241 	    }
3242 
3243 	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
3244 	  e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3245 	  e->count = bb->count;
3246 	  e->probability = REG_BR_PROB_BASE;
3247 	}
3248       else
3249 	{
3250 	  edge_iterator ei;
3251 	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3252 
3253 	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3254 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3255 	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
3256 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3257 
3258 	  /* Update the flags for the outgoing edge.  */
3259 	  e = single_succ_edge (bb);
3260 	  gcc_assert (e->flags & EDGE_EH);
3261 	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3262 
3263 	  /* If there are no more EH users of the landing pad, delete it.  */
3264 	  FOR_EACH_EDGE (e, ei, e->dest->preds)
3265 	    if (e->flags & EDGE_EH)
3266 	      break;
3267 	  if (e == NULL)
3268 	    {
3269 	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3270 	      remove_eh_landing_pad (lp);
3271 	    }
3272 	}
3273 
3274       ret = true;
3275     }
3276   else
3277     {
3278       tree var;
3279 
3280       /* When we don't have a destination region, this exception escapes
3281 	 up the call chain.  We resolve this by generating a call to the
3282 	 _Unwind_Resume library function.  */
3283 
3284       /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3285 	 with no arguments for C++ and Java.  Check for that.  */
3286       if (src_r->use_cxa_end_cleanup)
3287 	{
3288 	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3289 	  x = gimple_build_call (fn, 0);
3290 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3291 	}
3292       else
3293 	{
3294 	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3295 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3296 	  x = gimple_build_call (fn, 1, src_nr);
3297 	  var = create_tmp_var (ptr_type_node, NULL);
3298 	  var = make_ssa_name (var, x);
3299 	  gimple_call_set_lhs (x, var);
3300 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3301 
3302 	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3303 	  x = gimple_build_call (fn, 1, var);
3304 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3305 	}
3306 
3307       gcc_assert (EDGE_COUNT (bb->succs) == 0);
3308     }
3309 
3310   gsi_remove (&gsi, true);
3311 
3312   return ret;
3313 }
3314 
3315 static unsigned
3316 execute_lower_resx (void)
3317 {
3318   basic_block bb;
3319   struct pointer_map_t *mnt_map;
3320   bool dominance_invalidated = false;
3321   bool any_rewritten = false;
3322 
3323   mnt_map = pointer_map_create ();
3324 
3325   FOR_EACH_BB_FN (bb, cfun)
3326     {
3327       gimple last = last_stmt (bb);
3328       if (last && is_gimple_resx (last))
3329 	{
3330 	  dominance_invalidated |= lower_resx (bb, last, mnt_map);
3331 	  any_rewritten = true;
3332 	}
3333     }
3334 
3335   pointer_map_destroy (mnt_map);
3336 
3337   if (dominance_invalidated)
3338     {
3339       free_dominance_info (CDI_DOMINATORS);
3340       free_dominance_info (CDI_POST_DOMINATORS);
3341     }
3342 
3343   return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3344 }
3345 
3346 static bool
3347 gate_lower_resx (void)
3348 {
3349   return flag_exceptions != 0;
3350 }
3351 
3352 namespace {
3353 
3354 const pass_data pass_data_lower_resx =
3355 {
3356   GIMPLE_PASS, /* type */
3357   "resx", /* name */
3358   OPTGROUP_NONE, /* optinfo_flags */
3359   true, /* has_gate */
3360   true, /* has_execute */
3361   TV_TREE_EH, /* tv_id */
3362   PROP_gimple_lcf, /* properties_required */
3363   0, /* properties_provided */
3364   0, /* properties_destroyed */
3365   0, /* todo_flags_start */
3366   TODO_verify_flow, /* todo_flags_finish */
3367 };
3368 
3369 class pass_lower_resx : public gimple_opt_pass
3370 {
3371 public:
3372   pass_lower_resx (gcc::context *ctxt)
3373     : gimple_opt_pass (pass_data_lower_resx, ctxt)
3374   {}
3375 
3376   /* opt_pass methods: */
3377   bool gate () { return gate_lower_resx (); }
3378   unsigned int execute () { return execute_lower_resx (); }
3379 
3380 }; // class pass_lower_resx
3381 
3382 } // anon namespace
3383 
3384 gimple_opt_pass *
3385 make_pass_lower_resx (gcc::context *ctxt)
3386 {
3387   return new pass_lower_resx (ctxt);
3388 }
3389 
3390 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3391    external throw.  */
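
/* As an illustration only (a sketch, not an actual GIMPLE dump), a block
   this function can clean up looks roughly like

	<bb N>:				(reached via an EH edge)
	  x = {v} {CLOBBER};
	  y = {v} {CLOBBER};
	  resx 1;			(the exception leaves the function)

   Because the rethrow escapes to a caller, no handler in this function
   can observe X or Y afterwards, so the clobbers are simply deleted.  */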
3392 
3393 static void
3394 optimize_clobbers (basic_block bb)
3395 {
3396   gimple_stmt_iterator gsi = gsi_last_bb (bb);
3397   bool any_clobbers = false;
3398   bool seen_stack_restore = false;
3399   edge_iterator ei;
3400   edge e;
3401 
3402   /* Only optimize anything if the bb contains at least one clobber,
3403      ends with resx (checked by caller), optionally contains some
3404      debug stmts or labels and at most one __builtin_stack_restore
3405      call, and has an incoming EH edge.  */
3406   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3407     {
3408       gimple stmt = gsi_stmt (gsi);
3409       if (is_gimple_debug (stmt))
3410 	continue;
3411       if (gimple_clobber_p (stmt))
3412 	{
3413 	  any_clobbers = true;
3414 	  continue;
3415 	}
3416       if (!seen_stack_restore
3417 	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3418 	{
3419 	  seen_stack_restore = true;
3420 	  continue;
3421 	}
3422       if (gimple_code (stmt) == GIMPLE_LABEL)
3423 	break;
3424       return;
3425     }
3426   if (!any_clobbers)
3427     return;
3428   FOR_EACH_EDGE (e, ei, bb->preds)
3429     if (e->flags & EDGE_EH)
3430       break;
3431   if (e == NULL)
3432     return;
3433   gsi = gsi_last_bb (bb);
3434   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3435     {
3436       gimple stmt = gsi_stmt (gsi);
3437       if (!gimple_clobber_p (stmt))
3438 	continue;
3439       unlink_stmt_vdef (stmt);
3440       gsi_remove (&gsi, true);
3441       release_defs (stmt);
3442     }
3443 }
3444 
3445 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3446    internal throw to successor BB.  */
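
/* Illustrative sketch (not an actual dump): for an internal rethrow

	<bb N>:
	  x = {v} {CLOBBER};
	  resx 2;			(lands on <bb M> in this function)

   the clobber is moved to the start of <bb M>, the single EH successor,
   so later passes still see where the life of the underlying variable
   ends.  */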
3447 
3448 static int
3449 sink_clobbers (basic_block bb)
3450 {
3451   edge e;
3452   edge_iterator ei;
3453   gimple_stmt_iterator gsi, dgsi;
3454   basic_block succbb;
3455   bool any_clobbers = false;
3456   unsigned todo = 0;
3457 
3458   /* Only optimize if BB has a single EH successor and
3459      all predecessor edges are EH too.  */
3460   if (!single_succ_p (bb)
3461       || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3462     return 0;
3463 
3464   FOR_EACH_EDGE (e, ei, bb->preds)
3465     {
3466       if ((e->flags & EDGE_EH) == 0)
3467 	return 0;
3468     }
3469 
3470   /* And BB contains only CLOBBER stmts before the final
3471      RESX.  */
3472   gsi = gsi_last_bb (bb);
3473   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3474     {
3475       gimple stmt = gsi_stmt (gsi);
3476       if (is_gimple_debug (stmt))
3477 	continue;
3478       if (gimple_code (stmt) == GIMPLE_LABEL)
3479 	break;
3480       if (!gimple_clobber_p (stmt))
3481 	return 0;
3482       any_clobbers = true;
3483     }
3484   if (!any_clobbers)
3485     return 0;
3486 
3487   edge succe = single_succ_edge (bb);
3488   succbb = succe->dest;
3489 
3490   /* See if there is a virtual PHI node to take an updated virtual
3491      operand from.  */
3492   gimple vphi = NULL;
3493   tree vuse = NULL_TREE;
3494   for (gsi = gsi_start_phis (succbb); !gsi_end_p (gsi); gsi_next (&gsi))
3495     {
3496       tree res = gimple_phi_result (gsi_stmt (gsi));
3497       if (virtual_operand_p (res))
3498 	{
3499 	  vphi = gsi_stmt (gsi);
3500 	  vuse = res;
3501 	  break;
3502 	}
3503     }
3504 
3505   dgsi = gsi_after_labels (succbb);
3506   gsi = gsi_last_bb (bb);
3507   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3508     {
3509       gimple stmt = gsi_stmt (gsi);
3510       tree lhs;
3511       if (is_gimple_debug (stmt))
3512 	continue;
3513       if (gimple_code (stmt) == GIMPLE_LABEL)
3514 	break;
3515       lhs = gimple_assign_lhs (stmt);
3516       /* Unfortunately we don't have dominance info updated at this
3517 	 point, so checking if
3518 	 dominated_by_p (CDI_DOMINATORS, succbb,
3519 			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
3520 	 would be too costly.  Thus, avoid sinking any clobbers that
3521 	 refer to non-(D) SSA_NAMEs.  */
3522       if (TREE_CODE (lhs) == MEM_REF
3523 	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3524 	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3525 	{
3526 	  unlink_stmt_vdef (stmt);
3527 	  gsi_remove (&gsi, true);
3528 	  release_defs (stmt);
3529 	  continue;
3530 	}
3531 
3532       /* As we do not change stmt order when sinking across a
3533          forwarder edge we can keep virtual operands in place.  */
3534       gsi_remove (&gsi, false);
3535       gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3536 
3537       /* But adjust virtual operands if we sunk across a PHI node.  */
3538       if (vuse)
3539 	{
3540 	  gimple use_stmt;
3541 	  imm_use_iterator iter;
3542 	  use_operand_p use_p;
3543 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3544 	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3545 	      SET_USE (use_p, gimple_vdef (stmt));
3546 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3547 	    {
3548 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3549 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3550 	    }
3551 	  /* Adjust the incoming virtual operand.  */
3552 	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3553 	  SET_USE (gimple_vuse_op (stmt), vuse);
3554 	}
3555       /* If the successor has multiple predecessors but no virtual PHI
3556          node, arrange for virtual operands to be renamed.  */
3557       else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3558 	       && !single_pred_p (succbb))
3559 	{
3560 	  /* In this case there will be no use of the VDEF of this stmt.
3561 	     ???  Unless this is a secondary opportunity and we have not
3562 	     removed unreachable blocks yet, so we cannot assert this.
3563 	     Which also means we will end up renaming too many times.  */
3564 	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3565 	  mark_virtual_operands_for_renaming (cfun);
3566 	  todo |= TODO_update_ssa_only_virtuals;
3567 	}
3568     }
3569 
3570   return todo;
3571 }
3572 
3573 /* At the end of inlining, we can lower EH_DISPATCH.  Return true when
3574    we have found some duplicate labels and removed some edges.  */
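
/* Roughly (an illustrative sketch, not an exact dump), for an ERT_TRY
   region the statement

	eh_dispatch <region-nr>;

   becomes a switch on the runtime filter value

	filter = __builtin_eh_filter (<region-nr>);
	switch (filter) <default-label, case <filter-1>: <catch-1>, ...>

   while for an ERT_ALLOWED_EXCEPTIONS region it becomes a two-way
   comparison of the filter value against the allowed filter.  */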
3575 
3576 static bool
3577 lower_eh_dispatch (basic_block src, gimple stmt)
3578 {
3579   gimple_stmt_iterator gsi;
3580   int region_nr;
3581   eh_region r;
3582   tree filter, fn;
3583   gimple x;
3584   bool redirected = false;
3585 
3586   region_nr = gimple_eh_dispatch_region (stmt);
3587   r = get_eh_region_from_number (region_nr);
3588 
3589   gsi = gsi_last_bb (src);
3590 
3591   switch (r->type)
3592     {
3593     case ERT_TRY:
3594       {
3595 	auto_vec<tree> labels;
3596 	tree default_label = NULL;
3597 	eh_catch c;
3598 	edge_iterator ei;
3599 	edge e;
3600 	struct pointer_set_t *seen_values = pointer_set_create ();
3601 
3602 	/* Collect the labels for a switch.  Zero the post_landing_pad
3603 	   field because we'll no longer have anything keeping these labels
3604 	   in existence and the optimizer will be free to merge these
3605 	   blocks at will.  */
3606 	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3607 	  {
3608 	    tree tp_node, flt_node, lab = c->label;
3609 	    bool have_label = false;
3610 
3611 	    c->label = NULL;
3612 	    tp_node = c->type_list;
3613 	    flt_node = c->filter_list;
3614 
3615 	    if (tp_node == NULL)
3616 	      {
3617 	        default_label = lab;
3618 		break;
3619 	      }
3620 	    do
3621 	      {
3622 		/* Filter out duplicate labels that arise when this handler
3623 		   is shadowed by an earlier one.  When no labels are
3624 		   attached to the handler anymore, we remove
3625 		   the corresponding edge and then we delete unreachable
3626 		   blocks at the end of this pass.  */
3627 		if (! pointer_set_contains (seen_values, TREE_VALUE (flt_node)))
3628 		  {
3629 		    tree t = build_case_label (TREE_VALUE (flt_node),
3630 					       NULL, lab);
3631 		    labels.safe_push (t);
3632 		    pointer_set_insert (seen_values, TREE_VALUE (flt_node));
3633 		    have_label = true;
3634 		  }
3635 
3636 		tp_node = TREE_CHAIN (tp_node);
3637 		flt_node = TREE_CHAIN (flt_node);
3638 	      }
3639 	    while (tp_node);
3640 	    if (! have_label)
3641 	      {
3642 	        remove_edge (find_edge (src, label_to_block (lab)));
3643 	        redirected = true;
3644 	      }
3645 	  }
3646 
3647 	/* Clean up the edge flags.  */
3648 	FOR_EACH_EDGE (e, ei, src->succs)
3649 	  {
3650 	    if (e->flags & EDGE_FALLTHRU)
3651 	      {
3652 		/* If there was no catch-all, use the fallthru edge.  */
3653 		if (default_label == NULL)
3654 		  default_label = gimple_block_label (e->dest);
3655 		e->flags &= ~EDGE_FALLTHRU;
3656 	      }
3657 	  }
3658 	gcc_assert (default_label != NULL);
3659 
3660 	/* Don't generate a switch if there's only a default case.
3661 	   This is common in the form of try { A; } catch (...) { B; }.  */
3662 	if (!labels.exists ())
3663 	  {
3664 	    e = single_succ_edge (src);
3665 	    e->flags |= EDGE_FALLTHRU;
3666 	  }
3667 	else
3668 	  {
3669 	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3670 	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3671 							 region_nr));
3672 	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3673 	    filter = make_ssa_name (filter, x);
3674 	    gimple_call_set_lhs (x, filter);
3675 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3676 
3677 	    /* Turn the default label into a default case.  */
3678 	    default_label = build_case_label (NULL, NULL, default_label);
3679 	    sort_case_labels (labels);
3680 
3681 	    x = gimple_build_switch (filter, default_label, labels);
3682 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3683 	  }
3684 	pointer_set_destroy (seen_values);
3685       }
3686       break;
3687 
3688     case ERT_ALLOWED_EXCEPTIONS:
3689       {
3690 	edge b_e = BRANCH_EDGE (src);
3691 	edge f_e = FALLTHRU_EDGE (src);
3692 
3693 	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3694 	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3695 						     region_nr));
3696 	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
3697 	filter = make_ssa_name (filter, x);
3698 	gimple_call_set_lhs (x, filter);
3699 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3700 
3701 	r->u.allowed.label = NULL;
3702 	x = gimple_build_cond (EQ_EXPR, filter,
3703 			       build_int_cst (TREE_TYPE (filter),
3704 					      r->u.allowed.filter),
3705 			       NULL_TREE, NULL_TREE);
3706 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3707 
3708 	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3709         f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3710       }
3711       break;
3712 
3713     default:
3714       gcc_unreachable ();
3715     }
3716 
3717   /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
3718   gsi_remove (&gsi, true);
3719   return redirected;
3720 }
3721 
3722 static unsigned
3723 execute_lower_eh_dispatch (void)
3724 {
3725   basic_block bb;
3726   int flags = 0;
3727   bool redirected = false;
3728 
3729   assign_filter_values ();
3730 
3731   FOR_EACH_BB_FN (bb, cfun)
3732     {
3733       gimple last = last_stmt (bb);
3734       if (last == NULL)
3735 	continue;
3736       if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3737 	{
3738 	  redirected |= lower_eh_dispatch (bb, last);
3739 	  flags |= TODO_update_ssa_only_virtuals;
3740 	}
3741       else if (gimple_code (last) == GIMPLE_RESX)
3742 	{
3743 	  if (stmt_can_throw_external (last))
3744 	    optimize_clobbers (bb);
3745 	  else
3746 	    flags |= sink_clobbers (bb);
3747 	}
3748     }
3749 
3750   if (redirected)
3751     delete_unreachable_blocks ();
3752   return flags;
3753 }
3754 
3755 static bool
3756 gate_lower_eh_dispatch (void)
3757 {
3758   return cfun->eh->region_tree != NULL;
3759 }
3760 
3761 namespace {
3762 
3763 const pass_data pass_data_lower_eh_dispatch =
3764 {
3765   GIMPLE_PASS, /* type */
3766   "ehdisp", /* name */
3767   OPTGROUP_NONE, /* optinfo_flags */
3768   true, /* has_gate */
3769   true, /* has_execute */
3770   TV_TREE_EH, /* tv_id */
3771   PROP_gimple_lcf, /* properties_required */
3772   0, /* properties_provided */
3773   0, /* properties_destroyed */
3774   0, /* todo_flags_start */
3775   TODO_verify_flow, /* todo_flags_finish */
3776 };
3777 
3778 class pass_lower_eh_dispatch : public gimple_opt_pass
3779 {
3780 public:
3781   pass_lower_eh_dispatch (gcc::context *ctxt)
3782     : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3783   {}
3784 
3785   /* opt_pass methods: */
3786   bool gate () { return gate_lower_eh_dispatch (); }
3787   unsigned int execute () { return execute_lower_eh_dispatch (); }
3788 
3789 }; // class pass_lower_eh_dispatch
3790 
3791 } // anon namespace
3792 
3793 gimple_opt_pass *
3794 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3795 {
3796   return new pass_lower_eh_dispatch (ctxt);
3797 }
3798 
3799 /* Walk statements, see what regions and, optionally, landing pads
3800    are really referenced.
3801 
3802    Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3803    and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3804 
3805    Passing NULL for LP_REACHABLEP is valid; in this case only reachable
3806    regions are marked.
3807 
3808    The caller is responsible for freeing the returned sbitmaps.  */
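
/* A minimal usage sketch, mirroring remove_unreachable_handlers below:

	sbitmap r_reachable, lp_reachable;
	mark_reachable_handlers (&r_reachable, &lp_reachable);
	... consult the bitmaps ...
	sbitmap_free (r_reachable);
	sbitmap_free (lp_reachable);  */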
3809 
3810 static void
3811 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3812 {
3813   sbitmap r_reachable, lp_reachable;
3814   basic_block bb;
3815   bool mark_landing_pads = (lp_reachablep != NULL);
3816   gcc_checking_assert (r_reachablep != NULL);
3817 
3818   r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3819   bitmap_clear (r_reachable);
3820   *r_reachablep = r_reachable;
3821 
3822   if (mark_landing_pads)
3823     {
3824       lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3825       bitmap_clear (lp_reachable);
3826       *lp_reachablep = lp_reachable;
3827     }
3828   else
3829     lp_reachable = NULL;
3830 
3831   FOR_EACH_BB_FN (bb, cfun)
3832     {
3833       gimple_stmt_iterator gsi;
3834 
3835       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3836 	{
3837 	  gimple stmt = gsi_stmt (gsi);
3838 
3839 	  if (mark_landing_pads)
3840 	    {
3841 	      int lp_nr = lookup_stmt_eh_lp (stmt);
3842 
3843 	      /* Negative LP numbers are MUST_NOT_THROW regions which
3844 		 are not considered BB enders.  */
3845 	      if (lp_nr < 0)
3846 		bitmap_set_bit (r_reachable, -lp_nr);
3847 
3848 	      /* Positive LP numbers are real landing pads, and BB enders.  */
3849 	      else if (lp_nr > 0)
3850 		{
3851 		  gcc_assert (gsi_one_before_end_p (gsi));
3852 		  eh_region region = get_eh_region_from_lp_number (lp_nr);
3853 		  bitmap_set_bit (r_reachable, region->index);
3854 		  bitmap_set_bit (lp_reachable, lp_nr);
3855 		}
3856 	    }
3857 
3858 	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
3859 	  switch (gimple_code (stmt))
3860 	    {
3861 	    case GIMPLE_RESX:
3862 	      bitmap_set_bit (r_reachable, gimple_resx_region (stmt));
3863 	      break;
3864 	    case GIMPLE_EH_DISPATCH:
3865 	      bitmap_set_bit (r_reachable, gimple_eh_dispatch_region (stmt));
3866 	      break;
3867 	    default:
3868 	      break;
3869 	    }
3870 	}
3871     }
3872 }
3873 
3874 /* Remove unreachable handlers and unreachable landing pads.  */
3875 
3876 static void
3877 remove_unreachable_handlers (void)
3878 {
3879   sbitmap r_reachable, lp_reachable;
3880   eh_region region;
3881   eh_landing_pad lp;
3882   unsigned i;
3883 
3884   mark_reachable_handlers (&r_reachable, &lp_reachable);
3885 
3886   if (dump_file)
3887     {
3888       fprintf (dump_file, "Before removal of unreachable regions:\n");
3889       dump_eh_tree (dump_file, cfun);
3890       fprintf (dump_file, "Reachable regions: ");
3891       dump_bitmap_file (dump_file, r_reachable);
3892       fprintf (dump_file, "Reachable landing pads: ");
3893       dump_bitmap_file (dump_file, lp_reachable);
3894     }
3895 
3896   if (dump_file)
3897     {
3898       FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3899 	if (region && !bitmap_bit_p (r_reachable, region->index))
3900 	  fprintf (dump_file,
3901 		   "Removing unreachable region %d\n",
3902 		   region->index);
3903     }
3904 
3905   remove_unreachable_eh_regions (r_reachable);
3906 
3907   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3908     if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3909       {
3910 	if (dump_file)
3911 	  fprintf (dump_file,
3912 		   "Removing unreachable landing pad %d\n",
3913 		   lp->index);
3914 	remove_eh_landing_pad (lp);
3915       }
3916 
3917   if (dump_file)
3918     {
3919       fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
3920       dump_eh_tree (dump_file, cfun);
3921       fprintf (dump_file, "\n\n");
3922     }
3923 
3924   sbitmap_free (r_reachable);
3925   sbitmap_free (lp_reachable);
3926 
3927 #ifdef ENABLE_CHECKING
3928   verify_eh_tree (cfun);
3929 #endif
3930 }
3931 
3932 /* Remove unreachable handlers if any landing pads have been removed since
3933    the last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
3934 
3935 void
3936 maybe_remove_unreachable_handlers (void)
3937 {
3938   eh_landing_pad lp;
3939   unsigned i;
3940 
3941   if (cfun->eh == NULL)
3942     return;
3943 
3944   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3945     if (lp && lp->post_landing_pad)
3946       {
3947 	if (label_to_block (lp->post_landing_pad) == NULL)
3948 	  {
3949 	    remove_unreachable_handlers ();
3950 	    return;
3951 	  }
3952       }
3953 }
3954 
3955 /* Remove regions that do not have landing pads.  This assumes
3956    that remove_unreachable_handlers has already been run, and
3957    that we've just manipulated the landing pads since then.
3958 
3959    Preserve regions with landing pads and regions that prevent
3960    exceptions from propagating further, even if these regions
3961    are not reachable.  */
3962 
3963 static void
3964 remove_unreachable_handlers_no_lp (void)
3965 {
3966   eh_region region;
3967   sbitmap r_reachable;
3968   unsigned i;
3969 
3970   mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
3971 
3972   FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3973     {
3974       if (! region)
3975 	continue;
3976 
3977       if (region->landing_pads != NULL
3978 	  || region->type == ERT_MUST_NOT_THROW)
3979 	bitmap_set_bit (r_reachable, region->index);
3980 
3981       if (dump_file
3982 	  && !bitmap_bit_p (r_reachable, region->index))
3983 	fprintf (dump_file,
3984 		 "Removing unreachable region %d\n",
3985 		 region->index);
3986     }
3987 
3988   remove_unreachable_eh_regions (r_reachable);
3989 
3990   sbitmap_free (r_reachable);
3991 }
3992 
3993 /* Undo critical edge splitting on an EH landing pad.  Earlier, we
3994    optimistically split all sorts of edges, including EH edges.  The
3995    optimization passes in between may not have needed them; if not,
3996    we should undo the split.
3997 
3998    Recognize this case by having one EH edge incoming to the BB and
3999    one normal edge outgoing; BB should be empty apart from the
4000    post_landing_pad label.
4001 
4002    Note that this is slightly different from the empty handler case
4003    handled by cleanup_empty_eh, in that the handler may still contain
4004    real code, but the landing pad has been separated from the
4005    handler.  As such, cleanup_empty_eh relies on this transformation
4006    having been done first.  */
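
/* Schematically (a sketch):

	throwing stmt				throwing stmt
	     | EH				     | EH
	<post_landing_pad bb>  (empty)	  ==>	<handler bb>
	     |  normal
	<handler bb>

   i.e. the empty split-off landing pad block disappears and the EH edge
   targets the handler block directly again.  */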
4007 
4008 static bool
4009 unsplit_eh (eh_landing_pad lp)
4010 {
4011   basic_block bb = label_to_block (lp->post_landing_pad);
4012   gimple_stmt_iterator gsi;
4013   edge e_in, e_out;
4014 
4015   /* Quickly check the edge counts on BB for singularity.  */
4016   if (!single_pred_p (bb) || !single_succ_p (bb))
4017     return false;
4018   e_in = single_pred_edge (bb);
4019   e_out = single_succ_edge (bb);
4020 
4021   /* Input edge must be EH and output edge must be normal.  */
4022   if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4023     return false;
4024 
4025   /* The block must be empty except for the labels and debug insns.  */
4026   gsi = gsi_after_labels (bb);
4027   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4028     gsi_next_nondebug (&gsi);
4029   if (!gsi_end_p (gsi))
4030     return false;
4031 
4032   /* The destination block must not already have a landing pad
4033      for a different region.  */
4034   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4035     {
4036       gimple stmt = gsi_stmt (gsi);
4037       tree lab;
4038       int lp_nr;
4039 
4040       if (gimple_code (stmt) != GIMPLE_LABEL)
4041 	break;
4042       lab = gimple_label_label (stmt);
4043       lp_nr = EH_LANDING_PAD_NR (lab);
4044       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4045 	return false;
4046     }
4047 
4048   /* The new destination block must not already be a destination of
4049      the source block, lest we merge fallthru and eh edges and get
4050      all sorts of confused.  */
4051   if (find_edge (e_in->src, e_out->dest))
4052     return false;
4053 
4054   /* ??? We can get degenerate phis due to cfg cleanups.  I would have
4055      thought this should have been cleaned up by a phicprop pass, but
4056      that doesn't appear to handle virtuals.  Propagate by hand.  */
4057   if (!gimple_seq_empty_p (phi_nodes (bb)))
4058     {
4059       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
4060 	{
4061 	  gimple use_stmt, phi = gsi_stmt (gsi);
4062 	  tree lhs = gimple_phi_result (phi);
4063 	  tree rhs = gimple_phi_arg_def (phi, 0);
4064 	  use_operand_p use_p;
4065 	  imm_use_iterator iter;
4066 
4067 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4068 	    {
4069 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4070 		SET_USE (use_p, rhs);
4071 	    }
4072 
4073 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4074 	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4075 
4076 	  remove_phi_node (&gsi, true);
4077 	}
4078     }
4079 
4080   if (dump_file && (dump_flags & TDF_DETAILS))
4081     fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4082 	     lp->index, e_out->dest->index);
4083 
4084   /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
4085      a successor edge, humor it.  But do the real CFG change with the
4086      predecessor of E_OUT in order to preserve the ordering of arguments
4087      to the PHI nodes in E_OUT->DEST.  */
4088   redirect_eh_edge_1 (e_in, e_out->dest, false);
4089   redirect_edge_pred (e_out, e_in->src);
4090   e_out->flags = e_in->flags;
4091   e_out->probability = e_in->probability;
4092   e_out->count = e_in->count;
4093   remove_edge (e_in);
4094 
4095   return true;
4096 }
4097 
4098 /* Examine each landing pad block and see if it matches unsplit_eh.  */
4099 
4100 static bool
4101 unsplit_all_eh (void)
4102 {
4103   bool changed = false;
4104   eh_landing_pad lp;
4105   int i;
4106 
4107   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4108     if (lp)
4109       changed |= unsplit_eh (lp);
4110 
4111   return changed;
4112 }
4113 
4114 /* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
4115    to OLD_BB to NEW_BB; return true on success, false on failure.
4116 
4117    OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4118    PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4119    Virtual PHIs may be deleted and marked for renaming.  */
4120 
4121 static bool
4122 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4123 			     edge old_bb_out, bool change_region)
4124 {
4125   gimple_stmt_iterator ngsi, ogsi;
4126   edge_iterator ei;
4127   edge e;
4128   bitmap ophi_handled;
4129 
4130   /* The destination block must not be a regular successor for any
4131      of the preds of the landing pad.  Thus, avoid turning
4132         <..>
4133 	 |  \ EH
4134 	 |  <..>
4135 	 |  /
4136 	<..>
4137      into
4138         <..>
4139 	|  | EH
4140 	<..>
4141      which CFG verification would choke on.  See PR45172 and PR51089.  */
4142   FOR_EACH_EDGE (e, ei, old_bb->preds)
4143     if (find_edge (e->src, new_bb))
4144       return false;
4145 
4146   FOR_EACH_EDGE (e, ei, old_bb->preds)
4147     redirect_edge_var_map_clear (e);
4148 
4149   ophi_handled = BITMAP_ALLOC (NULL);
4150 
4151   /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4152      for the edges we're going to move.  */
4153   for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4154     {
4155       gimple ophi, nphi = gsi_stmt (ngsi);
4156       tree nresult, nop;
4157 
4158       nresult = gimple_phi_result (nphi);
4159       nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4160 
4161       /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4162 	 the source ssa_name.  */
4163       ophi = NULL;
4164       for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4165 	{
4166 	  ophi = gsi_stmt (ogsi);
4167 	  if (gimple_phi_result (ophi) == nop)
4168 	    break;
4169 	  ophi = NULL;
4170 	}
4171 
4172       /* If we did find the corresponding PHI, copy those inputs.  */
4173       if (ophi)
4174 	{
4175 	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
4176 	  if (!has_single_use (nop))
4177 	    {
4178 	      imm_use_iterator imm_iter;
4179 	      use_operand_p use_p;
4180 
4181 	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4182 		{
4183 		  if (!gimple_debug_bind_p (USE_STMT (use_p))
4184 		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4185 			  || gimple_bb (USE_STMT (use_p)) != new_bb))
4186 		    goto fail;
4187 		}
4188 	    }
4189 	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4190 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4191 	    {
4192 	      location_t oloc;
4193 	      tree oop;
4194 
4195 	      if ((e->flags & EDGE_EH) == 0)
4196 		continue;
4197 	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
4198 	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4199 	      redirect_edge_var_map_add (e, nresult, oop, oloc);
4200 	    }
4201 	}
4202       /* If we didn't find the PHI, then whether it's a real variable or
4203 	 a VOP, we know from the fact that OLD_BB is tree_empty_eh_handler_p
4204 	 that the variable is unchanged from input to the block, so we can
4205 	 simply re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
4206       else
4207 	{
4208 	  location_t nloc
4209 	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4210 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4211 	    redirect_edge_var_map_add (e, nresult, nop, nloc);
4212 	}
4213     }
4214 
4215   /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
4216      we don't know what values from the other edges into NEW_BB to use.  */
4217   for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4218     {
4219       gimple ophi = gsi_stmt (ogsi);
4220       tree oresult = gimple_phi_result (ophi);
4221       if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4222 	goto fail;
4223     }
4224 
4225   /* Finally, move the edges and update the PHIs.  */
4226   for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4227     if (e->flags & EDGE_EH)
4228       {
4229 	/* ???  CFG manipulation routines do not try to update loop
4230 	   form on edge redirection.  Do so manually here for now.  */
4231 	/* Redirecting a loop entry or latch edge will either create a
4232 	   multiple-entry loop or rotate the loop.  If the loops merge,
4233 	   we may have created a loop with multiple latches.
4234 	   None of this is easily fixed, so cancel the affected loop
4235 	   and mark the other loop as possibly having multiple latches.  */
4236 	if (current_loops
4237 	    && e->dest == e->dest->loop_father->header)
4238 	  {
4239 	    e->dest->loop_father->header = NULL;
4240 	    e->dest->loop_father->latch = NULL;
4241 	    new_bb->loop_father->latch = NULL;
4242 	    loops_state_set (LOOPS_NEED_FIXUP|LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4243 	  }
4244 	redirect_eh_edge_1 (e, new_bb, change_region);
4245 	redirect_edge_succ (e, new_bb);
4246 	flush_pending_stmts (e);
4247       }
4248     else
4249       ei_next (&ei);
4250 
4251   BITMAP_FREE (ophi_handled);
4252   return true;
4253 
4254  fail:
4255   FOR_EACH_EDGE (e, ei, old_bb->preds)
4256     redirect_edge_var_map_clear (e);
4257   BITMAP_FREE (ophi_handled);
4258   return false;
4259 }
4260 
4261 /* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
4262    old region to NEW_REGION at BB.  */
4263 
4264 static void
4265 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4266 			  eh_landing_pad lp, eh_region new_region)
4267 {
4268   gimple_stmt_iterator gsi;
4269   eh_landing_pad *pp;
4270 
4271   for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4272     continue;
4273   *pp = lp->next_lp;
4274 
4275   lp->region = new_region;
4276   lp->next_lp = new_region->landing_pads;
4277   new_region->landing_pads = lp;
4278 
4279   /* Delete the RESX that was matched within the empty handler block.  */
4280   gsi = gsi_last_bb (bb);
4281   unlink_stmt_vdef (gsi_stmt (gsi));
4282   gsi_remove (&gsi, true);
4283 
4284   /* Clean up E_OUT for the fallthru.  */
4285   e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4286   e_out->probability = REG_BR_PROB_BASE;
4287 }
4288 
4289 /* A subroutine of cleanup_empty_eh.  Handle more complex cases of
4290    unsplitting than unsplit_eh was prepared to handle, e.g. when
4291    multiple incoming edges and phis are involved.  */
4292 
4293 static bool
4294 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4295 {
4296   gimple_stmt_iterator gsi;
4297   tree lab;
4298 
4299   /* We really ought not have totally lost everything following
4300      a landing pad label.  Given that BB is empty, there had better
4301      be a successor.  */
4302   gcc_assert (e_out != NULL);
4303 
4304   /* The destination block must not already have a landing pad
4305      for a different region.  */
4306   lab = NULL;
4307   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4308     {
4309       gimple stmt = gsi_stmt (gsi);
4310       int lp_nr;
4311 
4312       if (gimple_code (stmt) != GIMPLE_LABEL)
4313 	break;
4314       lab = gimple_label_label (stmt);
4315       lp_nr = EH_LANDING_PAD_NR (lab);
4316       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4317 	return false;
4318     }
4319 
4320   /* Attempt to move the PHIs into the successor block.  */
4321   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4322     {
4323       if (dump_file && (dump_flags & TDF_DETAILS))
4324 	fprintf (dump_file,
4325 		 "Unsplit EH landing pad %d to block %i "
4326 		 "(via cleanup_empty_eh).\n",
4327 		 lp->index, e_out->dest->index);
4328       return true;
4329     }
4330 
4331   return false;
4332 }
4333 
4334 /* Return true if edge E_FIRST is part of an empty infinite loop
4335    or leads to such a loop through a series of single successor
4336    empty bbs.  */
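
/* E.g. (a sketch): an edge whose destination is its own source block, or
   a chain like

	<bb 5> (empty)  ->  <bb 6> (empty)  ->  back to <bb 5>

   where every block on the cycle is empty and has a single successor.  */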
4337 
4338 static bool
4339 infinite_empty_loop_p (edge e_first)
4340 {
4341   bool inf_loop = false;
4342   edge e;
4343 
4344   if (e_first->dest == e_first->src)
4345     return true;
4346 
4347   e_first->src->aux = (void *) 1;
4348   for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4349     {
4350       gimple_stmt_iterator gsi;
4351       if (e->dest->aux)
4352 	{
4353 	  inf_loop = true;
4354 	  break;
4355 	}
4356       e->dest->aux = (void *) 1;
4357       gsi = gsi_after_labels (e->dest);
4358       if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4359 	gsi_next_nondebug (&gsi);
4360       if (!gsi_end_p (gsi))
4361 	break;
4362     }
4363   e_first->src->aux = NULL;
4364   for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4365     e->dest->aux = NULL;
4366 
4367   return inf_loop;
4368 }
4369 
4370 /* Examine the block associated with LP to determine if it's an empty
4371    handler for its EH region.  If so, attempt to redirect EH edges to
4372    an outer region.  Return true if the CFG was updated in any way.  This
4373    is similar to jump forwarding, just across EH edges.  */
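
/* In outline, summarizing the cases handled below: if the rethrow escapes
   the function we simply delete the EH edges; if it targets a
   MUST_NOT_THROW region we re-associate the throwing statements with that
   region and delete the edges; otherwise we either merge the PHIs into
   the outer landing pad block or, when all predecessors are EH edges,
   move the landing pad itself to the outer region.  */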
4374 
4375 static bool
4376 cleanup_empty_eh (eh_landing_pad lp)
4377 {
4378   basic_block bb = label_to_block (lp->post_landing_pad);
4379   gimple_stmt_iterator gsi;
4380   gimple resx;
4381   eh_region new_region;
4382   edge_iterator ei;
4383   edge e, e_out;
4384   bool has_non_eh_pred;
4385   bool ret = false;
4386   int new_lp_nr;
4387 
4388   /* There can be zero or one edges out of BB.  This is the quickest test.  */
4389   switch (EDGE_COUNT (bb->succs))
4390     {
4391     case 0:
4392       e_out = NULL;
4393       break;
4394     case 1:
4395       e_out = single_succ_edge (bb);
4396       break;
4397     default:
4398       return false;
4399     }
4400 
4401   resx = last_stmt (bb);
4402   if (resx && is_gimple_resx (resx))
4403     {
4404       if (stmt_can_throw_external (resx))
4405 	optimize_clobbers (bb);
4406       else if (sink_clobbers (bb))
4407 	ret = true;
4408     }
4409 
4410   gsi = gsi_after_labels (bb);
4411 
4412   /* Make sure to skip debug statements.  */
4413   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4414     gsi_next_nondebug (&gsi);
4415 
4416   /* If the block is totally empty, look for more unsplitting cases.  */
4417   if (gsi_end_p (gsi))
4418     {
4419       /* For the degenerate case of an infinite loop bail out.
4420 	 If bb has no successors and is totally empty, which can happen e.g.
4421 	 because of incorrect noreturn attribute, bail out too.  */
4422       if (e_out == NULL
4423 	  || infinite_empty_loop_p (e_out))
4424 	return ret;
4425 
4426       return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4427     }
4428 
4429   /* The block should consist only of a single RESX statement, modulo a
4430      preceding call to __builtin_stack_restore if there is no outgoing
4431      edge, since the call can be eliminated in this case.  */
4432   resx = gsi_stmt (gsi);
4433   if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4434     {
4435       gsi_next (&gsi);
4436       resx = gsi_stmt (gsi);
4437     }
4438   if (!is_gimple_resx (resx))
4439     return ret;
4440   gcc_assert (gsi_one_before_end_p (gsi));
4441 
4442   /* Determine if there are non-EH edges, or resx edges into the handler.  */
4443   has_non_eh_pred = false;
4444   FOR_EACH_EDGE (e, ei, bb->preds)
4445     if (!(e->flags & EDGE_EH))
4446       has_non_eh_pred = true;
4447 
4448   /* Find the handler that's outer of the empty handler by looking at
4449      where the RESX instruction was vectored.  */
4450   new_lp_nr = lookup_stmt_eh_lp (resx);
4451   new_region = get_eh_region_from_lp_number (new_lp_nr);
4452 
4453   /* If there's no destination region within the current function,
4454      redirection is trivial via removing the throwing statements from
4455      the EH region, removing the EH edges, and allowing the block
4456      to go unreachable.  */
4457   if (new_region == NULL)
4458     {
4459       gcc_assert (e_out == NULL);
4460       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4461 	if (e->flags & EDGE_EH)
4462 	  {
4463 	    gimple stmt = last_stmt (e->src);
4464 	    remove_stmt_from_eh_lp (stmt);
4465 	    remove_edge (e);
4466 	  }
4467 	else
4468 	  ei_next (&ei);
4469       goto succeed;
4470     }
4471 
4472   /* If the destination region is a MUST_NOT_THROW, allow the runtime
4473      to handle the abort and allow the blocks to go unreachable.  */
4474   if (new_region->type == ERT_MUST_NOT_THROW)
4475     {
4476       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4477 	if (e->flags & EDGE_EH)
4478 	  {
4479 	    gimple stmt = last_stmt (e->src);
4480 	    remove_stmt_from_eh_lp (stmt);
4481 	    add_stmt_to_eh_lp (stmt, new_lp_nr);
4482 	    remove_edge (e);
4483 	  }
4484 	else
4485 	  ei_next (&ei);
4486       goto succeed;
4487     }
4488 
4489   /* Try to redirect the EH edges and merge the PHIs into the destination
4490      landing pad block.  If the merge succeeds, we'll already have redirected
4491      all the EH edges.  The handler itself will go unreachable if there were
4492      no normal edges.  */
4493   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4494     goto succeed;
4495 
4496   /* Finally, if all input edges are EH edges, then we can (potentially)
4497      reduce the number of transfers from the runtime by moving the landing
4498      pad from the original region to the new region.  This is a win when
4499      we remove the last CLEANUP region along a particular exception
4500      propagation path.  Since nothing changes except for the region with
4501      which the landing pad is associated, the PHI nodes do not need to be
4502      adjusted at all.  */
4503   if (!has_non_eh_pred)
4504     {
4505       cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4506       if (dump_file && (dump_flags & TDF_DETAILS))
4507 	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4508 		 lp->index, new_region->index);
4509 
4510       /* ??? The CFG didn't change, but we may have rendered the
4511 	 old EH region unreachable.  Trigger a cleanup there.  */
4512       return true;
4513     }
4514 
4515   return ret;
4516 
4517  succeed:
4518   if (dump_file && (dump_flags & TDF_DETAILS))
4519     fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4520   remove_eh_landing_pad (lp);
4521   return true;
4522 }
4523 
4524 /* Do a post-order traversal of the EH region tree.  Examine each
4525    post_landing_pad block and see if we can eliminate it as empty.  */
4526 
4527 static bool
4528 cleanup_all_empty_eh (void)
4529 {
4530   bool changed = false;
4531   eh_landing_pad lp;
4532   int i;
4533 
4534   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4535     if (lp)
4536       changed |= cleanup_empty_eh (lp);
4537 
4538   return changed;
4539 }
4540 
4541 /* Perform cleanups and lowering of exception handling
4542     1) cleanup regions whose handlers do nothing are optimized out
4543     2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
4544     3) info about regions that contain instructions, and regions
4545        reachable via local EH edges, is collected
4546     4) the EH tree is pruned of regions that are no longer necessary.
4547 
4548    TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4549 	 Unify those that have the same failure decl and locus.
4550 */
4551 
4552 static unsigned int
4553 execute_cleanup_eh_1 (void)
4554 {
4555   /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4556      looking up unreachable landing pads.  */
4557   remove_unreachable_handlers ();
4558 
4559   /* Watch out for the region tree vanishing because everything in it
	 was unreachable.  */
4560   if (cfun->eh->region_tree)
4561     {
4562       bool changed = false;
4563 
4564       if (optimize)
4565 	changed |= unsplit_all_eh ();
4566       changed |= cleanup_all_empty_eh ();
4567 
4568       if (changed)
4569 	{
4570 	  free_dominance_info (CDI_DOMINATORS);
4571 	  free_dominance_info (CDI_POST_DOMINATORS);
4572 
4573           /* We delayed all basic block deletion, as we may have performed
4574 	     cleanups on EH edges while non-EH edges were still present.  */
4575 	  delete_unreachable_blocks ();
4576 
4577 	  /* We manipulated the landing pads.  Remove any region that no
4578 	     longer has a landing pad.  */
4579 	  remove_unreachable_handlers_no_lp ();
4580 
4581 	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4582 	}
4583     }
4584 
4585   return 0;
4586 }
4587 
4588 static unsigned int
4589 execute_cleanup_eh (void)
4590 {
4591   int ret = execute_cleanup_eh_1 ();
4592 
4593   /* If the function no longer needs an EH personality routine
4594      clear it.  This exposes cross-language inlining opportunities
4595      and avoids references to a never defined personality routine.  */
4596   if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4597       && function_needs_eh_personality (cfun) != eh_personality_lang)
4598     DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4599 
4600   return ret;
4601 }
4602 
4603 static bool
4604 gate_cleanup_eh (void)
4605 {
4606   return cfun->eh != NULL && cfun->eh->region_tree != NULL;
4607 }
4608 
4609 namespace {
4610 
4611 const pass_data pass_data_cleanup_eh =
4612 {
4613   GIMPLE_PASS, /* type */
4614   "ehcleanup", /* name */
4615   OPTGROUP_NONE, /* optinfo_flags */
4616   true, /* has_gate */
4617   true, /* has_execute */
4618   TV_TREE_EH, /* tv_id */
4619   PROP_gimple_lcf, /* properties_required */
4620   0, /* properties_provided */
4621   0, /* properties_destroyed */
4622   0, /* todo_flags_start */
4623   TODO_verify_ssa, /* todo_flags_finish */
4624 };
4625 
4626 class pass_cleanup_eh : public gimple_opt_pass
4627 {
4628 public:
4629   pass_cleanup_eh (gcc::context *ctxt)
4630     : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4631   {}
4632 
4633   /* opt_pass methods: */
4634   opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4635   bool gate () { return gate_cleanup_eh (); }
4636   unsigned int execute () { return execute_cleanup_eh (); }
4637 
4638 }; // class pass_cleanup_eh
4639 
4640 } // anon namespace
4641 
4642 gimple_opt_pass *
4643 make_pass_cleanup_eh (gcc::context *ctxt)
4644 {
4645   return new pass_cleanup_eh (ctxt);
4646 }
4647 
4648 /* Verify that the BB containing STMT as its last statement has precisely
4649    the edge that make_eh_edges would create.  */
4650 
4651 DEBUG_FUNCTION bool
4652 verify_eh_edges (gimple stmt)
4653 {
4654   basic_block bb = gimple_bb (stmt);
4655   eh_landing_pad lp = NULL;
4656   int lp_nr;
4657   edge_iterator ei;
4658   edge e, eh_edge;
4659 
4660   lp_nr = lookup_stmt_eh_lp (stmt);
4661   if (lp_nr > 0)
4662     lp = get_eh_landing_pad_from_number (lp_nr);
4663 
4664   eh_edge = NULL;
4665   FOR_EACH_EDGE (e, ei, bb->succs)
4666     {
4667       if (e->flags & EDGE_EH)
4668 	{
4669 	  if (eh_edge)
4670 	    {
4671 	      error ("BB %i has multiple EH edges", bb->index);
4672 	      return true;
4673 	    }
4674 	  else
4675 	    eh_edge = e;
4676 	}
4677     }
4678 
4679   if (lp == NULL)
4680     {
4681       if (eh_edge)
4682 	{
4683 	  error ("BB %i can not throw but has an EH edge", bb->index);
4684 	  return true;
4685 	}
4686       return false;
4687     }
4688 
4689   if (!stmt_could_throw_p (stmt))
4690     {
4691       error ("BB %i last statement has incorrectly set lp", bb->index);
4692       return true;
4693     }
4694 
4695   if (eh_edge == NULL)
4696     {
4697       error ("BB %i is missing an EH edge", bb->index);
4698       return true;
4699     }
4700 
4701   if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4702     {
4703       error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4704       return true;
4705     }
4706 
4707   return false;
4708 }
4709 
4710 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */
4711 
4712 DEBUG_FUNCTION bool
4713 verify_eh_dispatch_edge (gimple stmt)
4714 {
4715   eh_region r;
4716   eh_catch c;
4717   basic_block src, dst;
4718   bool want_fallthru = true;
4719   edge_iterator ei;
4720   edge e, fall_edge;
4721 
4722   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4723   src = gimple_bb (stmt);
4724 
4725   FOR_EACH_EDGE (e, ei, src->succs)
4726     gcc_assert (e->aux == NULL);
4727 
4728   switch (r->type)
4729     {
4730     case ERT_TRY:
4731       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4732 	{
4733 	  dst = label_to_block (c->label);
4734 	  e = find_edge (src, dst);
4735 	  if (e == NULL)
4736 	    {
4737 	      error ("BB %i is missing an edge", src->index);
4738 	      return true;
4739 	    }
4740 	  e->aux = (void *)e;
4741 
4742 	  /* A catch-all handler doesn't have a fallthru.  */
4743 	  if (c->type_list == NULL)
4744 	    {
4745 	      want_fallthru = false;
4746 	      break;
4747 	    }
4748 	}
4749       break;
4750 
4751     case ERT_ALLOWED_EXCEPTIONS:
4752       dst = label_to_block (r->u.allowed.label);
4753       e = find_edge (src, dst);
4754       if (e == NULL)
4755 	{
4756 	  error ("BB %i is missing an edge", src->index);
4757 	  return true;
4758 	}
4759       e->aux = (void *)e;
4760       break;
4761 
4762     default:
4763       gcc_unreachable ();
4764     }
4765 
4766   fall_edge = NULL;
4767   FOR_EACH_EDGE (e, ei, src->succs)
4768     {
4769       if (e->flags & EDGE_FALLTHRU)
4770 	{
4771 	  if (fall_edge != NULL)
4772 	    {
4773 	      error ("BB %i too many fallthru edges", src->index);
4774 	      return true;
4775 	    }
4776 	  fall_edge = e;
4777 	}
4778       else if (e->aux)
4779 	e->aux = NULL;
4780       else
4781 	{
4782 	  error ("BB %i has incorrect edge", src->index);
4783 	  return true;
4784 	}
4785     }
4786   if ((fall_edge != NULL) ^ want_fallthru)
4787     {
4788       error ("BB %i has incorrect fallthru edge", src->index);
4789       return true;
4790     }
4791 
4792   return false;
4793 }
4794