1 /* Exception handling semantics and decomposition for trees.
2    Copyright (C) 2003-2019 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10 
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "cgraph.h"
31 #include "diagnostic-core.h"
32 #include "fold-const.h"
33 #include "calls.h"
34 #include "except.h"
35 #include "cfganal.h"
36 #include "cfgcleanup.h"
37 #include "tree-eh.h"
38 #include "gimple-iterator.h"
39 #include "tree-cfg.h"
40 #include "tree-into-ssa.h"
41 #include "tree-ssa.h"
42 #include "tree-inline.h"
43 #include "langhooks.h"
44 #include "cfgloop.h"
45 #include "gimple-low.h"
46 #include "stringpool.h"
47 #include "attribs.h"
48 #include "asan.h"
49 #include "gimplify.h"
50 
51 /* In some instances a tree and a gimple need to be stored in the same
52    table, e.g. as keys in hash tables.  This union makes that possible.  */
53 typedef union {tree *tp; tree t; gimple *g;} treemple;
54 
55 /* Misc functions used in this file.  */
56 
57 /* Remember and lookup EH landing pad data for arbitrary statements.
58    Really this means any statement that could_throw_p.  We could
59    stuff this information into the stmt_ann data structure, but:
60 
61    (1) We absolutely rely on this information being kept until
62    we get to rtl.  Once we're done with lowering here, if we lose
63    the information there's no way to recover it!
64 
65    (2) There are many more statements that *cannot* throw as
66    compared to those that can.  We should be saving some amount
67    of space by only allocating memory for those that can throw.  */
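
/* For illustration only (a sketch, not lifted from any particular caller):
   a statement that can throw is recorded against its landing pad, and the
   association can be queried or dropped again with the functions below.

	add_stmt_to_eh_lp (call_stmt, lp->index);
	...
	lp_nr = lookup_stmt_eh_lp (call_stmt);
	remove_stmt_from_eh_lp (call_stmt);

   A positive LP_NR names a landing pad, a negative one a MUST_NOT_THROW
   region, and zero means the statement is not recorded at all.  */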
68 
69 /* Add statement T in function IFUN to landing pad NUM.  */
70 
71 static void
72 add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
73 {
74   gcc_assert (num != 0);
75 
76   if (!get_eh_throw_stmt_table (ifun))
77     set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));
78 
79   gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
80 }
81 
82 /* Add statement T in the current function (cfun) to EH landing pad NUM.  */
83 
84 void
85 add_stmt_to_eh_lp (gimple *t, int num)
86 {
87   add_stmt_to_eh_lp_fn (cfun, t, num);
88 }
89 
90 /* Add statement T to the single EH landing pad in REGION.  */
91 
92 static void
93 record_stmt_eh_region (eh_region region, gimple *t)
94 {
95   if (region == NULL)
96     return;
97   if (region->type == ERT_MUST_NOT_THROW)
98     add_stmt_to_eh_lp_fn (cfun, t, -region->index);
99   else
100     {
101       eh_landing_pad lp = region->landing_pads;
102       if (lp == NULL)
103 	lp = gen_eh_landing_pad (region);
104       else
105 	gcc_assert (lp->next_lp == NULL);
106       add_stmt_to_eh_lp_fn (cfun, t, lp->index);
107     }
108 }
109 
110 
111 /* Remove statement T in function IFUN from its EH landing pad.  */
112 
113 bool
114 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
115 {
116   if (!get_eh_throw_stmt_table (ifun))
117     return false;
118 
119   if (!get_eh_throw_stmt_table (ifun)->get (t))
120     return false;
121 
122   get_eh_throw_stmt_table (ifun)->remove (t);
123   return true;
124 }
125 
126 
127 /* Remove statement T in the current function (cfun) from its
128    EH landing pad.  */
129 
130 bool
131 remove_stmt_from_eh_lp (gimple *t)
132 {
133   return remove_stmt_from_eh_lp_fn (cfun, t);
134 }
135 
136 /* Determine if statement T is inside an EH region in function IFUN.
137    Positive numbers indicate a landing pad index; negative numbers
138    indicate a MUST_NOT_THROW region index; zero indicates that the
139    statement is not recorded in the region table.  */
140 
141 int
142 lookup_stmt_eh_lp_fn (struct function *ifun, gimple *t)
143 {
144   if (ifun->eh->throw_stmt_table == NULL)
145     return 0;
146 
147   int *lp_nr = ifun->eh->throw_stmt_table->get (t);
148   return lp_nr ? *lp_nr : 0;
149 }
150 
151 /* Likewise, but always use the current function.  */
152 
153 int
154 lookup_stmt_eh_lp (gimple *t)
155 {
156   /* We can get called from initialized data when -fnon-call-exceptions
157      is on; prevent crash.  */
158   if (!cfun)
159     return 0;
160   return lookup_stmt_eh_lp_fn (cfun, t);
161 }
162 
163 /* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
164    nodes and LABEL_DECL nodes.  We will use this during the second phase to
165    determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */
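
/* For illustration (a rough sketch, not taken from any testcase):

	try {				// TRY_A
	  try {				// TRY_B
	    goto done;
	  } finally { f1 (); }
	} finally { f2 (); }
	done:;

   Here TRY_B is recorded with parent TRY_A, labels are recorded with the
   GIMPLE_TRY_FINALLY whose body contains them, and "done" has no recorded
   parent at all; outside_finally_tree below walks these parent links and
   so sees that the goto leaves both TRY_B and TRY_A.  */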
166 
167 struct finally_tree_node
168 {
169   /* When storing a GIMPLE_TRY, we have to record a gimple.  However
170      when deciding whether a GOTO to a certain LABEL_DECL (which is a
171      tree) leaves the TRY block, it's necessary to record a tree in
172      this field.  Thus a treemple is used. */
173   treemple child;
174   gtry *parent;
175 };
176 
177 /* Hashtable helpers.  */
178 
179 struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
180 {
181   static inline hashval_t hash (const finally_tree_node *);
182   static inline bool equal (const finally_tree_node *,
183 			    const finally_tree_node *);
184 };
185 
186 inline hashval_t
187 finally_tree_hasher::hash (const finally_tree_node *v)
188 {
189   return (intptr_t)v->child.t >> 4;
190 }
191 
192 inline bool
193 finally_tree_hasher::equal (const finally_tree_node *v,
194 			    const finally_tree_node *c)
195 {
196   return v->child.t == c->child.t;
197 }
198 
199 /* Note that this table is *not* marked GTY.  It is short-lived.  */
200 static hash_table<finally_tree_hasher> *finally_tree;
201 
202 static void
203 record_in_finally_tree (treemple child, gtry *parent)
204 {
205   struct finally_tree_node *n;
206   finally_tree_node **slot;
207 
208   n = XNEW (struct finally_tree_node);
209   n->child = child;
210   n->parent = parent;
211 
212   slot = finally_tree->find_slot (n, INSERT);
213   gcc_assert (!*slot);
214   *slot = n;
215 }
216 
217 static void
218 collect_finally_tree (gimple *stmt, gtry *region);
219 
220 /* Go through the gimple sequence.  Works with collect_finally_tree to
221    record all GIMPLE_LABEL and GIMPLE_TRY statements. */
222 
223 static void
224 collect_finally_tree_1 (gimple_seq seq, gtry *region)
225 {
226   gimple_stmt_iterator gsi;
227 
228   for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
229     collect_finally_tree (gsi_stmt (gsi), region);
230 }
231 
232 static void
233 collect_finally_tree (gimple *stmt, gtry *region)
234 {
235   treemple temp;
236 
237   switch (gimple_code (stmt))
238     {
239     case GIMPLE_LABEL:
240       temp.t = gimple_label_label (as_a <glabel *> (stmt));
241       record_in_finally_tree (temp, region);
242       break;
243 
244     case GIMPLE_TRY:
245       if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
246         {
247           temp.g = stmt;
248           record_in_finally_tree (temp, region);
249           collect_finally_tree_1 (gimple_try_eval (stmt),
250 				  as_a <gtry *> (stmt));
251 	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
252         }
253       else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
254         {
255           collect_finally_tree_1 (gimple_try_eval (stmt), region);
256           collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
257         }
258       break;
259 
260     case GIMPLE_CATCH:
261       collect_finally_tree_1 (gimple_catch_handler (
262 				 as_a <gcatch *> (stmt)),
263 			      region);
264       break;
265 
266     case GIMPLE_EH_FILTER:
267       collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
268       break;
269 
270     case GIMPLE_EH_ELSE:
271       {
272 	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
273 	collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
274 	collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
275       }
276       break;
277 
278     default:
279       /* A type, a decl, or some kind of statement that we're not
280 	 interested in.  Don't walk them.  */
281       break;
282     }
283 }
284 
285 
286 /* Use the finally tree to determine if a jump from START to TARGET
287    would leave the try_finally node that START lives in.  */
288 
289 static bool
290 outside_finally_tree (treemple start, gimple *target)
291 {
292   struct finally_tree_node n, *p;
293 
294   do
295     {
296       n.child = start;
297       p = finally_tree->find (&n);
298       if (!p)
299 	return true;
300       start.g = p->parent;
301     }
302   while (start.g != target);
303 
304   return false;
305 }
306 
307 /* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
308    nodes into a set of gotos, magic labels, and eh regions.
309    The eh region creation is straightforward, but frobbing all the gotos
310    and such into shape isn't.  */
311 
312 /* The sequence into which we record all EH stuff.  This will be
313    placed at the end of the function when we're all done.  */
314 static gimple_seq eh_seq;
315 
316 /* Record whether an EH region contains something that can throw,
317    indexed by EH region number.  */
318 static bitmap eh_region_may_contain_throw_map;
319 
320 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
321    statements that are seen to escape this GIMPLE_TRY_FINALLY node.
322    The idea is to record a gimple statement for everything except for
323    the conditionals, which get their labels recorded. Since labels are
324    of type 'tree', we need this node to store both gimple and tree
325    objects.  REPL_STMT is the sequence used to replace the goto/return
326    statement.  CONT_STMT is used to store the statement that allows
327    the return/goto to jump to the original destination. */
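
/* For illustration (a rough sketch): given

	try { if (p) return; } finally { f (); }

   the GIMPLE_RETURN is entered into the queue; its REPL_STMT becomes a
   "goto <finally_label>;" (preceded by whatever assignment the chosen
   lowering strategy needs), while its CONT_STMT keeps the original return
   so that it can be re-emitted after the finally code at the proper
   destination.  */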
328 
329 struct goto_queue_node
330 {
331   treemple stmt;
332   location_t location;
333   gimple_seq repl_stmt;
334   gimple *cont_stmt;
335   int index;
336   /* This is used when index >= 0 to indicate that stmt is a label (as
337      opposed to a goto stmt).  */
338   int is_label;
339 };
340 
341 /* State of the world while lowering.  */
342 
343 struct leh_state
344 {
345   /* What's "current" while constructing the eh region tree.  These
346      correspond to variables of the same name in cfun->eh, which we
347      don't have easy access to.  */
348   eh_region cur_region;
349 
350   /* What's "current" for the purposes of __builtin_eh_pointer.  For
351      a CATCH, this is the associated TRY.  For an EH_FILTER, this is
352      the associated ALLOWED_EXCEPTIONS, etc.  */
353   eh_region ehp_region;
354 
355   /* Processing of TRY_FINALLY requires a bit more state.  This is
356      split out into a separate structure so that we don't have to
357      copy so much when processing other nodes.  */
358   struct leh_tf_state *tf;
359 };
360 
361 struct leh_tf_state
362 {
363   /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
364      try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
365      this so that outside_finally_tree can reliably reference the tree used
366      in the collect_finally_tree data structures.  */
367   gtry *try_finally_expr;
368   gtry *top_p;
369 
370   /* While lowering a top_p it is usually expanded into multiple
371      statements; the following field stores them.  */
372   gimple_seq top_p_seq;
373 
374   /* The state outside this try_finally node.  */
375   struct leh_state *outer;
376 
377   /* The exception region created for it.  */
378   eh_region region;
379 
380   /* The goto queue.  */
381   struct goto_queue_node *goto_queue;
382   size_t goto_queue_size;
383   size_t goto_queue_active;
384 
385   /* Pointer map to help in searching goto_queue when it is large.  */
386   hash_map<gimple *, goto_queue_node *> *goto_queue_map;
387 
388   /* The set of unique labels seen as entries in the goto queue.  */
389   vec<tree> dest_array;
390 
391   /* A label to be added at the end of the completed transformed
392      sequence.  It will be set if may_fallthru was true *at one time*,
393      though subsequent transformations may have cleared that flag.  */
394   tree fallthru_label;
395 
396   /* True if it is possible to fall out the bottom of the try block.
397      Cleared if the fallthru is converted to a goto.  */
398   bool may_fallthru;
399 
400   /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
401   bool may_return;
402 
403   /* True if the finally block can receive an exception edge.
404      Cleared if the exception case is handled by code duplication.  */
405   bool may_throw;
406 };
407 
408 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);
409 
410 /* Search for STMT in the goto queue.  Return the replacement,
411    or null if the statement isn't in the queue.  */
412 
413 #define LARGE_GOTO_QUEUE 20
414 
415 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
416 
417 static gimple_seq
418 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
419 {
420   unsigned int i;
421 
422   if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
423     {
424       for (i = 0; i < tf->goto_queue_active; i++)
425 	if ( tf->goto_queue[i].stmt.g == stmt.g)
426 	  return tf->goto_queue[i].repl_stmt;
427       return NULL;
428     }
429 
430   /* If we have a large number of entries in the goto_queue, create a
431      pointer map and use that for searching.  */
432 
433   if (!tf->goto_queue_map)
434     {
435       tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
436       for (i = 0; i < tf->goto_queue_active; i++)
437 	{
438 	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
439 						  &tf->goto_queue[i]);
440 	  gcc_assert (!existed);
441 	}
442     }
443 
444   goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
445   if (slot != NULL)
446     return ((*slot)->repl_stmt);
447 
448   return NULL;
449 }
450 
451 /* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
452    lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
453    then we can just splat it in, otherwise we add the new stmts immediately
454    after the GIMPLE_COND and redirect.  */
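
/* For illustration (a rough sketch): if the queue maps the true label of

	if (a) goto orig_lab; else goto other_lab;

   to the two-statement sequence { finally_tmp = 1; goto finally_lab; },
   we rewrite the condition to target a fresh label and append a copy of
   the sequence right after the GIMPLE_COND:

	if (a) goto new_lab; else goto other_lab;
	new_lab:
	finally_tmp = 1;
	goto finally_lab;

   whereas a replacement that is a single goto is written straight into
   the GIMPLE_COND operand.  */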
455 
456 static void
457 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
458 				gimple_stmt_iterator *gsi)
459 {
460   tree label;
461   gimple_seq new_seq;
462   treemple temp;
463   location_t loc = gimple_location (gsi_stmt (*gsi));
464 
465   temp.tp = tp;
466   new_seq = find_goto_replacement (tf, temp);
467   if (!new_seq)
468     return;
469 
470   if (gimple_seq_singleton_p (new_seq)
471       && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
472     {
473       *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
474       return;
475     }
476 
477   label = create_artificial_label (loc);
478   /* Set the new label for the GIMPLE_COND */
479   *tp = label;
480 
481   gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
482   gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
483 }
484 
485 /* The real work of replace_goto_queue.  Returns with TSI updated to
486    point to the next statement.  */
487 
488 static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
489 
490 static void
491 replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
492 		      gimple_stmt_iterator *gsi)
493 {
494   gimple_seq seq;
495   treemple temp;
496   temp.g = NULL;
497 
498   switch (gimple_code (stmt))
499     {
500     case GIMPLE_GOTO:
501     case GIMPLE_RETURN:
502       temp.g = stmt;
503       seq = find_goto_replacement (tf, temp);
504       if (seq)
505 	{
506 	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
507 	  gsi_remove (gsi, false);
508 	  return;
509 	}
510       break;
511 
512     case GIMPLE_COND:
513       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
514       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
515       break;
516 
517     case GIMPLE_TRY:
518       replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
519       replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
520       break;
521     case GIMPLE_CATCH:
522       replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
523 				      as_a <gcatch *> (stmt)),
524 				    tf);
525       break;
526     case GIMPLE_EH_FILTER:
527       replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
528       break;
529     case GIMPLE_EH_ELSE:
530       {
531 	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
532 	replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
533 				      tf);
534 	replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
535 				      tf);
536       }
537       break;
538 
539     default:
540       /* These won't have gotos in them.  */
541       break;
542     }
543 
544   gsi_next (gsi);
545 }
546 
547 /* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */
548 
549 static void
550 replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
551 {
552   gimple_stmt_iterator gsi = gsi_start (*seq);
553 
554   while (!gsi_end_p (gsi))
555     replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
556 }
557 
558 /* Replace all goto queue members.  */
559 
560 static void
561 replace_goto_queue (struct leh_tf_state *tf)
562 {
563   if (tf->goto_queue_active == 0)
564     return;
565   replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
566   replace_goto_queue_stmt_list (&eh_seq, tf);
567 }
568 
569 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
570    data to be added, IS_LABEL indicates whether NEW_STMT is a label or
571    a gimple return. */
572 
573 static void
574 record_in_goto_queue (struct leh_tf_state *tf,
575                       treemple new_stmt,
576                       int index,
577                       bool is_label,
578 		      location_t location)
579 {
580   size_t active, size;
581   struct goto_queue_node *q;
582 
583   gcc_assert (!tf->goto_queue_map);
584 
585   active = tf->goto_queue_active;
586   size = tf->goto_queue_size;
587   if (active >= size)
588     {
589       size = (size ? size * 2 : 32);
590       tf->goto_queue_size = size;
591       tf->goto_queue
592          = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
593     }
594 
595   q = &tf->goto_queue[active];
596   tf->goto_queue_active = active + 1;
597 
598   memset (q, 0, sizeof (*q));
599   q->stmt = new_stmt;
600   q->index = index;
601   q->location = location;
602   q->is_label = is_label;
603 }
604 
605 /* Record the LABEL label in the goto queue contained in TF.
606    TF is not null.  */
607 
608 static void
609 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
610 			    location_t location)
611 {
612   int index;
613   treemple temp, new_stmt;
614 
615   if (!label)
616     return;
617 
618   /* Computed and non-local gotos do not get processed.  Given
619      their nature we can neither tell whether we've escaped the
620      finally block nor redirect them if we knew.  */
621   if (TREE_CODE (label) != LABEL_DECL)
622     return;
623 
624   /* No need to record gotos that don't leave the try block.  */
625   temp.t = label;
626   if (!outside_finally_tree (temp, tf->try_finally_expr))
627     return;
628 
629   if (! tf->dest_array.exists ())
630     {
631       tf->dest_array.create (10);
632       tf->dest_array.quick_push (label);
633       index = 0;
634     }
635   else
636     {
637       int n = tf->dest_array.length ();
638       for (index = 0; index < n; ++index)
639         if (tf->dest_array[index] == label)
640           break;
641       if (index == n)
642         tf->dest_array.safe_push (label);
643     }
644 
645   /* In the case of a GOTO we want to record the destination label,
646      since with a GIMPLE_COND we have easy access to the then/else
647      labels. */
648   new_stmt = stmt;
649   record_in_goto_queue (tf, new_stmt, index, true, location);
650 }
651 
652 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
653    node, and if so record that fact in the goto queue associated with that
654    try_finally node.  */
655 
656 static void
657 maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
658 {
659   struct leh_tf_state *tf = state->tf;
660   treemple new_stmt;
661 
662   if (!tf)
663     return;
664 
665   switch (gimple_code (stmt))
666     {
667     case GIMPLE_COND:
668       {
669 	gcond *cond_stmt = as_a <gcond *> (stmt);
670 	new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
671 	record_in_goto_queue_label (tf, new_stmt,
672 				    gimple_cond_true_label (cond_stmt),
673 				    EXPR_LOCATION (*new_stmt.tp));
674 	new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
675 	record_in_goto_queue_label (tf, new_stmt,
676 				    gimple_cond_false_label (cond_stmt),
677 				    EXPR_LOCATION (*new_stmt.tp));
678       }
679       break;
680     case GIMPLE_GOTO:
681       new_stmt.g = stmt;
682       record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
683 				  gimple_location (stmt));
684       break;
685 
686     case GIMPLE_RETURN:
687       tf->may_return = true;
688       new_stmt.g = stmt;
689       record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
690       break;
691 
692     default:
693       gcc_unreachable ();
694     }
695 }
696 
697 
698 #if CHECKING_P
699 /* We do not process GIMPLE_SWITCHes for now.  As long as the original source
700    was in fact structured, and we've not yet done jump threading, then none
701    of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this.  */
702 
703 static void
704 verify_norecord_switch_expr (struct leh_state *state,
705 			     gswitch *switch_expr)
706 {
707   struct leh_tf_state *tf = state->tf;
708   size_t i, n;
709 
710   if (!tf)
711     return;
712 
713   n = gimple_switch_num_labels (switch_expr);
714 
715   for (i = 0; i < n; ++i)
716     {
717       treemple temp;
718       tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
719       temp.t = lab;
720       gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
721     }
722 }
723 #else
724 #define verify_norecord_switch_expr(state, switch_expr)
725 #endif
726 
727 /* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
728    non-null, insert it before the new branch.  */
729 
730 static void
731 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
732 {
733   gimple *x;
734 
735   /* In the case of a return, the queue node must be a gimple statement.  */
736   gcc_assert (!q->is_label);
737 
738   /* Note that the return value may have already been computed, e.g.,
739 
740 	int x;
741 	int foo (void)
742 	{
743 	  x = 0;
744 	  try {
745 	    return x;
746 	  } finally {
747 	    x++;
748 	  }
749 	}
750 
751      should return 0, not 1.  We don't have to do anything to make
752      this happen because the return value has been placed in the
753      RESULT_DECL already.  */
754 
755   q->cont_stmt = q->stmt.g;
756 
757   if (mod)
758     gimple_seq_add_seq (&q->repl_stmt, mod);
759 
760   x = gimple_build_goto (finlab);
761   gimple_set_location (x, q->location);
762   gimple_seq_add_stmt (&q->repl_stmt, x);
763 }
764 
765 /* Similar, but easier, for GIMPLE_GOTO.  */
766 
767 static void
768 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
769 		     struct leh_tf_state *tf)
770 {
771   ggoto *x;
772 
773   gcc_assert (q->is_label);
774 
775   q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
776 
777   if (mod)
778     gimple_seq_add_seq (&q->repl_stmt, mod);
779 
780   x = gimple_build_goto (finlab);
781   gimple_set_location (x, q->location);
782   gimple_seq_add_stmt (&q->repl_stmt, x);
783 }
784 
785 /* Emit a standard landing pad sequence into SEQ for REGION.  */
786 
787 static void
788 emit_post_landing_pad (gimple_seq *seq, eh_region region)
789 {
790   eh_landing_pad lp = region->landing_pads;
791   glabel *x;
792 
793   if (lp == NULL)
794     lp = gen_eh_landing_pad (region);
795 
796   lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
797   EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
798 
799   x = gimple_build_label (lp->post_landing_pad);
800   gimple_seq_add_stmt (seq, x);
801 }
802 
803 /* Emit a RESX statement into SEQ for REGION.  */
804 
805 static void
806 emit_resx (gimple_seq *seq, eh_region region)
807 {
808   gresx *x = gimple_build_resx (region->index);
809   gimple_seq_add_stmt (seq, x);
810   if (region->outer)
811     record_stmt_eh_region (region->outer, x);
812 }
813 
814 /* Emit an EH_DISPATCH statement into SEQ for REGION.  */
815 
816 static void
817 emit_eh_dispatch (gimple_seq *seq, eh_region region)
818 {
819   geh_dispatch *x = gimple_build_eh_dispatch (region->index);
820   gimple_seq_add_stmt (seq, x);
821 }
822 
823 /* Note that the current EH region may contain a throw, or a
824    call to a function which itself may contain a throw.  */
825 
826 static void
827 note_eh_region_may_contain_throw (eh_region region)
828 {
829   while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
830     {
831       if (region->type == ERT_MUST_NOT_THROW)
832 	break;
833       region = region->outer;
834       if (region == NULL)
835 	break;
836     }
837 }
838 
839 /* Check if REGION has been marked as containing a throw.  If REGION is
840    NULL, this predicate is false.  */
841 
842 static inline bool
843 eh_region_may_contain_throw (eh_region r)
844 {
845   return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
846 }
847 
848 /* We want to transform
849 	try { body; } catch { stuff; }
850    to
851 	normal_sequence:
852 	  body;
853 	  over:
854 	eh_sequence:
855 	  landing_pad:
856 	  stuff;
857 	  goto over;
858 
859    TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
860    should be placed before the second operand, or NULL.  OVER is
861    an existing label that should be put at the exit, or NULL.  */
862 
863 static gimple_seq
864 frob_into_branch_around (gtry *tp, eh_region region, tree over)
865 {
866   gimple *x;
867   gimple_seq cleanup, result;
868   location_t loc = gimple_location (tp);
869 
870   cleanup = gimple_try_cleanup (tp);
871   result = gimple_try_eval (tp);
872 
873   if (region)
874     emit_post_landing_pad (&eh_seq, region);
875 
876   if (gimple_seq_may_fallthru (cleanup))
877     {
878       if (!over)
879 	over = create_artificial_label (loc);
880       x = gimple_build_goto (over);
881       gimple_set_location (x, loc);
882       gimple_seq_add_stmt (&cleanup, x);
883     }
884   gimple_seq_add_seq (&eh_seq, cleanup);
885 
886   if (over)
887     {
888       x = gimple_build_label (over);
889       gimple_seq_add_stmt (&result, x);
890     }
891   return result;
892 }
893 
894 /* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
895    Make sure to record all new labels found.  */
896 
897 static gimple_seq
898 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
899 			     location_t loc)
900 {
901   gtry *region = NULL;
902   gimple_seq new_seq;
903   gimple_stmt_iterator gsi;
904 
905   new_seq = copy_gimple_seq_and_replace_locals (seq);
906 
907   for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
908     {
909       gimple *stmt = gsi_stmt (gsi);
910       /* We duplicate __builtin_stack_restore at -O0 in the hope of eliminating
911 	 it on the EH paths.  When it is not eliminated, make it transparent in
912 	 the debug info.  */
913       if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
914 	gimple_set_location (stmt, UNKNOWN_LOCATION);
915       else if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
916 	{
917 	  tree block = gimple_block (stmt);
918 	  gimple_set_location (stmt, loc);
919 	  gimple_set_block (stmt, block);
920 	}
921     }
922 
923   if (outer_state->tf)
924     region = outer_state->tf->try_finally_expr;
925   collect_finally_tree_1 (new_seq, region);
926 
927   return new_seq;
928 }
929 
930 /* A subroutine of lower_try_finally.  Create a fallthru label for
931    the given try_finally state.  The only tricky bit here is that
932    we have to make sure to record the label in our outer context.  */
933 
934 static tree
935 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
936 {
937   tree label = tf->fallthru_label;
938   treemple temp;
939 
940   if (!label)
941     {
942       label = create_artificial_label (gimple_location (tf->try_finally_expr));
943       tf->fallthru_label = label;
944       if (tf->outer->tf)
945         {
946           temp.t = label;
947           record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
948         }
949     }
950   return label;
951 }
952 
953 /* A subroutine of lower_try_finally.  If FINALLY consists of a
954    GIMPLE_EH_ELSE node, return it.  */
955 
956 static inline geh_else *
957 get_eh_else (gimple_seq finally)
958 {
959   gimple *x = gimple_seq_first_stmt (finally);
960   if (gimple_code (x) == GIMPLE_EH_ELSE)
961     {
962       gcc_assert (gimple_seq_singleton_p (finally));
963       return as_a <geh_else *> (x);
964     }
965   return NULL;
966 }
967 
968 /* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
969    langhook returns non-null, then the language requires that the exception
970    path out of a try_finally be treated specially.  To wit: the code within
971    the finally block may not itself throw an exception.  We have two choices
972    here. First we can duplicate the finally block and wrap it in a
973    must_not_throw region.  Second, we can generate code like
974 
975 	try {
976 	  finally_block;
977 	} catch {
978 	  if (fintmp == eh_edge)
979 	    protect_cleanup_actions;
980 	}
981 
982    where "fintmp" is the temporary used in the switch statement generation
983    alternative considered below.  For the nonce, we always choose the first
984    option.
985 
986    THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */
987 
988 static void
989 honor_protect_cleanup_actions (struct leh_state *outer_state,
990 			       struct leh_state *this_state,
991 			       struct leh_tf_state *tf)
992 {
993   gimple_seq finally = gimple_try_cleanup (tf->top_p);
994 
995   /* EH_ELSE doesn't come from user code; only compiler generated stuff.
996      It does need to be handled here, so as to separate the (different)
997      EH path from the normal path.  But we should not attempt to wrap
998      it with a must-not-throw node (which indeed gets in the way).  */
999   if (geh_else *eh_else = get_eh_else (finally))
1000     {
1001       gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
1002       finally = gimple_eh_else_e_body (eh_else);
1003 
1004       /* Let the ELSE see the exception that's being processed.  */
1005       eh_region save_ehp = this_state->ehp_region;
1006       this_state->ehp_region = this_state->cur_region;
1007       lower_eh_constructs_1 (this_state, &finally);
1008       this_state->ehp_region = save_ehp;
1009     }
1010   else
1011     {
1012       /* First check for nothing to do.  */
1013       if (lang_hooks.eh_protect_cleanup_actions == NULL)
1014 	return;
1015       tree actions = lang_hooks.eh_protect_cleanup_actions ();
1016       if (actions == NULL)
1017 	return;
1018 
1019       if (this_state)
1020 	finally = lower_try_finally_dup_block (finally, outer_state,
1021 	  gimple_location (tf->try_finally_expr));
1022 
1023       /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1024 	 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1025 	 to be in an enclosing scope, but needs to be implemented at this level
1026 	 to avoid a nesting violation (see wrap_temporary_cleanups in
1027 	 cp/decl.c).  Since it's logically at an outer level, we should call
1028 	 terminate before we get to it, so strip it away before adding the
1029 	 MUST_NOT_THROW filter.  */
1030       gimple_stmt_iterator gsi = gsi_start (finally);
1031       gimple *x = gsi_stmt (gsi);
1032       if (gimple_code (x) == GIMPLE_TRY
1033 	  && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1034 	  && gimple_try_catch_is_cleanup (x))
1035 	{
1036 	  gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1037 	  gsi_remove (&gsi, false);
1038 	}
1039 
1040       /* Wrap the block with protect_cleanup_actions as the action.  */
1041       geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
1042       gtry *try_stmt = gimple_build_try (finally,
1043 					 gimple_seq_alloc_with_stmt (eh_mnt),
1044 					 GIMPLE_TRY_CATCH);
1045       finally = lower_eh_must_not_throw (outer_state, try_stmt);
1046     }
1047 
1048   /* Drop all of this into the exception sequence.  */
1049   emit_post_landing_pad (&eh_seq, tf->region);
1050   gimple_seq_add_seq (&eh_seq, finally);
1051   if (gimple_seq_may_fallthru (finally))
1052     emit_resx (&eh_seq, tf->region);
1053 
1054   /* Having now been handled, EH isn't to be considered with
1055      the rest of the outgoing edges.  */
1056   tf->may_throw = false;
1057 }
1058 
1059 /* A subroutine of lower_try_finally.  We have determined that there is
1060    no fallthru edge out of the finally block.  This means that there is
1061    no outgoing edge corresponding to any incoming edge.  Restructure the
1062    try_finally node for this special case.  */
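
/* For illustration (a sketch with hypothetical calls): in

	try { if (p) goto out; compute (); } finally { for (;;) wait (); }

   the finally never falls through, so no destination past it can be
   reached.  A single copy of the finally is emitted behind an artificial
   label, and the fallthru, the queued goto and (if needed) the EH edge
   are all simply redirected to that label.  */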
1063 
1064 static void
1065 lower_try_finally_nofallthru (struct leh_state *state,
1066 			      struct leh_tf_state *tf)
1067 {
1068   tree lab;
1069   gimple *x;
1070   geh_else *eh_else;
1071   gimple_seq finally;
1072   struct goto_queue_node *q, *qe;
1073 
1074   lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1075 
1076   /* We expect that tf->top_p is a GIMPLE_TRY. */
1077   finally = gimple_try_cleanup (tf->top_p);
1078   tf->top_p_seq = gimple_try_eval (tf->top_p);
1079 
1080   x = gimple_build_label (lab);
1081   gimple_seq_add_stmt (&tf->top_p_seq, x);
1082 
1083   q = tf->goto_queue;
1084   qe = q + tf->goto_queue_active;
1085   for (; q < qe; ++q)
1086     if (q->index < 0)
1087       do_return_redirection (q, lab, NULL);
1088     else
1089       do_goto_redirection (q, lab, NULL, tf);
1090 
1091   replace_goto_queue (tf);
1092 
1093   /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
1094   eh_else = get_eh_else (finally);
1095   if (eh_else)
1096     {
1097       finally = gimple_eh_else_n_body (eh_else);
1098       lower_eh_constructs_1 (state, &finally);
1099       gimple_seq_add_seq (&tf->top_p_seq, finally);
1100 
1101       if (tf->may_throw)
1102 	{
1103 	  finally = gimple_eh_else_e_body (eh_else);
1104 	  lower_eh_constructs_1 (state, &finally);
1105 
1106 	  emit_post_landing_pad (&eh_seq, tf->region);
1107 	  gimple_seq_add_seq (&eh_seq, finally);
1108 	}
1109     }
1110   else
1111     {
1112       lower_eh_constructs_1 (state, &finally);
1113       gimple_seq_add_seq (&tf->top_p_seq, finally);
1114 
1115       if (tf->may_throw)
1116 	{
1117 	  emit_post_landing_pad (&eh_seq, tf->region);
1118 
1119 	  x = gimple_build_goto (lab);
1120 	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
1121 	  gimple_seq_add_stmt (&eh_seq, x);
1122 	}
1123     }
1124 }
1125 
1126 /* A subroutine of lower_try_finally.  We have determined that there is
1127    exactly one destination of the finally block.  Restructure the
1128    try_finally node for this special case.  */
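
/* For illustration (a sketch): in

	try { return; } finally { f (); }

   the return is the only way out, so one copy of the finally is emitted
   inline behind a fresh label, the return is redirected to that label,
   and the original return (the queued CONT_STMT) is re-emitted directly
   after the finally code:

	goto finally_lab;  ...  finally_lab: f (); return;  */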
1129 
1130 static void
1131 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1132 {
1133   struct goto_queue_node *q, *qe;
1134   geh_else *eh_else;
1135   glabel *label_stmt;
1136   gimple *x;
1137   gimple_seq finally;
1138   gimple_stmt_iterator gsi;
1139   tree finally_label;
1140   location_t loc = gimple_location (tf->try_finally_expr);
1141 
1142   finally = gimple_try_cleanup (tf->top_p);
1143   tf->top_p_seq = gimple_try_eval (tf->top_p);
1144 
1145   /* Since there's only one destination, and the destination edge can only
1146      either be EH or non-EH, that implies that all of our incoming edges
1147      are of the same type.  Therefore we can lower EH_ELSE immediately.  */
1148   eh_else = get_eh_else (finally);
1149   if (eh_else)
1150     {
1151       if (tf->may_throw)
1152 	finally = gimple_eh_else_e_body (eh_else);
1153       else
1154 	finally = gimple_eh_else_n_body (eh_else);
1155     }
1156 
1157   lower_eh_constructs_1 (state, &finally);
1158 
1159   for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1160     {
1161       gimple *stmt = gsi_stmt (gsi);
1162       if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1163 	{
1164 	  tree block = gimple_block (stmt);
1165 	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1166 	  gimple_set_block (stmt, block);
1167 	}
1168     }
1169 
1170   if (tf->may_throw)
1171     {
1172       /* Only reachable via the exception edge.  Add the given label to
1173          the head of the FINALLY block.  Append a RESX at the end.  */
1174       emit_post_landing_pad (&eh_seq, tf->region);
1175       gimple_seq_add_seq (&eh_seq, finally);
1176       emit_resx (&eh_seq, tf->region);
1177       return;
1178     }
1179 
1180   if (tf->may_fallthru)
1181     {
1182       /* Only reachable via the fallthru edge.  Do nothing but let
1183 	 the two blocks run together; we'll fall out the bottom.  */
1184       gimple_seq_add_seq (&tf->top_p_seq, finally);
1185       return;
1186     }
1187 
1188   finally_label = create_artificial_label (loc);
1189   label_stmt = gimple_build_label (finally_label);
1190   gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);
1191 
1192   gimple_seq_add_seq (&tf->top_p_seq, finally);
1193 
1194   q = tf->goto_queue;
1195   qe = q + tf->goto_queue_active;
1196 
1197   if (tf->may_return)
1198     {
1199       /* Reachable by return expressions only.  Redirect them.  */
1200       for (; q < qe; ++q)
1201 	do_return_redirection (q, finally_label, NULL);
1202       replace_goto_queue (tf);
1203     }
1204   else
1205     {
1206       /* Reachable by goto expressions only.  Redirect them.  */
1207       for (; q < qe; ++q)
1208 	do_goto_redirection (q, finally_label, NULL, tf);
1209       replace_goto_queue (tf);
1210 
1211       if (tf->dest_array[0] == tf->fallthru_label)
1212 	{
1213 	  /* Reachable by goto to fallthru label only.  Redirect it
1214 	     to the new label (already created, sadly), and do not
1215 	     emit the final branch out, or the fallthru label.  */
1216 	  tf->fallthru_label = NULL;
1217 	  return;
1218 	}
1219     }
1220 
1221   /* Place the original return/goto to the original destination
1222      immediately after the finally block. */
1223   x = tf->goto_queue[0].cont_stmt;
1224   gimple_seq_add_stmt (&tf->top_p_seq, x);
1225   maybe_record_in_goto_queue (state, x);
1226 }
1227 
1228 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1229    and outgoing from the finally block.  Implement this by duplicating the
1230    finally block for every destination.  */
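
/* For illustration (a sketch): with two distinct destinations, as in

	try { if (p) goto out; return; } finally { f (); }

   this strategy emits one copy of "f ();" followed by the original goto
   on the path to "out", a second copy followed by the original return on
   the return path, and (when the try can throw) a further copy on the EH
   path, so that no runtime dispatch is needed, at the cost of duplicating
   the finally block.  */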
1231 
1232 static void
1233 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1234 {
1235   gimple_seq finally;
1236   gimple_seq new_stmt;
1237   gimple_seq seq;
1238   gimple *x;
1239   geh_else *eh_else;
1240   tree tmp;
1241   location_t tf_loc = gimple_location (tf->try_finally_expr);
1242 
1243   finally = gimple_try_cleanup (tf->top_p);
1244 
1245   /* Notice EH_ELSE, and simplify some of the remaining code
1246      by considering FINALLY to be the normal return path only.  */
1247   eh_else = get_eh_else (finally);
1248   if (eh_else)
1249     finally = gimple_eh_else_n_body (eh_else);
1250 
1251   tf->top_p_seq = gimple_try_eval (tf->top_p);
1252   new_stmt = NULL;
1253 
1254   if (tf->may_fallthru)
1255     {
1256       seq = lower_try_finally_dup_block (finally, state, tf_loc);
1257       lower_eh_constructs_1 (state, &seq);
1258       gimple_seq_add_seq (&new_stmt, seq);
1259 
1260       tmp = lower_try_finally_fallthru_label (tf);
1261       x = gimple_build_goto (tmp);
1262       gimple_set_location (x, tf_loc);
1263       gimple_seq_add_stmt (&new_stmt, x);
1264     }
1265 
1266   if (tf->may_throw)
1267     {
1268       /* We don't need to copy the EH path of EH_ELSE,
1269 	 since it is only emitted once.  */
1270       if (eh_else)
1271 	seq = gimple_eh_else_e_body (eh_else);
1272       else
1273 	seq = lower_try_finally_dup_block (finally, state, tf_loc);
1274       lower_eh_constructs_1 (state, &seq);
1275 
1276       emit_post_landing_pad (&eh_seq, tf->region);
1277       gimple_seq_add_seq (&eh_seq, seq);
1278       emit_resx (&eh_seq, tf->region);
1279     }
1280 
1281   if (tf->goto_queue)
1282     {
1283       struct goto_queue_node *q, *qe;
1284       int return_index, index;
1285       struct labels_s
1286       {
1287 	struct goto_queue_node *q;
1288 	tree label;
1289       } *labels;
1290 
1291       return_index = tf->dest_array.length ();
1292       labels = XCNEWVEC (struct labels_s, return_index + 1);
1293 
1294       q = tf->goto_queue;
1295       qe = q + tf->goto_queue_active;
1296       for (; q < qe; q++)
1297 	{
1298 	  index = q->index < 0 ? return_index : q->index;
1299 
1300 	  if (!labels[index].q)
1301 	    labels[index].q = q;
1302 	}
1303 
1304       for (index = 0; index < return_index + 1; index++)
1305 	{
1306 	  tree lab;
1307 
1308 	  q = labels[index].q;
1309 	  if (! q)
1310 	    continue;
1311 
1312 	  lab = labels[index].label
1313 	    = create_artificial_label (tf_loc);
1314 
1315 	  if (index == return_index)
1316 	    do_return_redirection (q, lab, NULL);
1317 	  else
1318 	    do_goto_redirection (q, lab, NULL, tf);
1319 
1320 	  x = gimple_build_label (lab);
1321           gimple_seq_add_stmt (&new_stmt, x);
1322 
1323 	  seq = lower_try_finally_dup_block (finally, state, q->location);
1324 	  lower_eh_constructs_1 (state, &seq);
1325           gimple_seq_add_seq (&new_stmt, seq);
1326 
1327           gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1328 	  maybe_record_in_goto_queue (state, q->cont_stmt);
1329 	}
1330 
1331       for (q = tf->goto_queue; q < qe; q++)
1332 	{
1333 	  tree lab;
1334 
1335 	  index = q->index < 0 ? return_index : q->index;
1336 
1337 	  if (labels[index].q == q)
1338 	    continue;
1339 
1340 	  lab = labels[index].label;
1341 
1342 	  if (index == return_index)
1343 	    do_return_redirection (q, lab, NULL);
1344 	  else
1345 	    do_goto_redirection (q, lab, NULL, tf);
1346 	}
1347 
1348       replace_goto_queue (tf);
1349       free (labels);
1350     }
1351 
1352   /* Need to link new stmts after running replace_goto_queue due
1353      to not wanting to process the same goto stmts twice.  */
1354   gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1355 }
1356 
1357 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1358    and outgoing from the finally block.  Implement this by instrumenting
1359    each incoming edge and creating a switch statement at the end of the
1360    finally block that branches to the appropriate destination.  */
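
/* For illustration (a rough sketch of the result, with made-up indices):
   for the same two-destination example,

	try { if (p) goto out; return; } finally { f (); }

   each incoming edge stores a selector before jumping to the shared copy
   of the finally, and a switch at its end fans back out:

	finally_tmp = 0; goto finally_lab;	<- from the goto
	finally_tmp = 1; goto finally_lab;	<- from the return
	finally_lab:
	  f ();
	  switch (finally_tmp) { case 0: goto out; default: return; }

   The real index values, the choice of default label and the case order
   are decided by the code below; this only conveys the shape.  */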
1361 
1362 static void
1363 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1364 {
1365   struct goto_queue_node *q, *qe;
1366   tree finally_tmp, finally_label;
1367   int return_index, eh_index, fallthru_index;
1368   int nlabels, ndests, j, last_case_index;
1369   tree last_case;
1370   auto_vec<tree> case_label_vec;
1371   gimple_seq switch_body = NULL;
1372   gimple *x;
1373   geh_else *eh_else;
1374   tree tmp;
1375   gimple *switch_stmt;
1376   gimple_seq finally;
1377   hash_map<tree, gimple *> *cont_map = NULL;
1378   /* The location of the TRY_FINALLY stmt.  */
1379   location_t tf_loc = gimple_location (tf->try_finally_expr);
1380   /* The location of the finally block.  */
1381   location_t finally_loc;
1382 
1383   finally = gimple_try_cleanup (tf->top_p);
1384   eh_else = get_eh_else (finally);
1385 
1386   /* Mash the TRY block to the head of the chain.  */
1387   tf->top_p_seq = gimple_try_eval (tf->top_p);
1388 
1389   /* The location of the finally is either that of the last stmt in the
1390      finally block or the location of the TRY_FINALLY itself.  */
1391   x = gimple_seq_last_stmt (finally);
1392   finally_loc = x ? gimple_location (x) : tf_loc;
1393 
1394   /* Prepare for switch statement generation.  */
1395   nlabels = tf->dest_array.length ();
1396   return_index = nlabels;
1397   eh_index = return_index + tf->may_return;
1398   fallthru_index = eh_index + (tf->may_throw && !eh_else);
1399   ndests = fallthru_index + tf->may_fallthru;
1400 
1401   finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1402   finally_label = create_artificial_label (finally_loc);
1403 
1404   /* We use vec::quick_push on case_label_vec throughout this function,
1405      since we know the size in advance and allocate precisely as much
1406      space as needed.  */
1407   case_label_vec.create (ndests);
1408   last_case = NULL;
1409   last_case_index = 0;
1410 
1411   /* Begin inserting code for getting to the finally block.  Things
1412      are done in this order to correspond to the sequence the code is
1413      laid out.  */
1414 
1415   if (tf->may_fallthru)
1416     {
1417       x = gimple_build_assign (finally_tmp,
1418 			       build_int_cst (integer_type_node,
1419 					      fallthru_index));
1420       gimple_set_location (x, finally_loc);
1421       gimple_seq_add_stmt (&tf->top_p_seq, x);
1422 
1423       tmp = build_int_cst (integer_type_node, fallthru_index);
1424       last_case = build_case_label (tmp, NULL,
1425 				    create_artificial_label (finally_loc));
1426       case_label_vec.quick_push (last_case);
1427       last_case_index++;
1428 
1429       x = gimple_build_label (CASE_LABEL (last_case));
1430       gimple_seq_add_stmt (&switch_body, x);
1431 
1432       tmp = lower_try_finally_fallthru_label (tf);
1433       x = gimple_build_goto (tmp);
1434       gimple_set_location (x, finally_loc);
1435       gimple_seq_add_stmt (&switch_body, x);
1436     }
1437 
1438   /* For EH_ELSE, emit the exception path (plus resx) now, then
1439      subsequently we only need consider the normal path.  */
1440   if (eh_else)
1441     {
1442       if (tf->may_throw)
1443 	{
1444 	  finally = gimple_eh_else_e_body (eh_else);
1445 	  lower_eh_constructs_1 (state, &finally);
1446 
1447 	  emit_post_landing_pad (&eh_seq, tf->region);
1448 	  gimple_seq_add_seq (&eh_seq, finally);
1449 	  emit_resx (&eh_seq, tf->region);
1450 	}
1451 
1452       finally = gimple_eh_else_n_body (eh_else);
1453     }
1454   else if (tf->may_throw)
1455     {
1456       emit_post_landing_pad (&eh_seq, tf->region);
1457 
1458       x = gimple_build_assign (finally_tmp,
1459 			       build_int_cst (integer_type_node, eh_index));
1460       gimple_seq_add_stmt (&eh_seq, x);
1461 
1462       x = gimple_build_goto (finally_label);
1463       gimple_set_location (x, tf_loc);
1464       gimple_seq_add_stmt (&eh_seq, x);
1465 
1466       tmp = build_int_cst (integer_type_node, eh_index);
1467       last_case = build_case_label (tmp, NULL,
1468 				    create_artificial_label (tf_loc));
1469       case_label_vec.quick_push (last_case);
1470       last_case_index++;
1471 
1472       x = gimple_build_label (CASE_LABEL (last_case));
1473       gimple_seq_add_stmt (&eh_seq, x);
1474       emit_resx (&eh_seq, tf->region);
1475     }
1476 
1477   x = gimple_build_label (finally_label);
1478   gimple_seq_add_stmt (&tf->top_p_seq, x);
1479 
1480   lower_eh_constructs_1 (state, &finally);
1481   gimple_seq_add_seq (&tf->top_p_seq, finally);
1482 
1483   /* Redirect each incoming goto edge.  */
1484   q = tf->goto_queue;
1485   qe = q + tf->goto_queue_active;
1486   j = last_case_index + tf->may_return;
1487   /* Prepare the assignments to finally_tmp that are executed upon the
1488      entrance through a particular edge. */
1489   for (; q < qe; ++q)
1490     {
1491       gimple_seq mod = NULL;
1492       int switch_id;
1493       unsigned int case_index;
1494 
1495       if (q->index < 0)
1496 	{
1497 	  x = gimple_build_assign (finally_tmp,
1498 				   build_int_cst (integer_type_node,
1499 						  return_index));
1500 	  gimple_seq_add_stmt (&mod, x);
1501 	  do_return_redirection (q, finally_label, mod);
1502 	  switch_id = return_index;
1503 	}
1504       else
1505 	{
1506 	  x = gimple_build_assign (finally_tmp,
1507 				   build_int_cst (integer_type_node, q->index));
1508 	  gimple_seq_add_stmt (&mod, x);
1509 	  do_goto_redirection (q, finally_label, mod, tf);
1510 	  switch_id = q->index;
1511 	}
1512 
1513       case_index = j + q->index;
1514       if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1515         {
1516           tree case_lab;
1517 	  tmp = build_int_cst (integer_type_node, switch_id);
1518           case_lab = build_case_label (tmp, NULL,
1519 				       create_artificial_label (tf_loc));
1520           /* We store the cont_stmt in the pointer map, so that we can recover
1521              it in the loop below.  */
1522           if (!cont_map)
1523 	    cont_map = new hash_map<tree, gimple *>;
1524           cont_map->put (case_lab, q->cont_stmt);
1525           case_label_vec.quick_push (case_lab);
1526         }
1527     }
1528   for (j = last_case_index; j < last_case_index + nlabels; j++)
1529     {
1530       gimple *cont_stmt;
1531 
1532       last_case = case_label_vec[j];
1533 
1534       gcc_assert (last_case);
1535       gcc_assert (cont_map);
1536 
1537       cont_stmt = *cont_map->get (last_case);
1538 
1539       x = gimple_build_label (CASE_LABEL (last_case));
1540       gimple_seq_add_stmt (&switch_body, x);
1541       gimple_seq_add_stmt (&switch_body, cont_stmt);
1542       maybe_record_in_goto_queue (state, cont_stmt);
1543     }
1544   if (cont_map)
1545     delete cont_map;
1546 
1547   replace_goto_queue (tf);
1548 
1549   /* Make sure that the last case is the default label, as one is required.
1550      Then sort the labels, which is also required in GIMPLE.  */
1551   CASE_LOW (last_case) = NULL;
1552   tree tem = case_label_vec.pop ();
1553   gcc_assert (tem == last_case);
1554   sort_case_labels (case_label_vec);
1555 
1556   /* Build the switch statement, setting last_case to be the default
1557      label.  */
1558   switch_stmt = gimple_build_switch (finally_tmp, last_case,
1559 				     case_label_vec);
1560   gimple_set_location (switch_stmt, finally_loc);
1561 
1562   /* Need to link SWITCH_STMT after running replace_goto_queue
1563      due to not wanting to process the same goto stmts twice.  */
1564   gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1565   gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1566 }
1567 
1568 /* Decide whether or not we are going to duplicate the finally block.
1569    There are several considerations.
1570 
1571    Chiefly, we'd like to prevent egregious code growth.  One way to
1572    do this is to estimate the size of the finally block, multiply
1573    that by the number of copies we'd need to make, and compare against
1574    the estimate of the size of the switch machinery we'd have to add.  */
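
/* For illustration (made-up numbers, mirroring the formulas below): a
   finally block estimated at 10 instructions with 3 destinations gives
   f_estimate = (10 + 1) * 3 = 33 against sw_estimate = 10 + 2 * 3 = 16,
   so when optimizing for size the switch form wins; a 3-instruction
   finally gives (3 + 1) * 3 = 12 < 16 and is duplicated instead.  */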
1575 
1576 static bool
1577 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1578 {
1579   int f_estimate, sw_estimate;
1580   geh_else *eh_else;
1581 
1582   /* If there's an EH_ELSE involved, the exception path is separate
1583      and really doesn't come into play for this computation.  */
1584   eh_else = get_eh_else (finally);
1585   if (eh_else)
1586     {
1587       ndests -= may_throw;
1588       finally = gimple_eh_else_n_body (eh_else);
1589     }
1590 
1591   if (!optimize)
1592     {
1593       gimple_stmt_iterator gsi;
1594 
1595       if (ndests == 1)
1596         return true;
1597 
1598       for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1599 	{
1600 	  /* Duplicate __builtin_stack_restore in the hope of eliminating it
1601 	     on the EH paths and, consequently, useless cleanups.  */
1602 	  gimple *stmt = gsi_stmt (gsi);
1603 	  if (!is_gimple_debug (stmt)
1604 	      && !gimple_clobber_p (stmt)
1605 	      && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1606 	    return false;
1607 	}
1608       return true;
1609     }
1610 
1611   /* Finally estimate N times, plus N gotos.  */
1612   f_estimate = estimate_num_insns_seq (finally, &eni_size_weights);
1613   f_estimate = (f_estimate + 1) * ndests;
1614 
1615   /* Switch statement (cost 10), N variable assignments, N gotos.  */
1616   sw_estimate = 10 + 2 * ndests;
1617 
1618   /* Optimize for size clearly wants our best guess.  */
1619   if (optimize_function_for_size_p (cfun))
1620     return f_estimate < sw_estimate;
1621 
1622   /* ??? These numbers are completely made up so far.  */
1623   if (optimize > 1)
1624     return f_estimate < 100 || f_estimate < sw_estimate * 2;
1625   else
1626     return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1627 }
1628 
1629 /* REG is the enclosing region for a possible cleanup region, or the region
1630    itself.  Returns TRUE if such a region would be unreachable.
1631 
1632    Cleanup regions within a must-not-throw region aren't actually reachable
1633    even if there are throwing stmts within them, because the personality
1634    routine will call terminate before unwinding.  */
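
/* For illustration (C++, a sketch): in

	void f () noexcept
	{
	  std::string s;	// destructor cleanup region
	  g ();			// enclosing region is MUST_NOT_THROW
	}

   the whole body sits inside a MUST_NOT_THROW region, so if g throws the
   personality routine calls terminate rather than unwinding; the cleanup
   region for the destructor of "s" is therefore unreachable.  */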
1635 
1636 static bool
1637 cleanup_is_dead_in (eh_region reg)
1638 {
1639   while (reg && reg->type == ERT_CLEANUP)
1640     reg = reg->outer;
1641   return (reg && reg->type == ERT_MUST_NOT_THROW);
1642 }
1643 
1644 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY nodes
1645    to a sequence of labels and blocks, plus the exception region trees
1646    that record all the magic.  This is complicated by the need to
1647    arrange for the FINALLY block to be executed on all exits.  */
1648 
1649 static gimple_seq
1650 lower_try_finally (struct leh_state *state, gtry *tp)
1651 {
1652   struct leh_tf_state this_tf;
1653   struct leh_state this_state;
1654   int ndests;
1655   gimple_seq old_eh_seq;
1656 
1657   /* Process the try block.  */
1658 
1659   memset (&this_tf, 0, sizeof (this_tf));
1660   this_tf.try_finally_expr = tp;
1661   this_tf.top_p = tp;
1662   this_tf.outer = state;
1663   if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
1664     {
1665       this_tf.region = gen_eh_region_cleanup (state->cur_region);
1666       this_state.cur_region = this_tf.region;
1667     }
1668   else
1669     {
1670       this_tf.region = NULL;
1671       this_state.cur_region = state->cur_region;
1672     }
1673 
1674   this_state.ehp_region = state->ehp_region;
1675   this_state.tf = &this_tf;
1676 
1677   old_eh_seq = eh_seq;
1678   eh_seq = NULL;
1679 
1680   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1681 
1682   /* Determine if the try block is escaped through the bottom.  */
1683   this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1684 
1685   /* Determine if any exceptions are possible within the try block.  */
1686   if (this_tf.region)
1687     this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1688   if (this_tf.may_throw)
1689     honor_protect_cleanup_actions (state, &this_state, &this_tf);
1690 
1691   /* Determine how many edges (still) reach the finally block.  Or rather,
1692      how many destinations are reached by the finally block.  Use this to
1693      determine how we process the finally block itself.  */
1694 
1695   ndests = this_tf.dest_array.length ();
1696   ndests += this_tf.may_fallthru;
1697   ndests += this_tf.may_return;
1698   ndests += this_tf.may_throw;
1699 
1700   /* If the FINALLY block is not reachable, dike it out.  */
1701   if (ndests == 0)
1702     {
1703       gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1704       gimple_try_set_cleanup (tp, NULL);
1705     }
1706   /* If the finally block doesn't fall through, then any destination
1707      we might try to impose there isn't reached either.  There may be
1708      some minor amount of cleanup and redirection still needed.  */
1709   else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1710     lower_try_finally_nofallthru (state, &this_tf);
1711 
1712   /* We can easily special-case redirection to a single destination.  */
1713   else if (ndests == 1)
1714     lower_try_finally_onedest (state, &this_tf);
1715   else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1716 				    gimple_try_cleanup (tp)))
1717     lower_try_finally_copy (state, &this_tf);
1718   else
1719     lower_try_finally_switch (state, &this_tf);
1720 
1721   /* If someone requested we add a label at the end of the transformed
1722      block, do so.  */
1723   if (this_tf.fallthru_label)
1724     {
1725       /* This must be reached only if ndests == 0. */
1726       gimple *x = gimple_build_label (this_tf.fallthru_label);
1727       gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1728     }
1729 
1730   this_tf.dest_array.release ();
1731   free (this_tf.goto_queue);
1732   if (this_tf.goto_queue_map)
1733     delete this_tf.goto_queue_map;
1734 
1735   /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1736      If there was no old eh_seq, then the append is trivially already done.  */
1737   if (old_eh_seq)
1738     {
1739       if (eh_seq == NULL)
1740 	eh_seq = old_eh_seq;
1741       else
1742 	{
1743 	  gimple_seq new_eh_seq = eh_seq;
1744 	  eh_seq = old_eh_seq;
1745 	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1746 	}
1747     }
1748 
1749   return this_tf.top_p_seq;
1750 }
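
/* Illustrative sketch (hypothetical GIMPLE, conceptual only): with the
   copy strategy,

     try { if (p) return; x = 1; } finally { fin (); }

   becomes roughly

     if (p) { fin (); return; }
     x = 1;
     fin ();

   i.e. one copy of the finally body per reachable destination, whereas
   the switch strategy keeps a single copy and dispatches on a
   temporary, as sketched after lower_try_finally_switch above.  */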
1751 
1752 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
1753    list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1754    exception region trees that record all the magic.  */
1755 
1756 static gimple_seq
1757 lower_catch (struct leh_state *state, gtry *tp)
1758 {
1759   eh_region try_region = NULL;
1760   struct leh_state this_state = *state;
1761   gimple_stmt_iterator gsi;
1762   tree out_label;
1763   gimple_seq new_seq, cleanup;
1764   gimple *x;
1765   location_t try_catch_loc = gimple_location (tp);
1766 
1767   if (flag_exceptions)
1768     {
1769       try_region = gen_eh_region_try (state->cur_region);
1770       this_state.cur_region = try_region;
1771     }
1772 
1773   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1774 
1775   if (!eh_region_may_contain_throw (try_region))
1776     return gimple_try_eval (tp);
1777 
1778   new_seq = NULL;
1779   emit_eh_dispatch (&new_seq, try_region);
1780   emit_resx (&new_seq, try_region);
1781 
1782   this_state.cur_region = state->cur_region;
1783   this_state.ehp_region = try_region;
1784 
1785   /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1786      itself, so that e.g. for coverage purposes the nested cleanups don't
1787      appear before the cleanup body.  See PR64634 for details.  */
1788   gimple_seq old_eh_seq = eh_seq;
1789   eh_seq = NULL;
1790 
1791   out_label = NULL;
1792   cleanup = gimple_try_cleanup (tp);
1793   for (gsi = gsi_start (cleanup);
1794        !gsi_end_p (gsi);
1795        gsi_next (&gsi))
1796     {
1797       eh_catch c;
1798       gcatch *catch_stmt;
1799       gimple_seq handler;
1800 
1801       catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1802       c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1803 
1804       handler = gimple_catch_handler (catch_stmt);
1805       lower_eh_constructs_1 (&this_state, &handler);
1806 
1807       c->label = create_artificial_label (UNKNOWN_LOCATION);
1808       x = gimple_build_label (c->label);
1809       gimple_seq_add_stmt (&new_seq, x);
1810 
1811       gimple_seq_add_seq (&new_seq, handler);
1812 
1813       if (gimple_seq_may_fallthru (new_seq))
1814 	{
1815 	  if (!out_label)
1816 	    out_label = create_artificial_label (try_catch_loc);
1817 
1818 	  x = gimple_build_goto (out_label);
1819 	  gimple_seq_add_stmt (&new_seq, x);
1820 	}
1821       if (!c->type_list)
1822 	break;
1823     }
1824 
1825   gimple_try_set_cleanup (tp, new_seq);
1826 
1827   gimple_seq new_eh_seq = eh_seq;
1828   eh_seq = old_eh_seq;
1829   gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1830   gimple_seq_add_seq (&eh_seq, new_eh_seq);
1831   return ret_seq;
1832 }
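
/* Illustrative sketch (hypothetical GIMPLE, names are placeholders):
   for

     try { body (); } catch (T) { handler (); }

   the cleanup sequence built above is roughly

     eh_dispatch <region>;
     resx <region>;
     LT: handler (); goto Lout;

   and make_eh_dispatch_edges later turns the eh_dispatch statement
   into edges to each catch label.  */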
1833 
1834 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
1835    GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1836    region trees that record all the magic.  */
1837 
1838 static gimple_seq
1839 lower_eh_filter (struct leh_state *state, gtry *tp)
1840 {
1841   struct leh_state this_state = *state;
1842   eh_region this_region = NULL;
1843   gimple *inner, *x;
1844   gimple_seq new_seq;
1845 
1846   inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1847 
1848   if (flag_exceptions)
1849     {
1850       this_region = gen_eh_region_allowed (state->cur_region,
1851 				           gimple_eh_filter_types (inner));
1852       this_state.cur_region = this_region;
1853     }
1854 
1855   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1856 
1857   if (!eh_region_may_contain_throw (this_region))
1858     return gimple_try_eval (tp);
1859 
1860   new_seq = NULL;
1861   this_state.cur_region = state->cur_region;
1862   this_state.ehp_region = this_region;
1863 
1864   emit_eh_dispatch (&new_seq, this_region);
1865   emit_resx (&new_seq, this_region);
1866 
1867   this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1868   x = gimple_build_label (this_region->u.allowed.label);
1869   gimple_seq_add_stmt (&new_seq, x);
1870 
1871   lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1872   gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1873 
1874   gimple_try_set_cleanup (tp, new_seq);
1875 
1876   return frob_into_branch_around (tp, this_region, NULL);
1877 }
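
/* Illustrative sketch (hypothetical GIMPLE): an exception specification

     try { body (); } eh_filter (T) { failure (); }

   gets a cleanup of roughly

     eh_dispatch <region>;
     resx <region>;
     Lallowed: failure ();

   mirroring the catch lowering above, but with a single "allowed
   exceptions" label instead of one label per handler.  */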
1878 
1879 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
1880    a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1881    plus the exception region trees that record all the magic.  */
1882 
1883 static gimple_seq
1884 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1885 {
1886   struct leh_state this_state = *state;
1887 
1888   if (flag_exceptions)
1889     {
1890       gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1891       eh_region this_region;
1892 
1893       this_region = gen_eh_region_must_not_throw (state->cur_region);
1894       this_region->u.must_not_throw.failure_decl
1895 	= gimple_eh_must_not_throw_fndecl (
1896 	    as_a <geh_mnt *> (inner));
1897       this_region->u.must_not_throw.failure_loc
1898 	= LOCATION_LOCUS (gimple_location (tp));
1899 
1900       /* In order to get mangling applied to this decl, we must mark it
1901 	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
1902 	 needs to happen.  */
1903       TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1904 
1905       this_state.cur_region = this_region;
1906     }
1907 
1908   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1909 
1910   return gimple_try_eval (tp);
1911 }
1912 
1913 /* Implement a cleanup expression.  This is similar to try-finally,
1914    except that we only execute the cleanup block for exception edges.  */
1915 
1916 static gimple_seq
1917 lower_cleanup (struct leh_state *state, gtry *tp)
1918 {
1919   struct leh_state this_state = *state;
1920   eh_region this_region = NULL;
1921   struct leh_tf_state fake_tf;
1922   gimple_seq result;
1923   bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1924 
1925   if (flag_exceptions && !cleanup_dead)
1926     {
1927       this_region = gen_eh_region_cleanup (state->cur_region);
1928       this_state.cur_region = this_region;
1929     }
1930 
1931   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1932 
1933   if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1934     return gimple_try_eval (tp);
1935 
1936   /* Build enough of a try-finally state so that we can reuse
1937      honor_protect_cleanup_actions.  */
1938   memset (&fake_tf, 0, sizeof (fake_tf));
1939   fake_tf.top_p = fake_tf.try_finally_expr = tp;
1940   fake_tf.outer = state;
1941   fake_tf.region = this_region;
1942   fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1943   fake_tf.may_throw = true;
1944 
1945   honor_protect_cleanup_actions (state, NULL, &fake_tf);
1946 
1947   if (fake_tf.may_throw)
1948     {
1949       /* In this case honor_protect_cleanup_actions had nothing to do,
1950 	 and we should process this normally.  */
1951       lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1952       result = frob_into_branch_around (tp, this_region,
1953                                         fake_tf.fallthru_label);
1954     }
1955   else
1956     {
1957       /* In this case honor_protect_cleanup_actions did nearly all of
1958 	 the work.  All we have left is to append the fallthru_label.  */
1959 
1960       result = gimple_try_eval (tp);
1961       if (fake_tf.fallthru_label)
1962 	{
1963 	  gimple *x = gimple_build_label (fake_tf.fallthru_label);
1964 	  gimple_seq_add_stmt (&result, x);
1965 	}
1966     }
1967   return result;
1968 }
1969 
1970 /* Main loop for lowering eh constructs. Also moves gsi to the next
1971    statement. */
1972 
1973 static void
1974 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1975 {
1976   gimple_seq replace;
1977   gimple *x;
1978   gimple *stmt = gsi_stmt (*gsi);
1979 
1980   switch (gimple_code (stmt))
1981     {
1982     case GIMPLE_CALL:
1983       {
1984 	tree fndecl = gimple_call_fndecl (stmt);
1985 	tree rhs, lhs;
1986 
1987 	if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
1988 	  switch (DECL_FUNCTION_CODE (fndecl))
1989 	    {
1990 	    case BUILT_IN_EH_POINTER:
1991 	      /* The front end may have generated a call to
1992 		 __builtin_eh_pointer (0) within a catch region.  Replace
1993 		 this zero argument with the current catch region number.  */
1994 	      if (state->ehp_region)
1995 		{
1996 		  tree nr = build_int_cst (integer_type_node,
1997 					   state->ehp_region->index);
1998 		  gimple_call_set_arg (stmt, 0, nr);
1999 		}
2000 	      else
2001 		{
2002 		  /* The user has done something silly.  Remove it.  */
2003 		  rhs = null_pointer_node;
2004 		  goto do_replace;
2005 		}
2006 	      break;
2007 
2008 	    case BUILT_IN_EH_FILTER:
2009 	      /* ??? This should never appear, but since it's a builtin it
2010 		 is accessible to abuse by users.  Just remove it and
2011 		 replace the use with the arbitrary value zero.  */
2012 	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2013 	    do_replace:
2014 	      lhs = gimple_call_lhs (stmt);
2015 	      x = gimple_build_assign (lhs, rhs);
2016 	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
2017 	      /* FALLTHRU */
2018 
2019 	    case BUILT_IN_EH_COPY_VALUES:
2020 	      /* Likewise this should not appear.  Remove it.  */
2021 	      gsi_remove (gsi, true);
2022 	      return;
2023 
2024 	    default:
2025 	      break;
2026 	    }
2027       }
2028       /* FALLTHRU */
2029 
2030     case GIMPLE_ASSIGN:
2031       /* If the stmt can throw, use a new temporary for the assignment
2032          to a LHS.  This makes sure the old value of the LHS is
2033 	 available on the EH edge.  Only do so for statements that
2034 	 potentially fall through (e.g. no noreturn calls); otherwise
2035 	 this new assignment might create fake fallthru regions.  */
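      /* For instance (illustrative only), a potentially-throwing

	   x = y / z;

	 is rewritten as

	   tmp = y / z;
	   x = tmp;

	 so the previous value of x remains available on the EH edge.  */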
2036       if (stmt_could_throw_p (cfun, stmt)
2037 	  && gimple_has_lhs (stmt)
2038 	  && gimple_stmt_may_fallthru (stmt)
2039 	  && !tree_could_throw_p (gimple_get_lhs (stmt))
2040 	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2041 	{
2042 	  tree lhs = gimple_get_lhs (stmt);
2043 	  tree tmp = create_tmp_var (TREE_TYPE (lhs));
2044 	  gimple *s = gimple_build_assign (lhs, tmp);
2045 	  gimple_set_location (s, gimple_location (stmt));
2046 	  gimple_set_block (s, gimple_block (stmt));
2047 	  gimple_set_lhs (stmt, tmp);
2048 	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2049 	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2050 	    DECL_GIMPLE_REG_P (tmp) = 1;
2051 	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
2052 	}
2053       /* Look for things that can throw exceptions, and record them.  */
2054       if (state->cur_region && stmt_could_throw_p (cfun, stmt))
2055 	{
2056 	  record_stmt_eh_region (state->cur_region, stmt);
2057 	  note_eh_region_may_contain_throw (state->cur_region);
2058 	}
2059       break;
2060 
2061     case GIMPLE_COND:
2062     case GIMPLE_GOTO:
2063     case GIMPLE_RETURN:
2064       maybe_record_in_goto_queue (state, stmt);
2065       break;
2066 
2067     case GIMPLE_SWITCH:
2068       verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2069       break;
2070 
2071     case GIMPLE_TRY:
2072       {
2073 	gtry *try_stmt = as_a <gtry *> (stmt);
2074 	if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2075 	  replace = lower_try_finally (state, try_stmt);
2076 	else
2077 	  {
2078 	    x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2079 	    if (!x)
2080 	      {
2081 		replace = gimple_try_eval (try_stmt);
2082 		lower_eh_constructs_1 (state, &replace);
2083 	      }
2084 	    else
2085 	      switch (gimple_code (x))
2086 		{
2087 		case GIMPLE_CATCH:
2088 		  replace = lower_catch (state, try_stmt);
2089 		  break;
2090 		case GIMPLE_EH_FILTER:
2091 		  replace = lower_eh_filter (state, try_stmt);
2092 		  break;
2093 		case GIMPLE_EH_MUST_NOT_THROW:
2094 		  replace = lower_eh_must_not_throw (state, try_stmt);
2095 		  break;
2096 		case GIMPLE_EH_ELSE:
2097 		  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
2098 		  gcc_unreachable ();
2099 		default:
2100 		  replace = lower_cleanup (state, try_stmt);
2101 		  break;
2102 		}
2103 	  }
2104       }
2105 
2106       /* Remove the old stmt and insert the transformed sequence
2107 	 instead. */
2108       gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2109       gsi_remove (gsi, true);
2110 
2111       /* Return early; gsi_remove already advanced GSI, so skip gsi_next ().  */
2112       return;
2113 
2114     case GIMPLE_EH_ELSE:
2115       /* We should be eliminating this in lower_try_finally et al.  */
2116       gcc_unreachable ();
2117 
2118     default:
2119       /* A type, a decl, or some kind of statement that we're not
2120 	 interested in.  Don't walk them.  */
2121       break;
2122     }
2123 
2124   gsi_next (gsi);
2125 }
2126 
2127 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2128 
2129 static void
2130 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2131 {
2132   gimple_stmt_iterator gsi;
2133   for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2134     lower_eh_constructs_2 (state, &gsi);
2135 }
2136 
2137 namespace {
2138 
2139 const pass_data pass_data_lower_eh =
2140 {
2141   GIMPLE_PASS, /* type */
2142   "eh", /* name */
2143   OPTGROUP_NONE, /* optinfo_flags */
2144   TV_TREE_EH, /* tv_id */
2145   PROP_gimple_lcf, /* properties_required */
2146   PROP_gimple_leh, /* properties_provided */
2147   0, /* properties_destroyed */
2148   0, /* todo_flags_start */
2149   0, /* todo_flags_finish */
2150 };
2151 
2152 class pass_lower_eh : public gimple_opt_pass
2153 {
2154 public:
2155   pass_lower_eh (gcc::context *ctxt)
2156     : gimple_opt_pass (pass_data_lower_eh, ctxt)
2157   {}
2158 
2159   /* opt_pass methods: */
2160   virtual unsigned int execute (function *);
2161 
2162 }; // class pass_lower_eh
2163 
2164 unsigned int
2165 pass_lower_eh::execute (function *fun)
2166 {
2167   struct leh_state null_state;
2168   gimple_seq bodyp;
2169 
2170   bodyp = gimple_body (current_function_decl);
2171   if (bodyp == NULL)
2172     return 0;
2173 
2174   finally_tree = new hash_table<finally_tree_hasher> (31);
2175   eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2176   memset (&null_state, 0, sizeof (null_state));
2177 
2178   collect_finally_tree_1 (bodyp, NULL);
2179   lower_eh_constructs_1 (&null_state, &bodyp);
2180   gimple_set_body (current_function_decl, bodyp);
2181 
2182   /* We assume there's a return statement, or something, at the end of
2183      the function, and thus plopping the EH sequence afterward won't
2184      change anything.  */
2185   gcc_assert (!gimple_seq_may_fallthru (bodyp));
2186   gimple_seq_add_seq (&bodyp, eh_seq);
2187 
2188   /* We assume that since BODYP already existed, adding EH_SEQ to it
2189      didn't change its value, and we don't have to re-set the function.  */
2190   gcc_assert (bodyp == gimple_body (current_function_decl));
2191 
2192   delete finally_tree;
2193   finally_tree = NULL;
2194   BITMAP_FREE (eh_region_may_contain_throw_map);
2195   eh_seq = NULL;
2196 
2197   /* If this function needs a language specific EH personality routine
2198      and the frontend didn't already set one, do so now.  */
2199   if (function_needs_eh_personality (fun) == eh_personality_lang
2200       && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2201     DECL_FUNCTION_PERSONALITY (current_function_decl)
2202       = lang_hooks.eh_personality ();
2203 
2204   return 0;
2205 }
2206 
2207 } // anon namespace
2208 
2209 gimple_opt_pass *
2210 make_pass_lower_eh (gcc::context *ctxt)
2211 {
2212   return new pass_lower_eh (ctxt);
2213 }
2214 
2215 /* Create the multiple edges from an EH_DISPATCH statement to all of
2216    the possible handlers for its EH region.  Return true if there's
2217    no fallthru edge; false if there is.  */
2218 
2219 bool
2220 make_eh_dispatch_edges (geh_dispatch *stmt)
2221 {
2222   eh_region r;
2223   eh_catch c;
2224   basic_block src, dst;
2225 
2226   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2227   src = gimple_bb (stmt);
2228 
2229   switch (r->type)
2230     {
2231     case ERT_TRY:
2232       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2233 	{
2234 	  dst = label_to_block (cfun, c->label);
2235 	  make_edge (src, dst, 0);
2236 
2237 	  /* A catch-all handler doesn't have a fallthru.  */
2238 	  if (c->type_list == NULL)
2239 	    return false;
2240 	}
2241       break;
2242 
2243     case ERT_ALLOWED_EXCEPTIONS:
2244       dst = label_to_block (cfun, r->u.allowed.label);
2245       make_edge (src, dst, 0);
2246       break;
2247 
2248     default:
2249       gcc_unreachable ();
2250     }
2251 
2252   return true;
2253 }
2254 
2255 /* Create the single EH edge from STMT to its nearest landing pad,
2256    if there is such a landing pad within the current function.  */
2257 
2258 void
2259 make_eh_edges (gimple *stmt)
2260 {
2261   basic_block src, dst;
2262   eh_landing_pad lp;
2263   int lp_nr;
2264 
2265   lp_nr = lookup_stmt_eh_lp (stmt);
2266   if (lp_nr <= 0)
2267     return;
2268 
2269   lp = get_eh_landing_pad_from_number (lp_nr);
2270   gcc_assert (lp != NULL);
2271 
2272   src = gimple_bb (stmt);
2273   dst = label_to_block (cfun, lp->post_landing_pad);
2274   make_edge (src, dst, EDGE_EH);
2275 }
2276 
2277 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2278    do not actually perform the final edge redirection.
2279 
2280    CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2281    we intend to change the destination EH region as well; this means
2282    EH_LANDING_PAD_NR must already be set on the destination block label.
2283    If false, we're being called from generic cfg manipulation code and we
2284    should preserve our place within the region tree.  */
2285 
2286 static void
2287 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2288 {
2289   eh_landing_pad old_lp, new_lp;
2290   basic_block old_bb;
2291   gimple *throw_stmt;
2292   int old_lp_nr, new_lp_nr;
2293   tree old_label, new_label;
2294   edge_iterator ei;
2295   edge e;
2296 
2297   old_bb = edge_in->dest;
2298   old_label = gimple_block_label (old_bb);
2299   old_lp_nr = EH_LANDING_PAD_NR (old_label);
2300   gcc_assert (old_lp_nr > 0);
2301   old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2302 
2303   throw_stmt = last_stmt (edge_in->src);
2304   gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2305 
2306   new_label = gimple_block_label (new_bb);
2307 
2308   /* Look for an existing region that might be using NEW_BB already.  */
2309   new_lp_nr = EH_LANDING_PAD_NR (new_label);
2310   if (new_lp_nr)
2311     {
2312       new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2313       gcc_assert (new_lp);
2314 
2315       /* Unless CHANGE_REGION is true, the new and old landing pad
2316 	 had better be associated with the same EH region.  */
2317       gcc_assert (change_region || new_lp->region == old_lp->region);
2318     }
2319   else
2320     {
2321       new_lp = NULL;
2322       gcc_assert (!change_region);
2323     }
2324 
2325   /* Notice when we redirect the last EH edge away from OLD_BB.  */
2326   FOR_EACH_EDGE (e, ei, old_bb->preds)
2327     if (e != edge_in && (e->flags & EDGE_EH))
2328       break;
2329 
2330   if (new_lp)
2331     {
2332       /* NEW_LP already exists.  If there are still edges into OLD_LP,
2333 	 there's nothing to do with the EH tree.  If there are no more
2334 	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2335 	 If CHANGE_REGION is true, then our caller is expecting to remove
2336 	 the landing pad.  */
2337       if (e == NULL && !change_region)
2338 	remove_eh_landing_pad (old_lp);
2339     }
2340   else
2341     {
2342       /* No correct landing pad exists.  If there are no more edges
2343 	 into OLD_LP, then we can simply re-use the existing landing pad.
2344 	 Otherwise, we have to create a new landing pad.  */
2345       if (e == NULL)
2346 	{
2347 	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2348 	  new_lp = old_lp;
2349 	}
2350       else
2351 	new_lp = gen_eh_landing_pad (old_lp->region);
2352       new_lp->post_landing_pad = new_label;
2353       EH_LANDING_PAD_NR (new_label) = new_lp->index;
2354     }
2355 
2356   /* Maybe move the throwing statement to the new region.  */
2357   if (old_lp != new_lp)
2358     {
2359       remove_stmt_from_eh_lp (throw_stmt);
2360       add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2361     }
2362 }
2363 
2364 /* Redirect EH edge E to NEW_BB.  */
2365 
2366 edge
2367 redirect_eh_edge (edge edge_in, basic_block new_bb)
2368 {
2369   redirect_eh_edge_1 (edge_in, new_bb, false);
2370   return ssa_redirect_edge (edge_in, new_bb);
2371 }
2372 
2373 /* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
2374    labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2375    The actual edge update will happen in the caller.  */
2376 
2377 void
2378 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2379 {
2380   tree new_lab = gimple_block_label (new_bb);
2381   bool any_changed = false;
2382   basic_block old_bb;
2383   eh_region r;
2384   eh_catch c;
2385 
2386   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2387   switch (r->type)
2388     {
2389     case ERT_TRY:
2390       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2391 	{
2392 	  old_bb = label_to_block (cfun, c->label);
2393 	  if (old_bb == e->dest)
2394 	    {
2395 	      c->label = new_lab;
2396 	      any_changed = true;
2397 	    }
2398 	}
2399       break;
2400 
2401     case ERT_ALLOWED_EXCEPTIONS:
2402       old_bb = label_to_block (cfun, r->u.allowed.label);
2403       gcc_assert (old_bb == e->dest);
2404       r->u.allowed.label = new_lab;
2405       any_changed = true;
2406       break;
2407 
2408     default:
2409       gcc_unreachable ();
2410     }
2411 
2412   gcc_assert (any_changed);
2413 }
2414 
2415 /* Helper function for operation_could_trap_p and stmt_could_throw_p.  */
2416 
2417 bool
2418 operation_could_trap_helper_p (enum tree_code op,
2419 			       bool fp_operation,
2420 			       bool honor_trapv,
2421 			       bool honor_nans,
2422 			       bool honor_snans,
2423 			       tree divisor,
2424 			       bool *handled)
2425 {
2426   *handled = true;
2427   switch (op)
2428     {
2429     case TRUNC_DIV_EXPR:
2430     case CEIL_DIV_EXPR:
2431     case FLOOR_DIV_EXPR:
2432     case ROUND_DIV_EXPR:
2433     case EXACT_DIV_EXPR:
2434     case CEIL_MOD_EXPR:
2435     case FLOOR_MOD_EXPR:
2436     case ROUND_MOD_EXPR:
2437     case TRUNC_MOD_EXPR:
2438     case RDIV_EXPR:
2439       if (honor_snans)
2440 	return true;
2441       if (fp_operation)
2442 	return flag_trapping_math;
2443       if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2444         return true;
2445       return false;
2446 
2447     case LT_EXPR:
2448     case LE_EXPR:
2449     case GT_EXPR:
2450     case GE_EXPR:
2451     case LTGT_EXPR:
2452       /* Some floating point comparisons may trap.  */
2453       return honor_nans;
2454 
2455     case EQ_EXPR:
2456     case NE_EXPR:
2457     case UNORDERED_EXPR:
2458     case ORDERED_EXPR:
2459     case UNLT_EXPR:
2460     case UNLE_EXPR:
2461     case UNGT_EXPR:
2462     case UNGE_EXPR:
2463     case UNEQ_EXPR:
2464       return honor_snans;
2465 
2466     case NEGATE_EXPR:
2467     case ABS_EXPR:
2468     case CONJ_EXPR:
2469       /* These operations don't trap with floating point.  */
2470       if (honor_trapv)
2471 	return true;
2472       return false;
2473 
2474     case ABSU_EXPR:
2475       /* ABSU_EXPR never traps.  */
2476       return false;
2477 
2478     case PLUS_EXPR:
2479     case MINUS_EXPR:
2480     case MULT_EXPR:
2481       /* Any floating arithmetic may trap.  */
2482       if (fp_operation && flag_trapping_math)
2483 	return true;
2484       if (honor_trapv)
2485 	return true;
2486       return false;
2487 
2488     case COMPLEX_EXPR:
2489     case CONSTRUCTOR:
2490       /* Constructing an object cannot trap.  */
2491       return false;
2492 
2493     default:
2494       /* Any floating arithmetic may trap.  */
2495       if (fp_operation && flag_trapping_math)
2496 	return true;
2497 
2498       *handled = false;
2499       return false;
2500     }
2501 }
2502 
2503 /* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
2504    on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
2505    type operands that may trap.  If OP is a division operator, DIVISOR contains
2506    the value of the divisor.  */
2507 
2508 bool
2509 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2510 			tree divisor)
2511 {
2512   bool honor_nans = (fp_operation && flag_trapping_math
2513 		     && !flag_finite_math_only);
2514   bool honor_snans = fp_operation && flag_signaling_nans != 0;
2515   bool handled;
2516 
2517   if (TREE_CODE_CLASS (op) != tcc_comparison
2518       && TREE_CODE_CLASS (op) != tcc_unary
2519       && TREE_CODE_CLASS (op) != tcc_binary)
2520     return false;
2521 
2522   return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2523 					honor_nans, honor_snans, divisor,
2524 					&handled);
2525 }
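
/* For example (illustrative only):

     operation_could_trap_p (TRUNC_DIV_EXPR, false, false, divisor)

   is true unless DIVISOR is a constant known to be nonzero, while

     operation_could_trap_p (PLUS_EXPR, true, false, NULL_TREE)

   depends on flag_trapping_math for the floating-point case.  */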
2526 
2527 
2528 /* Returns true if it is possible to prove that the index of
2529    an array access REF (an ARRAY_REF expression) falls into the
2530    array bounds.  */
2531 
2532 static bool
2533 in_array_bounds_p (tree ref)
2534 {
2535   tree idx = TREE_OPERAND (ref, 1);
2536   tree min, max;
2537 
2538   if (TREE_CODE (idx) != INTEGER_CST)
2539     return false;
2540 
2541   min = array_ref_low_bound (ref);
2542   max = array_ref_up_bound (ref);
2543   if (!min
2544       || !max
2545       || TREE_CODE (min) != INTEGER_CST
2546       || TREE_CODE (max) != INTEGER_CST)
2547     return false;
2548 
2549   if (tree_int_cst_lt (idx, min)
2550       || tree_int_cst_lt (max, idx))
2551     return false;
2552 
2553   return true;
2554 }
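
/* For example (illustrative only), given "int a[10];" the access a[4]
   has a constant index within [0, 9] and is provably in bounds, while
   a[i] with a non-constant I, or an array with an unknown upper bound,
   conservatively yields false.  */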
2555 
2556 /* Returns true if it is possible to prove that the range of
2557    an array access REF (an ARRAY_RANGE_REF expression) falls
2558    into the array bounds.  */
2559 
2560 static bool
2561 range_in_array_bounds_p (tree ref)
2562 {
2563   tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2564   tree range_min, range_max, min, max;
2565 
2566   range_min = TYPE_MIN_VALUE (domain_type);
2567   range_max = TYPE_MAX_VALUE (domain_type);
2568   if (!range_min
2569       || !range_max
2570       || TREE_CODE (range_min) != INTEGER_CST
2571       || TREE_CODE (range_max) != INTEGER_CST)
2572     return false;
2573 
2574   min = array_ref_low_bound (ref);
2575   max = array_ref_up_bound (ref);
2576   if (!min
2577       || !max
2578       || TREE_CODE (min) != INTEGER_CST
2579       || TREE_CODE (max) != INTEGER_CST)
2580     return false;
2581 
2582   if (tree_int_cst_lt (range_min, min)
2583       || tree_int_cst_lt (max, range_max))
2584     return false;
2585 
2586   return true;
2587 }
2588 
2589 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2590    location or floating point arithmetic.  C.f. the rtl version, may_trap_p.
2591    This routine expects only GIMPLE lhs or rhs input.  */
2592 
2593 bool
2594 tree_could_trap_p (tree expr)
2595 {
2596   enum tree_code code;
2597   bool fp_operation = false;
2598   bool honor_trapv = false;
2599   tree t, base, div = NULL_TREE;
2600 
2601   if (!expr)
2602     return false;
2603 
2604   code = TREE_CODE (expr);
2605   t = TREE_TYPE (expr);
2606 
2607   if (t)
2608     {
2609       if (COMPARISON_CLASS_P (expr))
2610 	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2611       else
2612 	fp_operation = FLOAT_TYPE_P (t);
2613       honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2614     }
2615 
2616   if (TREE_CODE_CLASS (code) == tcc_binary)
2617     div = TREE_OPERAND (expr, 1);
2618   if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2619     return true;
2620 
2621  restart:
2622   switch (code)
2623     {
2624     case COMPONENT_REF:
2625     case REALPART_EXPR:
2626     case IMAGPART_EXPR:
2627     case BIT_FIELD_REF:
2628     case VIEW_CONVERT_EXPR:
2629     case WITH_SIZE_EXPR:
2630       expr = TREE_OPERAND (expr, 0);
2631       code = TREE_CODE (expr);
2632       goto restart;
2633 
2634     case ARRAY_RANGE_REF:
2635       base = TREE_OPERAND (expr, 0);
2636       if (tree_could_trap_p (base))
2637 	return true;
2638       if (TREE_THIS_NOTRAP (expr))
2639 	return false;
2640       return !range_in_array_bounds_p (expr);
2641 
2642     case ARRAY_REF:
2643       base = TREE_OPERAND (expr, 0);
2644       if (tree_could_trap_p (base))
2645 	return true;
2646       if (TREE_THIS_NOTRAP (expr))
2647 	return false;
2648       return !in_array_bounds_p (expr);
2649 
2650     case TARGET_MEM_REF:
2651     case MEM_REF:
2652       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2653 	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2654 	return true;
2655       if (TREE_THIS_NOTRAP (expr))
2656 	return false;
2657       /* We cannot prove that the access is in-bounds when we have
2658          variable-index TARGET_MEM_REFs.  */
2659       if (code == TARGET_MEM_REF
2660 	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2661 	return true;
2662       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2663 	{
2664 	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2665 	  poly_offset_int off = mem_ref_offset (expr);
2666 	  if (maybe_lt (off, 0))
2667 	    return true;
2668 	  if (TREE_CODE (base) == STRING_CST)
2669 	    return maybe_le (TREE_STRING_LENGTH (base), off);
2670 	  tree size = DECL_SIZE_UNIT (base);
2671 	  if (size == NULL_TREE
2672 	      || !poly_int_tree_p (size)
2673 	      || maybe_le (wi::to_poly_offset (size), off))
2674 	    return true;
2675 	  /* Now we are sure the first byte of the access is inside
2676 	     the object.  */
2677 	  return false;
2678 	}
2679       return true;
2680 
2681     case INDIRECT_REF:
2682       return !TREE_THIS_NOTRAP (expr);
2683 
2684     case ASM_EXPR:
2685       return TREE_THIS_VOLATILE (expr);
2686 
2687     case CALL_EXPR:
2688       t = get_callee_fndecl (expr);
2689       /* Assume that calls to weak functions may trap.  */
2690       if (!t || !DECL_P (t))
2691 	return true;
2692       if (DECL_WEAK (t))
2693 	return tree_could_trap_p (t);
2694       return false;
2695 
2696     case FUNCTION_DECL:
2697       /* Assume that accesses to weak functions may trap, unless we know
2698 	 they are certainly defined in current TU or in some other
2699 	 LTO partition.  */
2700       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2701 	{
2702 	  cgraph_node *node = cgraph_node::get (expr);
2703 	  if (node)
2704 	    node = node->function_symbol ();
2705 	  return !(node && node->in_other_partition);
2706 	}
2707       return false;
2708 
2709     case VAR_DECL:
2710       /* Assume that accesses to weak vars may trap, unless we know
2711 	 they are certainly defined in current TU or in some other
2712 	 LTO partition.  */
2713       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2714 	{
2715 	  varpool_node *node = varpool_node::get (expr);
2716 	  if (node)
2717 	    node = node->ultimate_alias_target ();
2718 	  return !(node && node->in_other_partition);
2719 	}
2720       return false;
2721 
2722     default:
2723       return false;
2724     }
2725 }
2726 
2727 /* Return non-NULL if there is an integer operation with trapping overflow
2728    we can rewrite into non-trapping.  Called via walk_tree from
2729    rewrite_to_non_trapping_overflow.  */
2730 
2731 static tree
2732 find_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2733 {
2734   if (EXPR_P (*tp)
2735       && ANY_INTEGRAL_TYPE_P (TREE_TYPE (*tp))
2736       && !operation_no_trapping_overflow (TREE_TYPE (*tp), TREE_CODE (*tp)))
2737     return *tp;
2738   if (IS_TYPE_OR_DECL_P (*tp)
2739       || (TREE_CODE (*tp) == SAVE_EXPR && data == NULL))
2740     *walk_subtrees = 0;
2741   return NULL_TREE;
2742 }
2743 
2744 /* Rewrite selected operations into unsigned arithmetic, so that they
2745    don't trap on overflow.  */
2746 
2747 static tree
2748 replace_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2749 {
2750   if (find_trapping_overflow (tp, walk_subtrees, data))
2751     {
2752       tree type = TREE_TYPE (*tp);
2753       tree utype = unsigned_type_for (type);
2754       *walk_subtrees = 0;
2755       int len = TREE_OPERAND_LENGTH (*tp);
2756       for (int i = 0; i < len; ++i)
2757 	walk_tree (&TREE_OPERAND (*tp, i), replace_trapping_overflow,
2758 		   data, (hash_set<tree> *) data);
2759 
2760       if (TREE_CODE (*tp) == ABS_EXPR)
2761 	{
2762 	  TREE_SET_CODE (*tp, ABSU_EXPR);
2763 	  TREE_TYPE (*tp) = utype;
2764 	  *tp = fold_convert (type, *tp);
2765 	}
2766       else
2767 	{
2768 	  TREE_TYPE (*tp) = utype;
2769 	  len = TREE_OPERAND_LENGTH (*tp);
2770 	  for (int i = 0; i < len; ++i)
2771 	    TREE_OPERAND (*tp, i)
2772 	      = fold_convert (utype, TREE_OPERAND (*tp, i));
2773 	  *tp = fold_convert (type, *tp);
2774 	}
2775     }
2776   return NULL_TREE;
2777 }
2778 
2779 /* If any subexpression of EXPR can trap due to -ftrapv, rewrite it
2780    using unsigned arithmetic to avoid traps in it.  */
2781 
2782 tree
2783 rewrite_to_non_trapping_overflow (tree expr)
2784 {
2785   if (!flag_trapv)
2786     return expr;
2787   hash_set<tree> pset;
2788   if (!walk_tree (&expr, find_trapping_overflow, &pset, &pset))
2789     return expr;
2790   expr = unshare_expr (expr);
2791   pset.empty ();
2792   walk_tree (&expr, replace_trapping_overflow, &pset, &pset);
2793   return expr;
2794 }
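
/* For example (illustrative only), with -ftrapv a signed expression
   such as

     a + b * c

   is rewritten to the equivalent of

     (int) ((unsigned) a + (unsigned) b * (unsigned) c)

   and an ABS_EXPR becomes an ABSU_EXPR followed by a conversion back,
   so the arithmetic is carried out in unsigned types that cannot trap
   on overflow.  */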
2795 
2796 /* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
2797    an assignment) may throw.  */
2798 
2799 static bool
2800 stmt_could_throw_1_p (gassign *stmt)
2801 {
2802   enum tree_code code = gimple_assign_rhs_code (stmt);
2803   bool honor_nans = false;
2804   bool honor_snans = false;
2805   bool fp_operation = false;
2806   bool honor_trapv = false;
2807   tree t;
2808   size_t i;
2809   bool handled, ret;
2810 
2811   if (TREE_CODE_CLASS (code) == tcc_comparison
2812       || TREE_CODE_CLASS (code) == tcc_unary
2813       || TREE_CODE_CLASS (code) == tcc_binary)
2814     {
2815       if (TREE_CODE_CLASS (code) == tcc_comparison)
2816 	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2817       else
2818 	t = gimple_expr_type (stmt);
2819       fp_operation = FLOAT_TYPE_P (t);
2820       if (fp_operation)
2821 	{
2822 	  honor_nans = flag_trapping_math && !flag_finite_math_only;
2823 	  honor_snans = flag_signaling_nans != 0;
2824 	}
2825       else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2826 	honor_trapv = true;
2827     }
2828 
2829   /* First check the LHS.  */
2830   if (tree_could_trap_p (gimple_assign_lhs (stmt)))
2831     return true;
2832 
2833   /* Check if the main expression may trap.  */
2834   ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2835 				       honor_nans, honor_snans,
2836 				       gimple_assign_rhs2 (stmt),
2837 				       &handled);
2838   if (handled)
2839     return ret;
2840 
2841   /* If the expression does not trap, see if any of the individual operands may
2842      trap.  */
2843   for (i = 1; i < gimple_num_ops (stmt); i++)
2844     if (tree_could_trap_p (gimple_op (stmt, i)))
2845       return true;
2846 
2847   return false;
2848 }
2849 
2850 
2851 /* Return true if statement STMT within FUN could throw an exception.  */
2852 
2853 bool
2854 stmt_could_throw_p (function *fun, gimple *stmt)
2855 {
2856   if (!flag_exceptions)
2857     return false;
2858 
2859   /* The only statements that can throw an exception are assignments,
2860      conditionals, calls, resx, and asms.  */
2861   switch (gimple_code (stmt))
2862     {
2863     case GIMPLE_RESX:
2864       return true;
2865 
2866     case GIMPLE_CALL:
2867       return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2868 
2869     case GIMPLE_COND:
2870       {
2871 	if (fun && !fun->can_throw_non_call_exceptions)
2872 	  return false;
2873 	gcond *cond = as_a <gcond *> (stmt);
2874 	tree lhs = gimple_cond_lhs (cond);
2875 	return operation_could_trap_p (gimple_cond_code (cond),
2876 				       FLOAT_TYPE_P (TREE_TYPE (lhs)),
2877 				       false, NULL_TREE);
2878       }
2879 
2880     case GIMPLE_ASSIGN:
2881       if ((fun && !fun->can_throw_non_call_exceptions)
2882 	  || gimple_clobber_p (stmt))
2883         return false;
2884       return stmt_could_throw_1_p (as_a <gassign *> (stmt));
2885 
2886     case GIMPLE_ASM:
2887       if (fun && !fun->can_throw_non_call_exceptions)
2888         return false;
2889       return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2890 
2891     default:
2892       return false;
2893     }
2894 }
2895 
2896 
2897 /* Return true if expression T could throw an exception.  */
2898 
2899 bool
2900 tree_could_throw_p (tree t)
2901 {
2902   if (!flag_exceptions)
2903     return false;
2904   if (TREE_CODE (t) == MODIFY_EXPR)
2905     {
2906       if (cfun->can_throw_non_call_exceptions
2907           && tree_could_trap_p (TREE_OPERAND (t, 0)))
2908         return true;
2909       t = TREE_OPERAND (t, 1);
2910     }
2911 
2912   if (TREE_CODE (t) == WITH_SIZE_EXPR)
2913     t = TREE_OPERAND (t, 0);
2914   if (TREE_CODE (t) == CALL_EXPR)
2915     return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2916   if (cfun->can_throw_non_call_exceptions)
2917     return tree_could_trap_p (t);
2918   return false;
2919 }
2920 
2921 /* Return true if STMT can throw an exception that is not caught within its
2922    function FUN.  FUN may be NULL, in which case the answer is more
2923    conservative (the exception is assumed to escape).  */
2924 
2925 bool
2926 stmt_can_throw_external (function *fun, gimple *stmt)
2927 {
2928   int lp_nr;
2929 
2930   if (!stmt_could_throw_p (fun, stmt))
2931     return false;
2932   if (!fun)
2933     return true;
2934 
2935   lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
2936   return lp_nr == 0;
2937 }
2938 
2939 /* Return true if STMT can throw an exception that is caught within its
2940    function FUN.  */
2941 
2942 bool
2943 stmt_can_throw_internal (function *fun, gimple *stmt)
2944 {
2945   int lp_nr;
2946 
2947   gcc_checking_assert (fun);
2948   if (!stmt_could_throw_p (fun, stmt))
2949     return false;
2950 
2951   lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
2952   return lp_nr > 0;
2953 }
2954 
2955 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2956    remove any entry it might have from the EH table.  Return true if
2957    any change was made.  */
2958 
2959 bool
2960 maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt)
2961 {
2962   if (stmt_could_throw_p (ifun, stmt))
2963     return false;
2964   return remove_stmt_from_eh_lp_fn (ifun, stmt);
2965 }
2966 
2967 /* Likewise, but always use the current function.  */
2968 
2969 bool
2970 maybe_clean_eh_stmt (gimple *stmt)
2971 {
2972   return maybe_clean_eh_stmt_fn (cfun, stmt);
2973 }
2974 
2975 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2976    OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2977    in the table if it should be in there.  Return TRUE if a replacement was
2978    done that may require an EH edge purge.  */
2979 
2980 bool
2981 maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt)
2982 {
2983   int lp_nr = lookup_stmt_eh_lp (old_stmt);
2984 
2985   if (lp_nr != 0)
2986     {
2987       bool new_stmt_could_throw = stmt_could_throw_p (cfun, new_stmt);
2988 
2989       if (new_stmt == old_stmt && new_stmt_could_throw)
2990 	return false;
2991 
2992       remove_stmt_from_eh_lp (old_stmt);
2993       if (new_stmt_could_throw)
2994 	{
2995 	  add_stmt_to_eh_lp (new_stmt, lp_nr);
2996 	  return false;
2997 	}
2998       else
2999 	return true;
3000     }
3001 
3002   return false;
3003 }
3004 
3005 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
3006    in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
3007    operand is the return value of duplicate_eh_regions.  */
3008 
3009 bool
3010 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt,
3011 			    struct function *old_fun, gimple *old_stmt,
3012 			    hash_map<void *, void *> *map,
3013 			    int default_lp_nr)
3014 {
3015   int old_lp_nr, new_lp_nr;
3016 
3017   if (!stmt_could_throw_p (new_fun, new_stmt))
3018     return false;
3019 
3020   old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
3021   if (old_lp_nr == 0)
3022     {
3023       if (default_lp_nr == 0)
3024 	return false;
3025       new_lp_nr = default_lp_nr;
3026     }
3027   else if (old_lp_nr > 0)
3028     {
3029       eh_landing_pad old_lp, new_lp;
3030 
3031       old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
3032       new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
3033       new_lp_nr = new_lp->index;
3034     }
3035   else
3036     {
3037       eh_region old_r, new_r;
3038 
3039       old_r = (*old_fun->eh->region_array)[-old_lp_nr];
3040       new_r = static_cast<eh_region> (*map->get (old_r));
3041       new_lp_nr = -new_r->index;
3042     }
3043 
3044   add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
3045   return true;
3046 }
3047 
3048 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
3049    and thus no remapping is required.  */
3050 
3051 bool
3052 maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt)
3053 {
3054   int lp_nr;
3055 
3056   if (!stmt_could_throw_p (cfun, new_stmt))
3057     return false;
3058 
3059   lp_nr = lookup_stmt_eh_lp (old_stmt);
3060   if (lp_nr == 0)
3061     return false;
3062 
3063   add_stmt_to_eh_lp (new_stmt, lp_nr);
3064   return true;
3065 }
3066 
3067 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
3068    GIMPLE_TRY) that are similar enough to be considered the same.  Currently
3069    this only handles handlers consisting of a single call, as that's the
3070    important case for C++: a destructor call for a particular object showing
3071    up in multiple handlers.  */
3072 
3073 static bool
3074 same_handler_p (gimple_seq oneh, gimple_seq twoh)
3075 {
3076   gimple_stmt_iterator gsi;
3077   gimple *ones, *twos;
3078   unsigned int ai;
3079 
3080   gsi = gsi_start (oneh);
3081   if (!gsi_one_before_end_p (gsi))
3082     return false;
3083   ones = gsi_stmt (gsi);
3084 
3085   gsi = gsi_start (twoh);
3086   if (!gsi_one_before_end_p (gsi))
3087     return false;
3088   twos = gsi_stmt (gsi);
3089 
3090   if (!is_gimple_call (ones)
3091       || !is_gimple_call (twos)
3092       || gimple_call_lhs (ones)
3093       || gimple_call_lhs (twos)
3094       || gimple_call_chain (ones)
3095       || gimple_call_chain (twos)
3096       || !gimple_call_same_target_p (ones, twos)
3097       || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3098     return false;
3099 
3100   for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3101     if (!operand_equal_p (gimple_call_arg (ones, ai),
3102                           gimple_call_arg (twos, ai), 0))
3103       return false;
3104 
3105   return true;
3106 }
3107 
3108 /* Optimize
3109     try { A() } finally { try { ~B() } catch { ~A() } }
3110     try { ... } finally { ~A() }
3111    into
3112     try { A() } catch { ~B() }
3113     try { ~B() ... } finally { ~A() }
3114 
3115    This occurs frequently in C++, where A is a local variable and B is a
3116    temporary used in the initializer for A.  */
3117 
3118 static void
3119 optimize_double_finally (gtry *one, gtry *two)
3120 {
3121   gimple *oneh;
3122   gimple_stmt_iterator gsi;
3123   gimple_seq cleanup;
3124 
3125   cleanup = gimple_try_cleanup (one);
3126   gsi = gsi_start (cleanup);
3127   if (!gsi_one_before_end_p (gsi))
3128     return;
3129 
3130   oneh = gsi_stmt (gsi);
3131   if (gimple_code (oneh) != GIMPLE_TRY
3132       || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3133     return;
3134 
3135   if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3136     {
3137       gimple_seq seq = gimple_try_eval (oneh);
3138 
3139       gimple_try_set_cleanup (one, seq);
3140       gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3141       seq = copy_gimple_seq_and_replace_locals (seq);
3142       gimple_seq_add_seq (&seq, gimple_try_eval (two));
3143       gimple_try_set_eval (two, seq);
3144     }
3145 }
3146 
3147 /* Perform EH refactoring optimizations that are simpler to do when code
3148    flow has been lowered but EH structures haven't.  */
3149 
3150 static void
3151 refactor_eh_r (gimple_seq seq)
3152 {
3153   gimple_stmt_iterator gsi;
3154   gimple *one, *two;
3155 
3156   one = NULL;
3157   two = NULL;
3158   gsi = gsi_start (seq);
3159   while (1)
3160     {
3161       one = two;
3162       if (gsi_end_p (gsi))
3163 	two = NULL;
3164       else
3165 	two = gsi_stmt (gsi);
3166       if (one && two)
3167 	if (gtry *try_one = dyn_cast <gtry *> (one))
3168 	  if (gtry *try_two = dyn_cast <gtry *> (two))
3169 	    if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3170 		&& gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3171 	      optimize_double_finally (try_one, try_two);
3172       if (one)
3173 	switch (gimple_code (one))
3174 	  {
3175 	  case GIMPLE_TRY:
3176 	    refactor_eh_r (gimple_try_eval (one));
3177 	    refactor_eh_r (gimple_try_cleanup (one));
3178 	    break;
3179 	  case GIMPLE_CATCH:
3180 	    refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3181 	    break;
3182 	  case GIMPLE_EH_FILTER:
3183 	    refactor_eh_r (gimple_eh_filter_failure (one));
3184 	    break;
3185 	  case GIMPLE_EH_ELSE:
3186 	    {
3187 	      geh_else *eh_else_stmt = as_a <geh_else *> (one);
3188 	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3189 	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3190 	    }
3191 	    break;
3192 	  default:
3193 	    break;
3194 	  }
3195       if (two)
3196 	gsi_next (&gsi);
3197       else
3198 	break;
3199     }
3200 }
3201 
3202 namespace {
3203 
3204 const pass_data pass_data_refactor_eh =
3205 {
3206   GIMPLE_PASS, /* type */
3207   "ehopt", /* name */
3208   OPTGROUP_NONE, /* optinfo_flags */
3209   TV_TREE_EH, /* tv_id */
3210   PROP_gimple_lcf, /* properties_required */
3211   0, /* properties_provided */
3212   0, /* properties_destroyed */
3213   0, /* todo_flags_start */
3214   0, /* todo_flags_finish */
3215 };
3216 
3217 class pass_refactor_eh : public gimple_opt_pass
3218 {
3219 public:
3220   pass_refactor_eh (gcc::context *ctxt)
3221     : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3222   {}
3223 
3224   /* opt_pass methods: */
3225   virtual bool gate (function *) { return flag_exceptions != 0; }
3226   virtual unsigned int execute (function *)
3227     {
3228       refactor_eh_r (gimple_body (current_function_decl));
3229       return 0;
3230     }
3231 
3232 }; // class pass_refactor_eh
3233 
3234 } // anon namespace
3235 
3236 gimple_opt_pass *
3237 make_pass_refactor_eh (gcc::context *ctxt)
3238 {
3239   return new pass_refactor_eh (ctxt);
3240 }
3241 
3242 /* At the end of gimple optimization, we can lower RESX.  */
3243 
3244 static bool
3245 lower_resx (basic_block bb, gresx *stmt,
3246 	    hash_map<eh_region, tree> *mnt_map)
3247 {
3248   int lp_nr;
3249   eh_region src_r, dst_r;
3250   gimple_stmt_iterator gsi;
3251   gimple *x;
3252   tree fn, src_nr;
3253   bool ret = false;
3254 
3255   lp_nr = lookup_stmt_eh_lp (stmt);
3256   if (lp_nr != 0)
3257     dst_r = get_eh_region_from_lp_number (lp_nr);
3258   else
3259     dst_r = NULL;
3260 
3261   src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3262   gsi = gsi_last_bb (bb);
3263 
3264   if (src_r == NULL)
3265     {
3266       /* We can wind up with no source region when pass_cleanup_eh shows
3267 	 that there are no entries into an eh region and deletes it, but
3268 	 then the block that contains the resx isn't removed.  This can
3269 	 happen without optimization when the switch statement created by
3270 	 lower_try_finally_switch isn't simplified to remove the eh case.
3271 
3272 	 Resolve this by expanding the resx node to an abort.  */
3273 
3274       fn = builtin_decl_implicit (BUILT_IN_TRAP);
3275       x = gimple_build_call (fn, 0);
3276       gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3277 
3278       while (EDGE_COUNT (bb->succs) > 0)
3279 	remove_edge (EDGE_SUCC (bb, 0));
3280     }
3281   else if (dst_r)
3282     {
3283       /* When we have a destination region, we resolve this by copying
3284 	 the excptr and filter values into place, and changing the edge
3285 	 to immediately after the landing pad.  */
3286       edge e;
3287 
3288       if (lp_nr < 0)
3289 	{
3290 	  basic_block new_bb;
3291 	  tree lab;
3292 
3293 	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3294 	     the failure decl into a new block, if needed.  */
3295 	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3296 
3297 	  tree *slot = mnt_map->get (dst_r);
3298 	  if (slot == NULL)
3299 	    {
3300 	      gimple_stmt_iterator gsi2;
3301 
3302 	      new_bb = create_empty_bb (bb);
3303 	      new_bb->count = bb->count;
3304 	      add_bb_to_loop (new_bb, bb->loop_father);
3305 	      lab = gimple_block_label (new_bb);
3306 	      gsi2 = gsi_start_bb (new_bb);
3307 
3308 	      fn = dst_r->u.must_not_throw.failure_decl;
3309 	      x = gimple_build_call (fn, 0);
3310 	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3311 	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3312 
3313 	      mnt_map->put (dst_r, lab);
3314 	    }
3315 	  else
3316 	    {
3317 	      lab = *slot;
3318 	      new_bb = label_to_block (cfun, lab);
3319 	    }
3320 
3321 	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
3322 	  e = make_single_succ_edge (bb, new_bb, EDGE_FALLTHRU);
3323 	}
3324       else
3325 	{
3326 	  edge_iterator ei;
3327 	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3328 
3329 	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3330 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3331 	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
3332 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3333 
3334 	  /* Update the flags for the outgoing edge.  */
3335 	  e = single_succ_edge (bb);
3336 	  gcc_assert (e->flags & EDGE_EH);
3337 	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3338 	  e->probability = profile_probability::always ();
3339 
3340 	  /* If there are no more EH users of the landing pad, delete it.  */
3341 	  FOR_EACH_EDGE (e, ei, e->dest->preds)
3342 	    if (e->flags & EDGE_EH)
3343 	      break;
3344 	  if (e == NULL)
3345 	    {
3346 	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3347 	      remove_eh_landing_pad (lp);
3348 	    }
3349 	}
3350 
3351       ret = true;
3352     }
3353   else
3354     {
3355       tree var;
3356 
3357       /* When we don't have a destination region, this exception escapes
3358 	 up the call chain.  We resolve this by generating a call to the
3359 	 _Unwind_Resume library function.  */
3360 
3361       /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3362 	 with no arguments for C++.  Check for that.  */
3363       if (src_r->use_cxa_end_cleanup)
3364 	{
3365 	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3366 	  x = gimple_build_call (fn, 0);
3367 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3368 	}
3369       else
3370 	{
3371 	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3372 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3373 	  x = gimple_build_call (fn, 1, src_nr);
3374 	  var = create_tmp_var (ptr_type_node);
3375 	  var = make_ssa_name (var, x);
3376 	  gimple_call_set_lhs (x, var);
3377 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3378 
3379 	  /* When exception handling is delegated to the caller, we have to
3380 	     guarantee that shadow memory variables living on the stack are
3381 	     cleaned up before control is handed back to the parent function.  */
3382 	  if (sanitize_flags_p (SANITIZE_ADDRESS))
3383 	    {
3384 	      tree decl
3385 		= builtin_decl_implicit (BUILT_IN_ASAN_HANDLE_NO_RETURN);
3386 	      gimple *g = gimple_build_call (decl, 0);
3387 	      gimple_set_location (g, gimple_location (stmt));
3388 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3389 	    }
3390 
3391 	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3392 	  x = gimple_build_call (fn, 1, var);
3393 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3394 	}
3395 
3396       gcc_assert (EDGE_COUNT (bb->succs) == 0);
3397     }
3398 
3399   gsi_remove (&gsi, true);
3400 
3401   return ret;
3402 }
3403 
3404 namespace {
3405 
3406 const pass_data pass_data_lower_resx =
3407 {
3408   GIMPLE_PASS, /* type */
3409   "resx", /* name */
3410   OPTGROUP_NONE, /* optinfo_flags */
3411   TV_TREE_EH, /* tv_id */
3412   PROP_gimple_lcf, /* properties_required */
3413   0, /* properties_provided */
3414   0, /* properties_destroyed */
3415   0, /* todo_flags_start */
3416   0, /* todo_flags_finish */
3417 };
3418 
3419 class pass_lower_resx : public gimple_opt_pass
3420 {
3421 public:
3422   pass_lower_resx (gcc::context *ctxt)
3423     : gimple_opt_pass (pass_data_lower_resx, ctxt)
3424   {}
3425 
3426   /* opt_pass methods: */
3427   virtual bool gate (function *) { return flag_exceptions != 0; }
3428   virtual unsigned int execute (function *);
3429 
3430 }; // class pass_lower_resx
3431 
3432 unsigned
3433 pass_lower_resx::execute (function *fun)
3434 {
3435   basic_block bb;
3436   bool dominance_invalidated = false;
3437   bool any_rewritten = false;
3438 
3439   hash_map<eh_region, tree> mnt_map;
3440 
3441   FOR_EACH_BB_FN (bb, fun)
3442     {
3443       gimple *last = last_stmt (bb);
3444       if (last && is_gimple_resx (last))
3445 	{
3446 	  dominance_invalidated |=
3447 	    lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3448 	  any_rewritten = true;
3449 	}
3450     }
3451 
3452   if (dominance_invalidated)
3453     {
3454       free_dominance_info (CDI_DOMINATORS);
3455       free_dominance_info (CDI_POST_DOMINATORS);
3456     }
3457 
3458   return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3459 }
3460 
3461 } // anon namespace
3462 
3463 gimple_opt_pass *
3464 make_pass_lower_resx (gcc::context *ctxt)
3465 {
3466   return new pass_lower_resx (ctxt);
3467 }
3468 
3469 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3470    external throw.  */
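
/* For example (illustrative only), in a block such as

     x ={v} {CLOBBER};
     y ={v} {CLOBBER};
     resx 1;     <-- throws externally

   the clobbers have no consumer once the exception leaves the function,
   so they can simply be deleted.  */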
3471 
3472 static void
3473 optimize_clobbers (basic_block bb)
3474 {
3475   gimple_stmt_iterator gsi = gsi_last_bb (bb);
3476   bool any_clobbers = false;
3477   bool seen_stack_restore = false;
3478   edge_iterator ei;
3479   edge e;
3480 
3481   /* Only optimize if the bb contains at least one clobber, ends with
3482      a resx (checked by the caller), otherwise contains only debug
3483      stmts, labels, and at most one __builtin_stack_restore call, and
3484      has an incoming EH edge.  */
3485   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3486     {
3487       gimple *stmt = gsi_stmt (gsi);
3488       if (is_gimple_debug (stmt))
3489 	continue;
3490       if (gimple_clobber_p (stmt))
3491 	{
3492 	  any_clobbers = true;
3493 	  continue;
3494 	}
3495       if (!seen_stack_restore
3496 	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3497 	{
3498 	  seen_stack_restore = true;
3499 	  continue;
3500 	}
3501       if (gimple_code (stmt) == GIMPLE_LABEL)
3502 	break;
3503       return;
3504     }
3505   if (!any_clobbers)
3506     return;
3507   FOR_EACH_EDGE (e, ei, bb->preds)
3508     if (e->flags & EDGE_EH)
3509       break;
3510   if (e == NULL)
3511     return;
3512   gsi = gsi_last_bb (bb);
3513   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3514     {
3515       gimple *stmt = gsi_stmt (gsi);
3516       if (!gimple_clobber_p (stmt))
3517 	continue;
3518       unlink_stmt_vdef (stmt);
3519       gsi_remove (&gsi, true);
3520       release_defs (stmt);
3521     }
3522 }
3523 
3524 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3525    internal throw to successor BB.  */
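
/* Here the final resx transfers control to another landing pad within
   this function, so the clobbers are not dead; instead they can be
   moved into the single EH successor block.  A rough sketch:

     <bb A>: x ={v} {CLOBBER}; resx 1;
     <bb B>: <landing pad label> ...

   becomes

     <bb A>: resx 1;
     <bb B>: <landing pad label> x ={v} {CLOBBER}; ...  */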
3526 
3527 static int
3528 sink_clobbers (basic_block bb)
3529 {
3530   edge e;
3531   edge_iterator ei;
3532   gimple_stmt_iterator gsi, dgsi;
3533   basic_block succbb;
3534   bool any_clobbers = false;
3535   unsigned todo = 0;
3536 
3537   /* Only optimize if BB has a single EH successor and
3538      all predecessor edges are EH too.  */
3539   if (!single_succ_p (bb)
3540       || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3541     return 0;
3542 
3543   FOR_EACH_EDGE (e, ei, bb->preds)
3544     {
3545       if ((e->flags & EDGE_EH) == 0)
3546 	return 0;
3547     }
3548 
3549   /* And BB contains only CLOBBER stmts before the final
3550      RESX.  */
3551   gsi = gsi_last_bb (bb);
3552   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3553     {
3554       gimple *stmt = gsi_stmt (gsi);
3555       if (is_gimple_debug (stmt))
3556 	continue;
3557       if (gimple_code (stmt) == GIMPLE_LABEL)
3558 	break;
3559       if (!gimple_clobber_p (stmt))
3560 	return 0;
3561       any_clobbers = true;
3562     }
3563   if (!any_clobbers)
3564     return 0;
3565 
3566   edge succe = single_succ_edge (bb);
3567   succbb = succe->dest;
3568 
3569   /* See if there is a virtual PHI node to take an updated virtual
3570      operand from.  */
3571   gphi *vphi = NULL;
3572   tree vuse = NULL_TREE;
3573   for (gphi_iterator gpi = gsi_start_phis (succbb);
3574        !gsi_end_p (gpi); gsi_next (&gpi))
3575     {
3576       tree res = gimple_phi_result (gpi.phi ());
3577       if (virtual_operand_p (res))
3578 	{
3579 	  vphi = gpi.phi ();
3580 	  vuse = res;
3581 	  break;
3582 	}
3583     }
3584 
3585   dgsi = gsi_after_labels (succbb);
3586   gsi = gsi_last_bb (bb);
3587   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3588     {
3589       gimple *stmt = gsi_stmt (gsi);
3590       tree lhs;
3591       if (is_gimple_debug (stmt))
3592 	continue;
3593       if (gimple_code (stmt) == GIMPLE_LABEL)
3594 	break;
3595       lhs = gimple_assign_lhs (stmt);
3596       /* Unfortunately we don't have dominance info updated at this
3597 	 point, so checking if
3598 	 dominated_by_p (CDI_DOMINATORS, succbb,
3599 			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
3600 	 would be too costly.  Thus, avoid sinking any clobbers that
3601 	 refer to non-(D) SSA_NAMEs.  */
3602       if (TREE_CODE (lhs) == MEM_REF
3603 	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3604 	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3605 	{
3606 	  unlink_stmt_vdef (stmt);
3607 	  gsi_remove (&gsi, true);
3608 	  release_defs (stmt);
3609 	  continue;
3610 	}
3611 
3612       /* As we do not change stmt order when sinking across a
3613          forwarder edge we can keep virtual operands in place.  */
3614       gsi_remove (&gsi, false);
3615       gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3616 
3617       /* But adjust virtual operands if we sunk across a PHI node.  */
3618       if (vuse)
3619 	{
3620 	  gimple *use_stmt;
3621 	  imm_use_iterator iter;
3622 	  use_operand_p use_p;
3623 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3624 	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3625 	      SET_USE (use_p, gimple_vdef (stmt));
3626 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3627 	    {
3628 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3629 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3630 	    }
3631 	  /* Adjust the incoming virtual operand.  */
3632 	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3633 	  SET_USE (gimple_vuse_op (stmt), vuse);
3634 	}
3635       /* If the successor has multiple predecessors but no virtual PHI
3636          node, arrange for virtual operands to be renamed.  */
3637       else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3638 	       && !single_pred_p (succbb))
3639 	{
3640 	  /* In this case there will be no use of the VDEF of this stmt.
3641 	     ???  Unless this is a secondary opportunity and we have not
3642 	     removed unreachable blocks yet, so we cannot assert this.
3643 	     Which also means we will end up renaming too many times.  */
3644 	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3645 	  mark_virtual_operands_for_renaming (cfun);
3646 	  todo |= TODO_update_ssa_only_virtuals;
3647 	}
3648     }
3649 
3650   return todo;
3651 }
3652 
3653 /* At the end of inlining, we can lower EH_DISPATCH.  Return true when
3654    we have found some duplicate labels and removed some edges.  */
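
/* Roughly (an illustrative sketch), for an ERT_TRY region

     eh_dispatch 1;     <-- region 1 has handlers for types T1 and T2

   this builds

     filter = __builtin_eh_filter (1);
     switch (filter)
       {
       case <T1's filter value>: goto <T1 handler>;
       case <T2's filter value>: goto <T2 handler>;
       default: goto <past the handlers>;
       }

   while an ERT_ALLOWED_EXCEPTIONS region gets a simple conditional
   comparing the filter value against the region's allowed filter.  */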
3655 
3656 static bool
3657 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3658 {
3659   gimple_stmt_iterator gsi;
3660   int region_nr;
3661   eh_region r;
3662   tree filter, fn;
3663   gimple *x;
3664   bool redirected = false;
3665 
3666   region_nr = gimple_eh_dispatch_region (stmt);
3667   r = get_eh_region_from_number (region_nr);
3668 
3669   gsi = gsi_last_bb (src);
3670 
3671   switch (r->type)
3672     {
3673     case ERT_TRY:
3674       {
3675 	auto_vec<tree> labels;
3676 	tree default_label = NULL;
3677 	eh_catch c;
3678 	edge_iterator ei;
3679 	edge e;
3680 	hash_set<tree> seen_values;
3681 
3682 	/* Collect the labels for a switch.  Zero the post_landing_pad
3683 	   field because we'll no longer have anything keeping these labels
3684 	   in existence and the optimizer will be free to merge these
3685 	   blocks at will.  */
3686 	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3687 	  {
3688 	    tree tp_node, flt_node, lab = c->label;
3689 	    bool have_label = false;
3690 
3691 	    c->label = NULL;
3692 	    tp_node = c->type_list;
3693 	    flt_node = c->filter_list;
3694 
3695 	    if (tp_node == NULL)
3696 	      {
3697 	        default_label = lab;
3698 		break;
3699 	      }
3700 	    do
3701 	      {
3702 		/* Filter out duplicate labels that arise when this handler
3703 		   is shadowed by an earlier one.  When no labels are
3704 		   attached to the handler anymore, we remove
3705 		   the corresponding edge and then we delete unreachable
3706 		   blocks at the end of this pass.  */
3707 		if (! seen_values.contains (TREE_VALUE (flt_node)))
3708 		  {
3709 		    tree t = build_case_label (TREE_VALUE (flt_node),
3710 					       NULL, lab);
3711 		    labels.safe_push (t);
3712 		    seen_values.add (TREE_VALUE (flt_node));
3713 		    have_label = true;
3714 		  }
3715 
3716 		tp_node = TREE_CHAIN (tp_node);
3717 		flt_node = TREE_CHAIN (flt_node);
3718 	      }
3719 	    while (tp_node);
3720 	    if (! have_label)
3721 	      {
3722 		remove_edge (find_edge (src, label_to_block (cfun, lab)));
3723 	        redirected = true;
3724 	      }
3725 	  }
3726 
3727 	/* Clean up the edge flags.  */
3728 	FOR_EACH_EDGE (e, ei, src->succs)
3729 	  {
3730 	    if (e->flags & EDGE_FALLTHRU)
3731 	      {
3732 		/* If there was no catch-all, use the fallthru edge.  */
3733 		if (default_label == NULL)
3734 		  default_label = gimple_block_label (e->dest);
3735 		e->flags &= ~EDGE_FALLTHRU;
3736 	      }
3737 	  }
3738 	gcc_assert (default_label != NULL);
3739 
3740 	/* Don't generate a switch if there's only a default case.
3741 	   This is common in the form of try { A; } catch (...) { B; }.  */
3742 	if (!labels.exists ())
3743 	  {
3744 	    e = single_succ_edge (src);
3745 	    e->flags |= EDGE_FALLTHRU;
3746 	  }
3747 	else
3748 	  {
3749 	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3750 	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3751 							 region_nr));
3752 	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3753 	    filter = make_ssa_name (filter, x);
3754 	    gimple_call_set_lhs (x, filter);
3755 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3756 
3757 	    /* Turn the default label into a default case.  */
3758 	    default_label = build_case_label (NULL, NULL, default_label);
3759 	    sort_case_labels (labels);
3760 
3761 	    x = gimple_build_switch (filter, default_label, labels);
3762 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3763 	  }
3764       }
3765       break;
3766 
3767     case ERT_ALLOWED_EXCEPTIONS:
3768       {
3769 	edge b_e = BRANCH_EDGE (src);
3770 	edge f_e = FALLTHRU_EDGE (src);
3771 
3772 	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3773 	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3774 						     region_nr));
3775 	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3776 	filter = make_ssa_name (filter, x);
3777 	gimple_call_set_lhs (x, filter);
3778 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3779 
3780 	r->u.allowed.label = NULL;
3781 	x = gimple_build_cond (EQ_EXPR, filter,
3782 			       build_int_cst (TREE_TYPE (filter),
3783 					      r->u.allowed.filter),
3784 			       NULL_TREE, NULL_TREE);
3785 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3786 
3787 	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3788         f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3789       }
3790       break;
3791 
3792     default:
3793       gcc_unreachable ();
3794     }
3795 
3796   /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
3797   gsi_remove (&gsi, true);
3798   return redirected;
3799 }
3800 
3801 namespace {
3802 
3803 const pass_data pass_data_lower_eh_dispatch =
3804 {
3805   GIMPLE_PASS, /* type */
3806   "ehdisp", /* name */
3807   OPTGROUP_NONE, /* optinfo_flags */
3808   TV_TREE_EH, /* tv_id */
3809   PROP_gimple_lcf, /* properties_required */
3810   0, /* properties_provided */
3811   0, /* properties_destroyed */
3812   0, /* todo_flags_start */
3813   0, /* todo_flags_finish */
3814 };
3815 
3816 class pass_lower_eh_dispatch : public gimple_opt_pass
3817 {
3818 public:
3819   pass_lower_eh_dispatch (gcc::context *ctxt)
3820     : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3821   {}
3822 
3823   /* opt_pass methods: */
3824   virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3825   virtual unsigned int execute (function *);
3826 
3827 }; // class pass_lower_eh_dispatch
3828 
3829 unsigned
3830 pass_lower_eh_dispatch::execute (function *fun)
3831 {
3832   basic_block bb;
3833   int flags = 0;
3834   bool redirected = false;
3835 
3836   assign_filter_values ();
3837 
3838   FOR_EACH_BB_FN (bb, fun)
3839     {
3840       gimple *last = last_stmt (bb);
3841       if (last == NULL)
3842 	continue;
3843       if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3844 	{
3845 	  redirected |= lower_eh_dispatch (bb,
3846 					   as_a <geh_dispatch *> (last));
3847 	  flags |= TODO_update_ssa_only_virtuals;
3848 	}
3849       else if (gimple_code (last) == GIMPLE_RESX)
3850 	{
3851 	  if (stmt_can_throw_external (cfun, last))
3852 	    optimize_clobbers (bb);
3853 	  else
3854 	    flags |= sink_clobbers (bb);
3855 	}
3856     }
3857 
3858   if (redirected)
3859     {
3860       free_dominance_info (CDI_DOMINATORS);
3861       delete_unreachable_blocks ();
3862     }
3863   return flags;
3864 }
3865 
3866 } // anon namespace
3867 
3868 gimple_opt_pass *
3869 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3870 {
3871   return new pass_lower_eh_dispatch (ctxt);
3872 }
3873 
3874 /* Walk statements, see what regions and, optionally, landing pads
3875    are really referenced.
3876 
3877    Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3878    and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3879 
3880    Passing NULL for LP_REACHABLEP is valid; in that case only reachable
3881    regions are marked.
3882 
3883    The caller is responsible for freeing the returned sbitmaps.  */
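
/* A typical use, mirroring remove_unreachable_handlers below (a sketch):

     sbitmap r_reachable, lp_reachable;
     mark_reachable_handlers (&r_reachable, &lp_reachable);
     ...query the bitmaps with bitmap_bit_p...
     sbitmap_free (r_reachable);
     sbitmap_free (lp_reachable);  */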
3884 
3885 static void
3886 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3887 {
3888   sbitmap r_reachable, lp_reachable;
3889   basic_block bb;
3890   bool mark_landing_pads = (lp_reachablep != NULL);
3891   gcc_checking_assert (r_reachablep != NULL);
3892 
3893   r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3894   bitmap_clear (r_reachable);
3895   *r_reachablep = r_reachable;
3896 
3897   if (mark_landing_pads)
3898     {
3899       lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3900       bitmap_clear (lp_reachable);
3901       *lp_reachablep = lp_reachable;
3902     }
3903   else
3904     lp_reachable = NULL;
3905 
3906   FOR_EACH_BB_FN (bb, cfun)
3907     {
3908       gimple_stmt_iterator gsi;
3909 
3910       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3911 	{
3912 	  gimple *stmt = gsi_stmt (gsi);
3913 
3914 	  if (mark_landing_pads)
3915 	    {
3916 	      int lp_nr = lookup_stmt_eh_lp (stmt);
3917 
3918 	      /* Negative LP numbers are MUST_NOT_THROW regions which
3919 		 are not considered BB enders.  */
3920 	      if (lp_nr < 0)
3921 		bitmap_set_bit (r_reachable, -lp_nr);
3922 
3923 	      /* Positive LP numbers are real landing pads, and BB enders.  */
3924 	      else if (lp_nr > 0)
3925 		{
3926 		  gcc_assert (gsi_one_before_end_p (gsi));
3927 		  eh_region region = get_eh_region_from_lp_number (lp_nr);
3928 		  bitmap_set_bit (r_reachable, region->index);
3929 		  bitmap_set_bit (lp_reachable, lp_nr);
3930 		}
3931 	    }
3932 
3933 	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
3934 	  switch (gimple_code (stmt))
3935 	    {
3936 	    case GIMPLE_RESX:
3937 	      bitmap_set_bit (r_reachable,
3938 			      gimple_resx_region (as_a <gresx *> (stmt)));
3939 	      break;
3940 	    case GIMPLE_EH_DISPATCH:
3941 	      bitmap_set_bit (r_reachable,
3942 			      gimple_eh_dispatch_region (
3943                                 as_a <geh_dispatch *> (stmt)));
3944 	      break;
3945 	    case GIMPLE_CALL:
3946 	      if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
3947 		for (int i = 0; i < 2; ++i)
3948 		  {
3949 		    tree rt = gimple_call_arg (stmt, i);
3950 		    HOST_WIDE_INT ri = tree_to_shwi (rt);
3951 
3952 		    gcc_assert (ri == (int)ri);
3953 		    bitmap_set_bit (r_reachable, ri);
3954 		  }
3955 	      break;
3956 	    default:
3957 	      break;
3958 	    }
3959 	}
3960     }
3961 }
3962 
3963 /* Remove unreachable handlers and unreachable landing pads.  */
3964 
3965 static void
3966 remove_unreachable_handlers (void)
3967 {
3968   sbitmap r_reachable, lp_reachable;
3969   eh_region region;
3970   eh_landing_pad lp;
3971   unsigned i;
3972 
3973   mark_reachable_handlers (&r_reachable, &lp_reachable);
3974 
3975   if (dump_file)
3976     {
3977       fprintf (dump_file, "Before removal of unreachable regions:\n");
3978       dump_eh_tree (dump_file, cfun);
3979       fprintf (dump_file, "Reachable regions: ");
3980       dump_bitmap_file (dump_file, r_reachable);
3981       fprintf (dump_file, "Reachable landing pads: ");
3982       dump_bitmap_file (dump_file, lp_reachable);
3983     }
3984 
3985   if (dump_file)
3986     {
3987       FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
3988 	if (region && !bitmap_bit_p (r_reachable, region->index))
3989 	  fprintf (dump_file,
3990 		   "Removing unreachable region %d\n",
3991 		   region->index);
3992     }
3993 
3994   remove_unreachable_eh_regions (r_reachable);
3995 
3996   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
3997     if (lp && !bitmap_bit_p (lp_reachable, lp->index))
3998       {
3999 	if (dump_file)
4000 	  fprintf (dump_file,
4001 		   "Removing unreachable landing pad %d\n",
4002 		   lp->index);
4003 	remove_eh_landing_pad (lp);
4004       }
4005 
4006   if (dump_file)
4007     {
4008       fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
4009       dump_eh_tree (dump_file, cfun);
4010       fprintf (dump_file, "\n\n");
4011     }
4012 
4013   sbitmap_free (r_reachable);
4014   sbitmap_free (lp_reachable);
4015 
4016   if (flag_checking)
4017     verify_eh_tree (cfun);
4018 }
4019 
4020 /* Remove unreachable handlers if any landing pads have been removed after
4021    last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
4022 
4023 void
4024 maybe_remove_unreachable_handlers (void)
4025 {
4026   eh_landing_pad lp;
4027   unsigned i;
4028 
4029   if (cfun->eh == NULL)
4030     return;
4031 
4032   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4033     if (lp && lp->post_landing_pad)
4034       {
4035 	if (label_to_block (cfun, lp->post_landing_pad) == NULL)
4036 	  {
4037 	    remove_unreachable_handlers ();
4038 	    return;
4039 	  }
4040       }
4041 }
4042 
4043 /* Remove regions that do not have landing pads.  This assumes
4044    that remove_unreachable_handlers has already been run, and
4045    that we've just manipulated the landing pads since then.
4046 
4047    Preserve regions with landing pads and regions that prevent
4048    exceptions from propagating further, even if these regions
4049    are not reachable.  */
4050 
4051 static void
4052 remove_unreachable_handlers_no_lp (void)
4053 {
4054   eh_region region;
4055   sbitmap r_reachable;
4056   unsigned i;
4057 
4058   mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
4059 
4060   FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4061     {
4062       if (! region)
4063 	continue;
4064 
4065       if (region->landing_pads != NULL
4066 	  || region->type == ERT_MUST_NOT_THROW)
4067 	bitmap_set_bit (r_reachable, region->index);
4068 
4069       if (dump_file
4070 	  && !bitmap_bit_p (r_reachable, region->index))
4071 	fprintf (dump_file,
4072 		 "Removing unreachable region %d\n",
4073 		 region->index);
4074     }
4075 
4076   remove_unreachable_eh_regions (r_reachable);
4077 
4078   sbitmap_free (r_reachable);
4079 }
4080 
4081 /* Undo critical edge splitting on an EH landing pad.  Earlier, we
4082    optimistically split all sorts of edges, including EH edges.  The
4083    optimization passes in between may not have needed them; if not,
4084    we should undo the split.
4085 
4086    Recognize this case by having one EH edge incoming to the BB and
4087    one normal edge outgoing; BB should be empty apart from the
4088    post_landing_pad label.
4089 
4090    Note that this is slightly different from the empty handler case
4091    handled by cleanup_empty_eh, in that the actual handler may yet
4092    have actual code but the landing pad has been separated from the
4093    handler.  As such, cleanup_empty_eh relies on this transformation
4094    having been done first.  */
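
/* An illustrative sketch: the landing pad block looks like

     <post_landing_pad label>:
       (no statements; falls through to the real handler)

   with exactly one incoming EH edge and one normal outgoing edge.  In
   that case the EH edge can be redirected straight to the handler and
   the empty block left to become unreachable.  */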
4095 
4096 static bool
4097 unsplit_eh (eh_landing_pad lp)
4098 {
4099   basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4100   gimple_stmt_iterator gsi;
4101   edge e_in, e_out;
4102 
4103   /* Quickly check the edge counts on BB for singularity.  */
4104   if (!single_pred_p (bb) || !single_succ_p (bb))
4105     return false;
4106   e_in = single_pred_edge (bb);
4107   e_out = single_succ_edge (bb);
4108 
4109   /* Input edge must be EH and output edge must be normal.  */
4110   if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4111     return false;
4112 
4113   /* The block must be empty except for the labels and debug insns.  */
4114   gsi = gsi_after_labels (bb);
4115   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4116     gsi_next_nondebug (&gsi);
4117   if (!gsi_end_p (gsi))
4118     return false;
4119 
4120   /* The destination block must not already have a landing pad
4121      for a different region.  */
4122   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4123     {
4124       glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4125       tree lab;
4126       int lp_nr;
4127 
4128       if (!label_stmt)
4129 	break;
4130       lab = gimple_label_label (label_stmt);
4131       lp_nr = EH_LANDING_PAD_NR (lab);
4132       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4133 	return false;
4134     }
4135 
4136   /* The new destination block must not already be a destination of
4137      the source block, lest we merge fallthru and EH edges and get
4138      all sorts of confusion.  */
4139   if (find_edge (e_in->src, e_out->dest))
4140     return false;
4141 
4142   /* ??? We can get degenerate phis due to cfg cleanups.  I would have
4143      thought this should have been cleaned up by a phicprop pass, but
4144      that doesn't appear to handle virtuals.  Propagate by hand.  */
4145   if (!gimple_seq_empty_p (phi_nodes (bb)))
4146     {
4147       for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4148 	{
4149 	  gimple *use_stmt;
4150 	  gphi *phi = gpi.phi ();
4151 	  tree lhs = gimple_phi_result (phi);
4152 	  tree rhs = gimple_phi_arg_def (phi, 0);
4153 	  use_operand_p use_p;
4154 	  imm_use_iterator iter;
4155 
4156 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4157 	    {
4158 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4159 		SET_USE (use_p, rhs);
4160 	    }
4161 
4162 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4163 	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4164 
4165 	  remove_phi_node (&gpi, true);
4166 	}
4167     }
4168 
4169   if (dump_file && (dump_flags & TDF_DETAILS))
4170     fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4171 	     lp->index, e_out->dest->index);
4172 
4173   /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
4174      a successor edge, humor it.  But do the real CFG change with the
4175      predecessor of E_OUT in order to preserve the ordering of arguments
4176      to the PHI nodes in E_OUT->DEST.  */
4177   redirect_eh_edge_1 (e_in, e_out->dest, false);
4178   redirect_edge_pred (e_out, e_in->src);
4179   e_out->flags = e_in->flags;
4180   e_out->probability = e_in->probability;
4181   remove_edge (e_in);
4182 
4183   return true;
4184 }
4185 
4186 /* Examine each landing pad block and see if it matches unsplit_eh.  */
4187 
4188 static bool
4189 unsplit_all_eh (void)
4190 {
4191   bool changed = false;
4192   eh_landing_pad lp;
4193   int i;
4194 
4195   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4196     if (lp)
4197       changed |= unsplit_eh (lp);
4198 
4199   return changed;
4200 }
4201 
4202 /* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
4203    to OLD_BB to NEW_BB; return true on success, false on failure.
4204 
4205    OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4206    PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4207    Virtual PHIs may be deleted and marked for renaming.  */
4208 
4209 static bool
4210 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4211 			     edge old_bb_out, bool change_region)
4212 {
4213   gphi_iterator ngsi, ogsi;
4214   edge_iterator ei;
4215   edge e;
4216   bitmap ophi_handled;
4217 
4218   /* The destination block must not be a regular successor for any
4219      of the preds of the landing pad.  Thus, avoid turning
4220         <..>
4221 	 |  \ EH
4222 	 |  <..>
4223 	 |  /
4224 	<..>
4225      into
4226         <..>
4227 	|  | EH
4228 	<..>
4229      which CFG verification would choke on.  See PR45172 and PR51089.  */
4230   FOR_EACH_EDGE (e, ei, old_bb->preds)
4231     if (find_edge (e->src, new_bb))
4232       return false;
4233 
4234   FOR_EACH_EDGE (e, ei, old_bb->preds)
4235     redirect_edge_var_map_clear (e);
4236 
4237   ophi_handled = BITMAP_ALLOC (NULL);
4238 
4239   /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4240      for the edges we're going to move.  */
4241   for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4242     {
4243       gphi *ophi, *nphi = ngsi.phi ();
4244       tree nresult, nop;
4245 
4246       nresult = gimple_phi_result (nphi);
4247       nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4248 
4249       /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4250 	 the source ssa_name.  */
4251       ophi = NULL;
4252       for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4253 	{
4254 	  ophi = ogsi.phi ();
4255 	  if (gimple_phi_result (ophi) == nop)
4256 	    break;
4257 	  ophi = NULL;
4258 	}
4259 
4260       /* If we did find the corresponding PHI, copy those inputs.  */
4261       if (ophi)
4262 	{
4263 	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
4264 	  if (!has_single_use (nop))
4265 	    {
4266 	      imm_use_iterator imm_iter;
4267 	      use_operand_p use_p;
4268 
4269 	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4270 		{
4271 		  if (!gimple_debug_bind_p (USE_STMT (use_p))
4272 		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4273 			  || gimple_bb (USE_STMT (use_p)) != new_bb))
4274 		    goto fail;
4275 		}
4276 	    }
4277 	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4278 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4279 	    {
4280 	      location_t oloc;
4281 	      tree oop;
4282 
4283 	      if ((e->flags & EDGE_EH) == 0)
4284 		continue;
4285 	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
4286 	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4287 	      redirect_edge_var_map_add (e, nresult, oop, oloc);
4288 	    }
4289 	}
4290       /* If we didn't find the PHI, if it's a real variable or a VOP, we know
4291 	 from the fact that OLD_BB is tree_empty_eh_handler_p that the
4292 	 variable is unchanged from input to the block and we can simply
4293 	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
4294       else
4295 	{
4296 	  location_t nloc
4297 	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4298 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4299 	    redirect_edge_var_map_add (e, nresult, nop, nloc);
4300 	}
4301     }
4302 
4303   /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
4304      we don't know what values from the other edges into NEW_BB to use.  */
4305   for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4306     {
4307       gphi *ophi = ogsi.phi ();
4308       tree oresult = gimple_phi_result (ophi);
4309       if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4310 	goto fail;
4311     }
4312 
4313   /* Finally, move the edges and update the PHIs.  */
4314   for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4315     if (e->flags & EDGE_EH)
4316       {
4317 	/* ???  CFG manipulation routines do not try to update loop
4318 	   form on edge redirection.  Do so manually here for now.  */
4319 	/* Redirecting a loop entry or latch edge will either create a
4320 	   multiple-entry loop or rotate the loop.  If the loops merge,
4321 	   we may have created a loop with multiple latches.
4322 	   All of this isn't easily fixed thus cancel the affected loop
4323 	   and mark the other loop as possibly having multiple latches.  */
4324 	if (e->dest == e->dest->loop_father->header)
4325 	  {
4326 	    mark_loop_for_removal (e->dest->loop_father);
4327 	    new_bb->loop_father->latch = NULL;
4328 	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4329 	  }
4330 	redirect_eh_edge_1 (e, new_bb, change_region);
4331 	redirect_edge_succ (e, new_bb);
4332 	flush_pending_stmts (e);
4333       }
4334     else
4335       ei_next (&ei);
4336 
4337   BITMAP_FREE (ophi_handled);
4338   return true;
4339 
4340  fail:
4341   FOR_EACH_EDGE (e, ei, old_bb->preds)
4342     redirect_edge_var_map_clear (e);
4343   BITMAP_FREE (ophi_handled);
4344   return false;
4345 }
4346 
4347 /* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
4348    old region to NEW_REGION at BB.  */
4349 
4350 static void
4351 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4352 			  eh_landing_pad lp, eh_region new_region)
4353 {
4354   gimple_stmt_iterator gsi;
4355   eh_landing_pad *pp;
4356 
4357   for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4358     continue;
4359   *pp = lp->next_lp;
4360 
4361   lp->region = new_region;
4362   lp->next_lp = new_region->landing_pads;
4363   new_region->landing_pads = lp;
4364 
4365   /* Delete the RESX that was matched within the empty handler block.  */
4366   gsi = gsi_last_bb (bb);
4367   unlink_stmt_vdef (gsi_stmt (gsi));
4368   gsi_remove (&gsi, true);
4369 
4370   /* Clean up E_OUT for the fallthru.  */
4371   e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4372   e_out->probability = profile_probability::always ();
4373 }
4374 
4375 /* A subroutine of cleanup_empty_eh.  Handle more complex cases of
4376    unsplitting than unsplit_eh was prepared to handle, e.g. when
4377    multiple incoming edges and phis are involved.  */
4378 
4379 static bool
4380 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4381 {
4382   gimple_stmt_iterator gsi;
4383   tree lab;
4384 
4385   /* We really ought not have totally lost everything following
4386      a landing pad label.  Given that BB is empty, there had better
4387      be a successor.  */
4388   gcc_assert (e_out != NULL);
4389 
4390   /* The destination block must not already have a landing pad
4391      for a different region.  */
4392   lab = NULL;
4393   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4394     {
4395       glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4396       int lp_nr;
4397 
4398       if (!stmt)
4399 	break;
4400       lab = gimple_label_label (stmt);
4401       lp_nr = EH_LANDING_PAD_NR (lab);
4402       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4403 	return false;
4404     }
4405 
4406   /* Attempt to move the PHIs into the successor block.  */
4407   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4408     {
4409       if (dump_file && (dump_flags & TDF_DETAILS))
4410 	fprintf (dump_file,
4411 		 "Unsplit EH landing pad %d to block %i "
4412 		 "(via cleanup_empty_eh).\n",
4413 		 lp->index, e_out->dest->index);
4414       return true;
4415     }
4416 
4417   return false;
4418 }
4419 
4420 /* Return true if edge E_FIRST is part of an empty infinite loop
4421    or leads to such a loop through a series of single successor
4422    empty bbs.  */
4423 
4424 static bool
4425 infinite_empty_loop_p (edge e_first)
4426 {
4427   bool inf_loop = false;
4428   edge e;
4429 
4430   if (e_first->dest == e_first->src)
4431     return true;
4432 
4433   e_first->src->aux = (void *) 1;
4434   for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4435     {
4436       gimple_stmt_iterator gsi;
4437       if (e->dest->aux)
4438 	{
4439 	  inf_loop = true;
4440 	  break;
4441 	}
4442       e->dest->aux = (void *) 1;
4443       gsi = gsi_after_labels (e->dest);
4444       if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4445 	gsi_next_nondebug (&gsi);
4446       if (!gsi_end_p (gsi))
4447 	break;
4448     }
4449   e_first->src->aux = NULL;
4450   for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4451     e->dest->aux = NULL;
4452 
4453   return inf_loop;
4454 }
4455 
4456 /* Examine the block associated with LP to determine if it's an empty
4457    handler for its EH region.  If so, attempt to redirect EH edges to
4458    an outer region.  Return true if the CFG was updated in any way.  This
4459    is similar to jump forwarding, just across EH edges.  */
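
/* For instance (a rough sketch), an empty cleanup handler

     <post_landing_pad label>:
       resx 2;     <-- immediately resumes into the outer region 2

   does no useful work, so its incoming EH edges can be redirected to
   region 2's landing pad, or, when every predecessor edge is an EH
   edge, the landing pad itself can simply be moved to region 2.  */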
4460 
4461 static bool
4462 cleanup_empty_eh (eh_landing_pad lp)
4463 {
4464   basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4465   gimple_stmt_iterator gsi;
4466   gimple *resx;
4467   eh_region new_region;
4468   edge_iterator ei;
4469   edge e, e_out;
4470   bool has_non_eh_pred;
4471   bool ret = false;
4472   int new_lp_nr;
4473 
4474   /* There can be zero or one edges out of BB.  This is the quickest test.  */
4475   switch (EDGE_COUNT (bb->succs))
4476     {
4477     case 0:
4478       e_out = NULL;
4479       break;
4480     case 1:
4481       e_out = single_succ_edge (bb);
4482       break;
4483     default:
4484       return false;
4485     }
4486 
4487   gsi = gsi_last_nondebug_bb (bb);
4488   resx = gsi_stmt (gsi);
4489   if (resx && is_gimple_resx (resx))
4490     {
4491       if (stmt_can_throw_external (cfun, resx))
4492 	optimize_clobbers (bb);
4493       else if (sink_clobbers (bb))
4494 	ret = true;
4495     }
4496 
4497   gsi = gsi_after_labels (bb);
4498 
4499   /* Make sure to skip debug statements.  */
4500   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4501     gsi_next_nondebug (&gsi);
4502 
4503   /* If the block is totally empty, look for more unsplitting cases.  */
4504   if (gsi_end_p (gsi))
4505     {
4506       /* For the degenerate case of an infinite loop bail out.
4507 	 If bb has no successors and is totally empty, which can happen e.g.
4508 	 because of an incorrect noreturn attribute, bail out too.  */
4509       if (e_out == NULL
4510 	  || infinite_empty_loop_p (e_out))
4511 	return ret;
4512 
4513       return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4514     }
4515 
4516   /* The block should consist only of a single RESX statement, modulo a
4517      preceding call to __builtin_stack_restore if there is no outgoing
4518      edge, since the call can be eliminated in this case.  */
4519   resx = gsi_stmt (gsi);
4520   if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4521     {
4522       gsi_next_nondebug (&gsi);
4523       resx = gsi_stmt (gsi);
4524     }
4525   if (!is_gimple_resx (resx))
4526     return ret;
4527   gcc_assert (gsi_one_nondebug_before_end_p (gsi));
4528 
4529   /* Determine if there are non-EH edges, or resx edges into the handler.  */
4530   has_non_eh_pred = false;
4531   FOR_EACH_EDGE (e, ei, bb->preds)
4532     if (!(e->flags & EDGE_EH))
4533       has_non_eh_pred = true;
4534 
4535   /* Find the handler that's outer of the empty handler by looking at
4536      where the RESX instruction was vectored.  */
4537   new_lp_nr = lookup_stmt_eh_lp (resx);
4538   new_region = get_eh_region_from_lp_number (new_lp_nr);
4539 
4540   /* If there's no destination region within the current function,
4541      redirection is trivial via removing the throwing statements from
4542      the EH region, removing the EH edges, and allowing the block
4543      to go unreachable.  */
4544   if (new_region == NULL)
4545     {
4546       gcc_assert (e_out == NULL);
4547       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4548 	if (e->flags & EDGE_EH)
4549 	  {
4550 	    gimple *stmt = last_stmt (e->src);
4551 	    remove_stmt_from_eh_lp (stmt);
4552 	    remove_edge (e);
4553 	  }
4554 	else
4555 	  ei_next (&ei);
4556       goto succeed;
4557     }
4558 
4559   /* If the destination region is a MUST_NOT_THROW, allow the runtime
4560      to handle the abort and allow the blocks to go unreachable.  */
4561   if (new_region->type == ERT_MUST_NOT_THROW)
4562     {
4563       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4564 	if (e->flags & EDGE_EH)
4565 	  {
4566 	    gimple *stmt = last_stmt (e->src);
4567 	    remove_stmt_from_eh_lp (stmt);
4568 	    add_stmt_to_eh_lp (stmt, new_lp_nr);
4569 	    remove_edge (e);
4570 	  }
4571 	else
4572 	  ei_next (&ei);
4573       goto succeed;
4574     }
4575 
4576   /* Try to redirect the EH edges and merge the PHIs into the destination
4577      landing pad block.  If the merge succeeds, we'll already have redirected
4578      all the EH edges.  The handler itself will go unreachable if there were
4579      no normal edges.  */
4580   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4581     goto succeed;
4582 
4583   /* Finally, if all input edges are EH edges, then we can (potentially)
4584      reduce the number of transfers from the runtime by moving the landing
4585      pad from the original region to the new region.  This is a win when
4586      we remove the last CLEANUP region along a particular exception
4587      propagation path.  Since nothing changes except for the region with
4588      which the landing pad is associated, the PHI nodes do not need to be
4589      adjusted at all.  */
4590   if (!has_non_eh_pred)
4591     {
4592       cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4593       if (dump_file && (dump_flags & TDF_DETAILS))
4594 	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4595 		 lp->index, new_region->index);
4596 
4597       /* ??? The CFG didn't change, but we may have rendered the
4598 	 old EH region unreachable.  Trigger a cleanup there.  */
4599       return true;
4600     }
4601 
4602   return ret;
4603 
4604  succeed:
4605   if (dump_file && (dump_flags & TDF_DETAILS))
4606     fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4607   remove_eh_landing_pad (lp);
4608   return true;
4609 }
4610 
4611 /* Do a post-order traversal of the EH region tree.  Examine each
4612    post_landing_pad block and see if we can eliminate it as empty.  */
4613 
4614 static bool
4615 cleanup_all_empty_eh (void)
4616 {
4617   bool changed = false;
4618   eh_landing_pad lp;
4619   int i;
4620 
4621   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4622     if (lp)
4623       changed |= cleanup_empty_eh (lp);
4624 
4625   return changed;
4626 }
4627 
4628 /* Perform cleanups and lowering of exception handling:
4629     1) cleanup regions whose handlers do nothing are optimized out
4630     2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
4631     3) info about regions containing instructions, and regions
4632        reachable via local EH edges, is collected
4633     4) the EH tree is pruned of regions that are no longer necessary.
4634 
4635    TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4636 	 Unify those that have the same failure decl and locus.
4637 */
4638 
4639 static unsigned int
4640 execute_cleanup_eh_1 (void)
4641 {
4642   /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4643      looking up unreachable landing pads.  */
4644   remove_unreachable_handlers ();
4645 
4646   /* Watch out for the region tree vanishing when all regions are unreachable.  */
4647   if (cfun->eh->region_tree)
4648     {
4649       bool changed = false;
4650 
4651       if (optimize)
4652 	changed |= unsplit_all_eh ();
4653       changed |= cleanup_all_empty_eh ();
4654 
4655       if (changed)
4656 	{
4657 	  free_dominance_info (CDI_DOMINATORS);
4658 	  free_dominance_info (CDI_POST_DOMINATORS);
4659 
4660           /* We delayed all basic block deletion, as we may have performed
4661 	     cleanups on EH edges while non-EH edges were still present.  */
4662 	  delete_unreachable_blocks ();
4663 
4664 	  /* We manipulated the landing pads.  Remove any region that no
4665 	     longer has a landing pad.  */
4666 	  remove_unreachable_handlers_no_lp ();
4667 
4668 	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4669 	}
4670     }
4671 
4672   return 0;
4673 }
4674 
4675 namespace {
4676 
4677 const pass_data pass_data_cleanup_eh =
4678 {
4679   GIMPLE_PASS, /* type */
4680   "ehcleanup", /* name */
4681   OPTGROUP_NONE, /* optinfo_flags */
4682   TV_TREE_EH, /* tv_id */
4683   PROP_gimple_lcf, /* properties_required */
4684   0, /* properties_provided */
4685   0, /* properties_destroyed */
4686   0, /* todo_flags_start */
4687   0, /* todo_flags_finish */
4688 };
4689 
4690 class pass_cleanup_eh : public gimple_opt_pass
4691 {
4692 public:
4693   pass_cleanup_eh (gcc::context *ctxt)
4694     : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4695   {}
4696 
4697   /* opt_pass methods: */
4698   opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4699   virtual bool gate (function *fun)
4700     {
4701       return fun->eh != NULL && fun->eh->region_tree != NULL;
4702     }
4703 
4704   virtual unsigned int execute (function *);
4705 
4706 }; // class pass_cleanup_eh
4707 
4708 unsigned int
4709 pass_cleanup_eh::execute (function *fun)
4710 {
4711   int ret = execute_cleanup_eh_1 ();
4712 
4713   /* If the function no longer needs an EH personality routine
4714      clear it.  This exposes cross-language inlining opportunities
4715      and avoids references to a never defined personality routine.  */
4716   if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4717       && function_needs_eh_personality (fun) != eh_personality_lang)
4718     DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4719 
4720   return ret;
4721 }
4722 
4723 } // anon namespace
4724 
4725 gimple_opt_pass *
4726 make_pass_cleanup_eh (gcc::context *ctxt)
4727 {
4728   return new pass_cleanup_eh (ctxt);
4729 }
4730 
4731 /* Verify that the BB containing STMT as its last statement has precisely
4732    the edges that make_eh_edges would create.  */
4733 
4734 DEBUG_FUNCTION bool
4735 verify_eh_edges (gimple *stmt)
4736 {
4737   basic_block bb = gimple_bb (stmt);
4738   eh_landing_pad lp = NULL;
4739   int lp_nr;
4740   edge_iterator ei;
4741   edge e, eh_edge;
4742 
4743   lp_nr = lookup_stmt_eh_lp (stmt);
4744   if (lp_nr > 0)
4745     lp = get_eh_landing_pad_from_number (lp_nr);
4746 
4747   eh_edge = NULL;
4748   FOR_EACH_EDGE (e, ei, bb->succs)
4749     {
4750       if (e->flags & EDGE_EH)
4751 	{
4752 	  if (eh_edge)
4753 	    {
4754 	      error ("BB %i has multiple EH edges", bb->index);
4755 	      return true;
4756 	    }
4757 	  else
4758 	    eh_edge = e;
4759 	}
4760     }
4761 
4762   if (lp == NULL)
4763     {
4764       if (eh_edge)
4765 	{
4766 	  error ("BB %i cannot throw but has an EH edge", bb->index);
4767 	  return true;
4768 	}
4769       return false;
4770     }
4771 
4772   if (!stmt_could_throw_p (cfun, stmt))
4773     {
4774       error ("BB %i last statement has incorrectly set lp", bb->index);
4775       return true;
4776     }
4777 
4778   if (eh_edge == NULL)
4779     {
4780       error ("BB %i is missing an EH edge", bb->index);
4781       return true;
4782     }
4783 
4784   if (eh_edge->dest != label_to_block (cfun, lp->post_landing_pad))
4785     {
4786       error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4787       return true;
4788     }
4789 
4790   return false;
4791 }
4792 
4793 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */
4794 
4795 DEBUG_FUNCTION bool
4796 verify_eh_dispatch_edge (geh_dispatch *stmt)
4797 {
4798   eh_region r;
4799   eh_catch c;
4800   basic_block src, dst;
4801   bool want_fallthru = true;
4802   edge_iterator ei;
4803   edge e, fall_edge;
4804 
4805   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4806   src = gimple_bb (stmt);
4807 
4808   FOR_EACH_EDGE (e, ei, src->succs)
4809     gcc_assert (e->aux == NULL);
4810 
4811   switch (r->type)
4812     {
4813     case ERT_TRY:
4814       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4815 	{
4816 	  dst = label_to_block (cfun, c->label);
4817 	  e = find_edge (src, dst);
4818 	  if (e == NULL)
4819 	    {
4820 	      error ("BB %i is missing an edge", src->index);
4821 	      return true;
4822 	    }
4823 	  e->aux = (void *)e;
4824 
4825 	  /* A catch-all handler doesn't have a fallthru.  */
4826 	  if (c->type_list == NULL)
4827 	    {
4828 	      want_fallthru = false;
4829 	      break;
4830 	    }
4831 	}
4832       break;
4833 
4834     case ERT_ALLOWED_EXCEPTIONS:
4835       dst = label_to_block (cfun, r->u.allowed.label);
4836       e = find_edge (src, dst);
4837       if (e == NULL)
4838 	{
4839 	  error ("BB %i is missing an edge", src->index);
4840 	  return true;
4841 	}
4842       e->aux = (void *)e;
4843       break;
4844 
4845     default:
4846       gcc_unreachable ();
4847     }
4848 
4849   fall_edge = NULL;
4850   FOR_EACH_EDGE (e, ei, src->succs)
4851     {
4852       if (e->flags & EDGE_FALLTHRU)
4853 	{
4854 	  if (fall_edge != NULL)
4855 	    {
4856 	      error ("BB %i too many fallthru edges", src->index);
4857 	      return true;
4858 	    }
4859 	  fall_edge = e;
4860 	}
4861       else if (e->aux)
4862 	e->aux = NULL;
4863       else
4864 	{
4865 	  error ("BB %i has incorrect edge", src->index);
4866 	  return true;
4867 	}
4868     }
4869   if ((fall_edge != NULL) ^ want_fallthru)
4870     {
4871       error ("BB %i has incorrect fallthru edge", src->index);
4872       return true;
4873     }
4874 
4875   return false;
4876 }
4877