/* Control flow functions for trees.
   Copyright (C) 2001-2019 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "trans-mem.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "except.h"
#include "cfgloop.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "tree-inline.h"
#include "tree-ssa-live.h"
#include "omp-general.h"
#include "omp-expand.h"
#include "tree-cfgcleanup.h"
#include "gimplify.h"
#include "attribs.h"
#include "selftest.h"
#include "opts.h"
#include "asan.h"

/* This file contains functions for building the Control Flow Graph (CFG)
   for a function tree.  */

/* Local declarations.  */

/* Initial capacity for the basic block array.  */
static const int initial_cfg_capacity = 20;

/* This hash table allows us to efficiently lookup all CASE_LABEL_EXPRs
   which use a particular edge.  The CASE_LABEL_EXPRs are chained together
   via their CASE_CHAIN field, which we clear after we're done with the
   hash table to prevent problems with duplication of GIMPLE_SWITCHes.

   Access to this list of CASE_LABEL_EXPRs allows us to efficiently
   update the case vector in response to edge redirections.

   Right now this table is set up and torn down at key points in the
   compilation process.  It would be nice if we could make the table
   more persistent.  The key is getting notification of changes to
   the CFG (particularly edge removal, creation and redirection).  */
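
/* For illustration (hypothetical GIMPLE, not from any particular
   testcase): given "switch (x) { case 1: case 2: goto L; }", the
   edge to L's block would map to a chain of the two case labels
   linked through their CASE_CHAIN fields, so a single lookup finds
   every case that must be retargeted when that edge is redirected.  */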

static hash_map<edge, tree> *edge_to_cases;

/* If we record edge_to_cases, this bitmap will hold indexes
   of basic blocks that end in a GIMPLE_SWITCH which we touched
   due to edge manipulations.  */

static bitmap touched_switch_bbs;

/* CFG statistics.  */
struct cfg_stats_d
{
  long num_merged_labels;
};

static struct cfg_stats_d cfg_stats;

/* Data to pass to replace_block_vars_by_duplicates_1.  */
struct replace_decls_d
{
  hash_map<tree, tree> *vars_map;
  tree to_context;
};

/* Hash table to store last discriminator assigned for each locus.  */
struct locus_discrim_map
{
  int location_line;
  int discriminator;
};

/* Hashtable helpers.  */

struct locus_discrim_hasher : free_ptr_hash <locus_discrim_map>
{
  static inline hashval_t hash (const locus_discrim_map *);
  static inline bool equal (const locus_discrim_map *,
                            const locus_discrim_map *);
};

/* Trivial hash function for a location_t.  ITEM is a pointer to
   a hash table entry that maps a location_t to a discriminator.  */

inline hashval_t
locus_discrim_hasher::hash (const locus_discrim_map *item)
{
  return item->location_line;
}

/* Equality function for the locus-to-discriminator map.  A and B
   point to the two hash table entries to compare.  */

inline bool
locus_discrim_hasher::equal (const locus_discrim_map *a,
                             const locus_discrim_map *b)
{
  return a->location_line == b->location_line;
}

static hash_table<locus_discrim_hasher> *discriminator_per_locus;

/* Basic blocks and flowgraphs.  */
static void make_blocks (gimple_seq);

/* Edges.  */
static void make_edges (void);
static void assign_discriminators (void);
static void make_cond_expr_edges (basic_block);
static void make_gimple_switch_edges (gswitch *, basic_block);
static bool make_goto_expr_edges (basic_block);
static void make_gimple_asm_edges (basic_block);
static edge gimple_redirect_edge_and_branch (edge, basic_block);
static edge gimple_try_redirect_by_replacing_jump (edge, basic_block);

/* Various helpers.  */
static inline bool stmt_starts_bb_p (gimple *, gimple *);
static int gimple_verify_flow_info (void);
static void gimple_make_forwarder_block (edge);
static gimple *first_non_label_stmt (basic_block);
static bool verify_gimple_transaction (gtransaction *);
static bool call_can_make_abnormal_goto (gimple *);

/* Flowgraph optimization and cleanup.  */
static void gimple_merge_blocks (basic_block, basic_block);
static bool gimple_can_merge_blocks_p (basic_block, basic_block);
static void remove_bb (basic_block);
static edge find_taken_edge_computed_goto (basic_block, tree);
static edge find_taken_edge_cond_expr (const gcond *, tree);

void
init_empty_tree_cfg_for_function (struct function *fn)
{
  /* Initialize the basic block array.  */
  init_flow (fn);
  profile_status_for_fn (fn) = PROFILE_ABSENT;
  n_basic_blocks_for_fn (fn) = NUM_FIXED_BLOCKS;
  last_basic_block_for_fn (fn) = NUM_FIXED_BLOCKS;
  vec_alloc (basic_block_info_for_fn (fn), initial_cfg_capacity);
  vec_safe_grow_cleared (basic_block_info_for_fn (fn),
                         initial_cfg_capacity);

  /* Build a mapping of labels to their associated blocks.  */
  vec_alloc (label_to_block_map_for_fn (fn), initial_cfg_capacity);
  vec_safe_grow_cleared (label_to_block_map_for_fn (fn),
                         initial_cfg_capacity);

  SET_BASIC_BLOCK_FOR_FN (fn, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (fn));
  SET_BASIC_BLOCK_FOR_FN (fn, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (fn));

  ENTRY_BLOCK_PTR_FOR_FN (fn)->next_bb
    = EXIT_BLOCK_PTR_FOR_FN (fn);
  EXIT_BLOCK_PTR_FOR_FN (fn)->prev_bb
    = ENTRY_BLOCK_PTR_FOR_FN (fn);
}

void
init_empty_tree_cfg (void)
{
  init_empty_tree_cfg_for_function (cfun);
}

/*---------------------------------------------------------------------------
                             Create basic blocks
---------------------------------------------------------------------------*/

/* Entry point to the CFG builder for trees.  SEQ is the sequence of
   statements to be added to the flowgraph.  */

static void
build_gimple_cfg (gimple_seq seq)
{
  /* Register specific gimple functions.  */
  gimple_register_cfg_hooks ();

  memset ((void *) &cfg_stats, 0, sizeof (cfg_stats));

  init_empty_tree_cfg ();

  make_blocks (seq);

  /* Make sure there is always at least one block, even if it's empty.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* Adjust the size of the array.  */
  if (basic_block_info_for_fn (cfun)->length ()
      < (size_t) n_basic_blocks_for_fn (cfun))
    vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
                           n_basic_blocks_for_fn (cfun));

  /* To speed up statement iterator walks, we first purge dead labels.  */
  cleanup_dead_labels ();

  /* Group case nodes to reduce the number of edges.
     We do this after cleaning up dead labels because otherwise we miss
     a lot of obvious case merging opportunities.  */
  group_case_labels ();

  /* Create the edges of the flowgraph.  */
  discriminator_per_locus = new hash_table<locus_discrim_hasher> (13);
  make_edges ();
  assign_discriminators ();
  cleanup_dead_labels ();
  delete discriminator_per_locus;
  discriminator_per_locus = NULL;
}

/* Look for ANNOTATE calls with loop annotation kind in BB; if found, remove
   them and propagate the information to LOOP.  We assume that the annotations
   come immediately before the condition in BB, if any.  */

static void
replace_loop_annotate_in_block (basic_block bb, struct loop *loop)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple *stmt = gsi_stmt (gsi);

  if (!(stmt && gimple_code (stmt) == GIMPLE_COND))
    return;

  for (gsi_prev_nondebug (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) != GIMPLE_CALL)
        break;
      if (!gimple_call_internal_p (stmt)
          || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
        break;

      switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
        {
        case annot_expr_ivdep_kind:
          loop->safelen = INT_MAX;
          break;
        case annot_expr_unroll_kind:
          loop->unroll
            = (unsigned short) tree_to_shwi (gimple_call_arg (stmt, 2));
          cfun->has_unroll = true;
          break;
        case annot_expr_no_vector_kind:
          loop->dont_vectorize = true;
          break;
        case annot_expr_vector_kind:
          loop->force_vectorize = true;
          cfun->has_force_vectorize_loops = true;
          break;
        case annot_expr_parallel_kind:
          loop->can_be_parallel = true;
          loop->safelen = INT_MAX;
          break;
        default:
          gcc_unreachable ();
        }

      stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  gimple_call_arg (stmt, 0));
      gsi_replace (&gsi, stmt, true);
    }
}

/* Look for ANNOTATE calls with loop annotation kind; if found, remove
   them and propagate the information to the loop.  We assume that the
   annotations come immediately before the condition of the loop.  */

static void
replace_loop_annotate (void)
{
  struct loop *loop;
  basic_block bb;
  gimple_stmt_iterator gsi;
  gimple *stmt;

  FOR_EACH_LOOP (loop, 0)
    {
      /* First look into the header.  */
      replace_loop_annotate_in_block (loop->header, loop);

      /* Then look into the latch, if any.  */
      if (loop->latch)
        replace_loop_annotate_in_block (loop->latch, loop);
    }

  /* Remove IFN_ANNOTATE.  Safeguard for the case loop->latch == NULL.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
        {
          stmt = gsi_stmt (gsi);
          if (gimple_code (stmt) != GIMPLE_CALL)
            continue;
          if (!gimple_call_internal_p (stmt)
              || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
            continue;

          switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
            {
            case annot_expr_ivdep_kind:
            case annot_expr_unroll_kind:
            case annot_expr_no_vector_kind:
            case annot_expr_vector_kind:
            case annot_expr_parallel_kind:
              break;
            default:
              gcc_unreachable ();
            }

          warning_at (gimple_location (stmt), 0, "ignoring loop annotation");
          stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                      gimple_call_arg (stmt, 0));
          gsi_replace (&gsi, stmt, true);
        }
    }
}

static unsigned int
execute_build_cfg (void)
{
  gimple_seq body = gimple_body (current_function_decl);

  build_gimple_cfg (body);
  gimple_set_body (current_function_decl, NULL);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Scope blocks:\n");
      dump_scope_blocks (dump_file, dump_flags);
    }
  cleanup_tree_cfg ();
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
  replace_loop_annotate ();
  return 0;
}

namespace {

const pass_data pass_data_build_cfg =
{
  GIMPLE_PASS, /* type */
  "cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CFG, /* tv_id */
  PROP_gimple_leh, /* properties_required */
  ( PROP_cfg | PROP_loops ), /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_build_cfg : public gimple_opt_pass
{
public:
  pass_build_cfg (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_build_cfg, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return execute_build_cfg (); }

}; // class pass_build_cfg

} // anon namespace

gimple_opt_pass *
make_pass_build_cfg (gcc::context *ctxt)
{
  return new pass_build_cfg (ctxt);
}


/* Return true if T is a computed goto.  */

bool
computed_goto_p (gimple *t)
{
  return (gimple_code (t) == GIMPLE_GOTO
          && TREE_CODE (gimple_goto_dest (t)) != LABEL_DECL);
}

/* Returns true if the sequence of statements STMTS only contains
   a call to __builtin_unreachable (), possibly preceded by labels,
   debug statements and clobbers.  */

bool
gimple_seq_unreachable_p (gimple_seq stmts)
{
  if (stmts == NULL
      /* Return false if -fsanitize=unreachable, we don't want to
         optimize away those calls, but rather turn them into
         __ubsan_handle_builtin_unreachable () or __builtin_trap ()
         later.  */
      || sanitize_flags_p (SANITIZE_UNREACHABLE))
    return false;

  gimple_stmt_iterator gsi = gsi_last (stmts);

  if (!gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_UNREACHABLE))
    return false;

  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) != GIMPLE_LABEL
          && !is_gimple_debug (stmt)
          && !gimple_clobber_p (stmt))
        return false;
    }
  return true;
}

/* Returns true for edge E where e->src ends with a GIMPLE_COND and
   the other edge points to a bb with just __builtin_unreachable ().
   I.e. return true for C->M edge in:
     <bb C>:
     ...
     if (something)
       goto <bb N>;
     else
       goto <bb M>;
     <bb N>:
     __builtin_unreachable ();
     <bb M>:  */

bool
assert_unreachable_fallthru_edge_p (edge e)
{
  basic_block pred_bb = e->src;
  gimple *last = last_stmt (pred_bb);
  if (last && gimple_code (last) == GIMPLE_COND)
    {
      basic_block other_bb = EDGE_SUCC (pred_bb, 0)->dest;
      if (other_bb == e->dest)
        other_bb = EDGE_SUCC (pred_bb, 1)->dest;
      if (EDGE_COUNT (other_bb->succs) == 0)
        return gimple_seq_unreachable_p (bb_seq (other_bb));
    }
  return false;
}


/* Initialize GF_CALL_CTRL_ALTERING flag, which indicates the call
   could alter control flow except via eh.  We initialize the flag at
   CFG build time and only ever clear it later.  */

static void
gimple_call_initialize_ctrl_altering (gimple *stmt)
{
  int flags = gimple_call_flags (stmt);

  /* A call alters control flow if it can make an abnormal goto.  */
  if (call_can_make_abnormal_goto (stmt)
      /* A call also alters control flow if it does not return.  */
      || flags & ECF_NORETURN
      /* TM ending statements have backedges out of the transaction.
         Return true so we split the basic block containing them.
         Note that the TM_BUILTIN test is merely an optimization.  */
      || ((flags & ECF_TM_BUILTIN)
          && is_tm_ending_fndecl (gimple_call_fndecl (stmt)))
      /* BUILT_IN_RETURN call is same as return statement.  */
      || gimple_call_builtin_p (stmt, BUILT_IN_RETURN)
      /* IFN_UNIQUE should be the last insn, to make checking for it
         as cheap as possible.  */
      || (gimple_call_internal_p (stmt)
          && gimple_call_internal_unique_p (stmt)))
    gimple_call_set_ctrl_altering (stmt, true);
  else
    gimple_call_set_ctrl_altering (stmt, false);
}


/* Insert SEQ after BB and build a flowgraph.  */

static basic_block
make_blocks_1 (gimple_seq seq, basic_block bb)
{
  gimple_stmt_iterator i = gsi_start (seq);
  gimple *stmt = NULL;
  gimple *prev_stmt = NULL;
  bool start_new_block = true;
  bool first_stmt_of_seq = true;

  while (!gsi_end_p (i))
    {
      /* PREV_STMT should only be set to a debug stmt if the debug
         stmt is before nondebug stmts.  Once stmt reaches a nondebug
         nonlabel, prev_stmt will be set to it, so that
         stmt_starts_bb_p will know to start a new block if a label is
         found.  However, if stmt was a label after debug stmts only,
         keep the label in prev_stmt even if we find further debug
         stmts, for there may be other labels after them, and they
         should land in the same block.  */
      if (!prev_stmt || !stmt || !is_gimple_debug (stmt))
        prev_stmt = stmt;
      stmt = gsi_stmt (i);

      if (stmt && is_gimple_call (stmt))
        gimple_call_initialize_ctrl_altering (stmt);

      /* If the statement starts a new basic block or if we have determined
         in a previous pass that we need to create a new block for STMT, do
         so now.  */
      if (start_new_block || stmt_starts_bb_p (stmt, prev_stmt))
        {
          if (!first_stmt_of_seq)
            gsi_split_seq_before (&i, &seq);
          bb = create_basic_block (seq, bb);
          start_new_block = false;
          prev_stmt = NULL;
        }

      /* Now add STMT to BB and create the subgraphs for special statement
         codes.  */
      gimple_set_bb (stmt, bb);

      /* If STMT is a basic block terminator, set START_NEW_BLOCK for the
         next iteration.  */
      if (stmt_ends_bb_p (stmt))
        {
          /* If the stmt can make abnormal goto use a new temporary
             for the assignment to the LHS.  This makes sure the old value
             of the LHS is available on the abnormal edge.  Otherwise
             we will end up with overlapping life-ranges for abnormal
             SSA names.  */
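          /* E.g. (hypothetical): "x = setjmp (env);" is rewritten as
             "tmp = setjmp (env); x = tmp;", so only TMP is live across
             the abnormal edge.  */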
          if (gimple_has_lhs (stmt)
              && stmt_can_make_abnormal_goto (stmt)
              && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
            {
              tree lhs = gimple_get_lhs (stmt);
              tree tmp = create_tmp_var (TREE_TYPE (lhs));
              gimple *s = gimple_build_assign (lhs, tmp);
              gimple_set_location (s, gimple_location (stmt));
              gimple_set_block (s, gimple_block (stmt));
              gimple_set_lhs (stmt, tmp);
              if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
                  || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
                DECL_GIMPLE_REG_P (tmp) = 1;
              gsi_insert_after (&i, s, GSI_SAME_STMT);
            }
          start_new_block = true;
        }

      gsi_next (&i);
      first_stmt_of_seq = false;
    }
  return bb;
}

/* Build a flowgraph for the sequence of stmts SEQ.  */

static void
make_blocks (gimple_seq seq)
{
  /* Look for debug markers right before labels, and move the debug
     stmts after the labels.  Accepting labels among debug markers
     adds no value, just complexity; if we wanted to annotate labels
     with view numbers (so sequencing among markers would matter) or
     somesuch, we're probably better off still moving the labels, but
     adding other debug annotations in their original positions or
     emitting nonbind or bind markers associated with the labels in
     the original position of the labels.

     Moving labels would probably be simpler, but we can't do that:
     moving labels assigns label ids to them, and doing so because of
     debug markers makes for -fcompare-debug and possibly even codegen
     differences.  So, we have to move the debug stmts instead.  To
     that end, we scan SEQ backwards, marking the position of the
     latest (earliest we find) label, and moving debug stmts that are
     not separated from it by nondebug nonlabel stmts after the
     label.  */
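  /* For instance (hypothetical GIMPLE), a "# DEBUG BEGIN_STMT" marker
     found immediately before "L:" ends up immediately after it, so the
     label L still starts its basic block.  */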
  if (MAY_HAVE_DEBUG_MARKER_STMTS)
    {
      gimple_stmt_iterator label = gsi_none ();

      for (gimple_stmt_iterator i = gsi_last (seq); !gsi_end_p (i); gsi_prev (&i))
        {
          gimple *stmt = gsi_stmt (i);

          /* If this is the first label we encounter (latest in SEQ)
             before nondebug stmts, record its position.  */
          if (is_a <glabel *> (stmt))
            {
              if (gsi_end_p (label))
                label = i;
              continue;
            }

          /* Without a recorded label position to move debug stmts to,
             there's nothing to do.  */
          if (gsi_end_p (label))
            continue;

          /* Move the debug stmt at I after LABEL.  */
          if (is_gimple_debug (stmt))
            {
              gcc_assert (gimple_debug_nonbind_marker_p (stmt));
              /* As STMT is removed, I advances to the stmt after
                 STMT, so the gsi_prev in the for "increment"
                 expression gets us to the stmt we're to visit after
                 STMT.  LABEL, however, would advance to the moved
                 stmt if we passed it to gsi_move_after, so pass it a
                 copy instead, so as to keep LABEL pointing to the
                 LABEL.  */
              gimple_stmt_iterator copy = label;
              gsi_move_after (&i, &copy);
              continue;
            }

          /* There aren't any (more?) debug stmts before label, so
             there isn't anything else to move after it.  */
          label = gsi_none ();
        }
    }

  make_blocks_1 (seq, ENTRY_BLOCK_PTR_FOR_FN (cfun));
}

/* Create and return a new empty basic block after bb AFTER.  */

static basic_block
create_bb (void *h, void *e, basic_block after)
{
  basic_block bb;

  gcc_assert (!e);

  /* Create and initialize a new basic block.  Since alloc_block uses
     GC allocation that clears memory to allocate a basic block, we do
     not have to clear the newly allocated basic block here.  */
  bb = alloc_block ();

  bb->index = last_basic_block_for_fn (cfun);
  bb->flags = BB_NEW;
  set_bb_seq (bb, h ? (gimple_seq) h : NULL);

  /* Add the new block to the linked list of blocks.  */
  link_block (bb, after);

  /* Grow the basic block array if needed.  */
  if ((size_t) last_basic_block_for_fn (cfun)
      == basic_block_info_for_fn (cfun)->length ())
    {
      size_t new_size =
        (last_basic_block_for_fn (cfun)
         + (last_basic_block_for_fn (cfun) + 3) / 4);
      vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
    }

  /* Add the newly created block to the array.  */
  SET_BASIC_BLOCK_FOR_FN (cfun, last_basic_block_for_fn (cfun), bb);

  n_basic_blocks_for_fn (cfun)++;
  last_basic_block_for_fn (cfun)++;

  return bb;
}


/*---------------------------------------------------------------------------
                                Edge creation
---------------------------------------------------------------------------*/

/* If basic block BB has an abnormal edge to a basic block
   containing IFN_ABNORMAL_DISPATCHER internal call, return
   that dispatcher's basic block, otherwise return NULL.  */

basic_block
get_abnormal_succ_dispatcher (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) == EDGE_ABNORMAL)
      {
        gimple_stmt_iterator gsi
          = gsi_start_nondebug_after_labels_bb (e->dest);
        gimple *g = gsi_stmt (gsi);
        if (g && gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
          return e->dest;
      }
  return NULL;
}

/* Helper function for make_edges.  Create a basic block with
   ABNORMAL_DISPATCHER internal call in it if needed, and
   create abnormal edges from BBS to it and from it to FOR_BB
   if COMPUTED_GOTO is false, otherwise factor the computed gotos.  */
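
/* Sketch of the factored form (hypothetical GIMPLE): each original
   "goto *p_i;" becomes "gotovar = p_i; goto <dispatcher>;", and the
   dispatcher block contains "<factored label>: goto *gotovar;", so
   the many-to-many set of abnormal edges collapses into edges that
   all pass through a single block.  */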

static void
handle_abnormal_edges (basic_block *dispatcher_bbs,
                       basic_block for_bb, int *bb_to_omp_idx,
                       auto_vec<basic_block> *bbs, bool computed_goto)
{
  basic_block *dispatcher = dispatcher_bbs + (computed_goto ? 1 : 0);
  unsigned int idx = 0;
  basic_block bb;
  bool inner = false;

  if (bb_to_omp_idx)
    {
      dispatcher = dispatcher_bbs + 2 * bb_to_omp_idx[for_bb->index];
      if (bb_to_omp_idx[for_bb->index] != 0)
        inner = true;
    }

  /* If the dispatcher has been created already, then there are basic
     blocks with abnormal edges to it, so just make a new edge to
     for_bb.  */
  if (*dispatcher == NULL)
    {
      /* Check if there are any basic blocks that need to have
         abnormal edges to this dispatcher.  If there are none, return
         early.  */
      if (bb_to_omp_idx == NULL)
        {
          if (bbs->is_empty ())
            return;
        }
      else
        {
          FOR_EACH_VEC_ELT (*bbs, idx, bb)
            if (bb_to_omp_idx[bb->index] == bb_to_omp_idx[for_bb->index])
              break;
          if (bb == NULL)
            return;
        }

      /* Create the dispatcher bb.  */
      *dispatcher = create_basic_block (NULL, for_bb);
      if (computed_goto)
        {
          /* Factor computed gotos into a common computed goto site.  Also
             record the location of that site so that we can un-factor the
             gotos after we have converted back to normal form.  */
          gimple_stmt_iterator gsi = gsi_start_bb (*dispatcher);

          /* Create the destination of the factored goto.  Each original
             computed goto will put its desired destination into this
             variable and jump to the label we create immediately below.  */
          tree var = create_tmp_var (ptr_type_node, "gotovar");

          /* Build a label for the new block which will contain the
             factored computed goto.  */
          tree factored_label_decl
            = create_artificial_label (UNKNOWN_LOCATION);
          gimple *factored_computed_goto_label
            = gimple_build_label (factored_label_decl);
          gsi_insert_after (&gsi, factored_computed_goto_label, GSI_NEW_STMT);

          /* Build our new computed goto.  */
          gimple *factored_computed_goto = gimple_build_goto (var);
          gsi_insert_after (&gsi, factored_computed_goto, GSI_NEW_STMT);

          FOR_EACH_VEC_ELT (*bbs, idx, bb)
            {
              if (bb_to_omp_idx
                  && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
                continue;

              gsi = gsi_last_bb (bb);
              gimple *last = gsi_stmt (gsi);

              gcc_assert (computed_goto_p (last));

              /* Copy the original computed goto's destination into VAR.  */
              gimple *assignment
                = gimple_build_assign (var, gimple_goto_dest (last));
              gsi_insert_before (&gsi, assignment, GSI_SAME_STMT);

              edge e = make_edge (bb, *dispatcher, EDGE_FALLTHRU);
              e->goto_locus = gimple_location (last);
              gsi_remove (&gsi, true);
            }
        }
      else
        {
          tree arg = inner ? boolean_true_node : boolean_false_node;
          gimple *g = gimple_build_call_internal (IFN_ABNORMAL_DISPATCHER,
                                                  1, arg);
          gimple_stmt_iterator gsi = gsi_after_labels (*dispatcher);
          gsi_insert_after (&gsi, g, GSI_NEW_STMT);

          /* Create predecessor edges of the dispatcher.  */
          FOR_EACH_VEC_ELT (*bbs, idx, bb)
            {
              if (bb_to_omp_idx
                  && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
                continue;
              make_edge (bb, *dispatcher, EDGE_ABNORMAL);
            }
        }
    }

  make_edge (*dispatcher, for_bb, EDGE_ABNORMAL);
}

/* Creates outgoing edges for BB.  Returns 1 when it ends with a
   computed goto, returns 2 when it ends with a statement that
   might return to this function via a nonlocal goto, otherwise
   return 0.  Updates *PCUR_REGION with the OMP region this BB is in.  */

static int
make_edges_bb (basic_block bb, struct omp_region **pcur_region, int *pomp_index)
{
  gimple *last = last_stmt (bb);
  bool fallthru = false;
  int ret = 0;

  if (!last)
    return ret;

  switch (gimple_code (last))
    {
    case GIMPLE_GOTO:
      if (make_goto_expr_edges (bb))
        ret = 1;
      fallthru = false;
      break;
    case GIMPLE_RETURN:
      {
        edge e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
        e->goto_locus = gimple_location (last);
        fallthru = false;
      }
      break;
    case GIMPLE_COND:
      make_cond_expr_edges (bb);
      fallthru = false;
      break;
    case GIMPLE_SWITCH:
      make_gimple_switch_edges (as_a <gswitch *> (last), bb);
      fallthru = false;
      break;
    case GIMPLE_RESX:
      make_eh_edges (last);
      fallthru = false;
      break;
    case GIMPLE_EH_DISPATCH:
      fallthru = make_eh_dispatch_edges (as_a <geh_dispatch *> (last));
      break;

    case GIMPLE_CALL:
      /* If this function receives a nonlocal goto, then we need to
         make edges from this call site to all the nonlocal goto
         handlers.  */
      if (stmt_can_make_abnormal_goto (last))
        ret = 2;

      /* If this statement has reachable exception handlers, then
         create abnormal edges to them.  */
      make_eh_edges (last);

      /* BUILTIN_RETURN is really a return statement.  */
      if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
        {
          make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
          fallthru = false;
        }
      /* Some calls are known not to return.  */
      else
        fallthru = !gimple_call_noreturn_p (last);
      break;

    case GIMPLE_ASSIGN:
      /* A GIMPLE_ASSIGN may throw internally and thus be considered
         control-altering.  */
      if (is_ctrl_altering_stmt (last))
        make_eh_edges (last);
      fallthru = true;
      break;

    case GIMPLE_ASM:
      make_gimple_asm_edges (bb);
      fallthru = true;
      break;

    CASE_GIMPLE_OMP:
      fallthru = omp_make_gimple_edges (bb, pcur_region, pomp_index);
      break;

    case GIMPLE_TRANSACTION:
      {
        gtransaction *txn = as_a <gtransaction *> (last);
        tree label1 = gimple_transaction_label_norm (txn);
        tree label2 = gimple_transaction_label_uninst (txn);

        if (label1)
          make_edge (bb, label_to_block (cfun, label1), EDGE_FALLTHRU);
        if (label2)
          make_edge (bb, label_to_block (cfun, label2),
                     EDGE_TM_UNINSTRUMENTED | (label1 ? 0 : EDGE_FALLTHRU));

        tree label3 = gimple_transaction_label_over (txn);
        if (gimple_transaction_subcode (txn)
            & (GTMA_HAVE_ABORT | GTMA_IS_OUTER))
          make_edge (bb, label_to_block (cfun, label3), EDGE_TM_ABORT);

        fallthru = false;
      }
      break;

    default:
      gcc_assert (!stmt_ends_bb_p (last));
      fallthru = true;
      break;
    }

  if (fallthru)
    make_edge (bb, bb->next_bb, EDGE_FALLTHRU);

  return ret;
}

/* Join all the blocks in the flowgraph.  */

static void
make_edges (void)
{
  basic_block bb;
  struct omp_region *cur_region = NULL;
  auto_vec<basic_block> ab_edge_goto;
  auto_vec<basic_block> ab_edge_call;
  int *bb_to_omp_idx = NULL;
  int cur_omp_region_idx = 0;

  /* Create an edge from entry to the first block with executable
     statements in it.  */
  make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun),
             BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS),
             EDGE_FALLTHRU);

  /* Traverse the basic block array placing edges.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      int mer;

      if (bb_to_omp_idx)
        bb_to_omp_idx[bb->index] = cur_omp_region_idx;

      mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx);
      if (mer == 1)
        ab_edge_goto.safe_push (bb);
      else if (mer == 2)
        ab_edge_call.safe_push (bb);

      if (cur_region && bb_to_omp_idx == NULL)
        bb_to_omp_idx = XCNEWVEC (int, n_basic_blocks_for_fn (cfun));
    }

  /* Computed gotos are hell to deal with, especially if there are
     lots of them with a large number of destinations.  So we factor
     them to a common computed goto location before we build the
     edge list.  After we convert back to normal form, we will un-factor
     the computed gotos since factoring introduces an unwanted jump.
     For non-local gotos and abnormal edges from calls to calls that return
     twice or forced labels, factor the abnormal edges too, by having all
     abnormal edges from the calls go to a common artificial basic block
     with ABNORMAL_DISPATCHER internal call and abnormal edges from that
     basic block to all forced labels and calls returning twice.
     We do this per-OpenMP structured block, because those regions
     are guaranteed to be single entry single exit by the standard,
     so it is not allowed to enter or exit such regions abnormally this way,
     thus all computed gotos, non-local gotos and setjmp/longjmp calls
     must not transfer control across SESE region boundaries.  */
  if (!ab_edge_goto.is_empty () || !ab_edge_call.is_empty ())
    {
      gimple_stmt_iterator gsi;
      basic_block dispatcher_bb_array[2] = { NULL, NULL };
      basic_block *dispatcher_bbs = dispatcher_bb_array;
      int count = n_basic_blocks_for_fn (cfun);

      if (bb_to_omp_idx)
        dispatcher_bbs = XCNEWVEC (basic_block, 2 * count);

      FOR_EACH_BB_FN (bb, cfun)
        {
          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
              tree target;

              if (!label_stmt)
                break;

              target = gimple_label_label (label_stmt);

              /* Make an edge to every label block that has been marked as a
                 potential target for a computed goto or a non-local goto.  */
              if (FORCED_LABEL (target))
                handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
                                       &ab_edge_goto, true);
              if (DECL_NONLOCAL (target))
                {
                  handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
                                         &ab_edge_call, false);
                  break;
                }
            }

          if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
            gsi_next_nondebug (&gsi);
          if (!gsi_end_p (gsi))
            {
              /* Make an edge to every setjmp-like call.  */
              gimple *call_stmt = gsi_stmt (gsi);
              if (is_gimple_call (call_stmt)
                  && ((gimple_call_flags (call_stmt) & ECF_RETURNS_TWICE)
                      || gimple_call_builtin_p (call_stmt,
                                                BUILT_IN_SETJMP_RECEIVER)))
                handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
                                       &ab_edge_call, false);
            }
        }

      if (bb_to_omp_idx)
        XDELETE (dispatcher_bbs);
    }

  XDELETE (bb_to_omp_idx);

  omp_free_regions ();
}

/* Add SEQ after GSI.  Start new bb after GSI, and create further bbs as
   needed.  Returns true if new bbs were created.
   Note: This is transitional code, and should not be used for new code.  We
   should be able to get rid of this by rewriting all target va-arg
   gimplification hooks to use an interface gimple_build_cond_value as described
   in https://gcc.gnu.org/ml/gcc-patches/2015-02/msg01194.html.  */

bool
gimple_find_sub_bbs (gimple_seq seq, gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  basic_block bb = gimple_bb (stmt);
  basic_block lastbb, afterbb;
  int old_num_bbs = n_basic_blocks_for_fn (cfun);
  edge e;
  lastbb = make_blocks_1 (seq, bb);
  if (old_num_bbs == n_basic_blocks_for_fn (cfun))
    return false;
  e = split_block (bb, stmt);
  /* Move e->dest to come after the new basic blocks.  */
  afterbb = e->dest;
  unlink_block (afterbb);
  link_block (afterbb, lastbb);
  redirect_edge_succ (e, bb->next_bb);
  bb = bb->next_bb;
  while (bb != afterbb)
    {
      struct omp_region *cur_region = NULL;
      profile_count cnt = profile_count::zero ();
      bool all = true;

      int cur_omp_region_idx = 0;
      int mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx);
      gcc_assert (!mer && !cur_region);
      add_bb_to_loop (bb, afterbb->loop_father);

      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->count ().initialized_p ())
            cnt += e->count ();
          else
            all = false;
        }
      tree_guess_outgoing_edge_probabilities (bb);
      if (all || profile_status_for_fn (cfun) == PROFILE_READ)
        bb->count = cnt;

      bb = bb->next_bb;
    }
  return true;
}

/* Find the next available discriminator value for LOCUS.  The
   discriminator distinguishes among several basic blocks that
   share a common locus, allowing for more accurate sample-based
   profiling.  */
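
/* For example (hypothetical): if two distinct basic blocks both end on
   source line 42, assign_discriminators below leaves one block's
   discriminator at 0 and gives the other the next value from this
   table, so a sample-based profiler can attribute counts to each block
   separately even though they share a line.  */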

static int
next_discriminator_for_locus (int line)
{
  struct locus_discrim_map item;
  struct locus_discrim_map **slot;

  item.location_line = line;
  item.discriminator = 0;
  slot = discriminator_per_locus->find_slot_with_hash (&item, line, INSERT);
  gcc_assert (slot);
  if (*slot == HTAB_EMPTY_ENTRY)
    {
      *slot = XNEW (struct locus_discrim_map);
      gcc_assert (*slot);
      (*slot)->location_line = line;
      (*slot)->discriminator = 0;
    }
  (*slot)->discriminator++;
  return (*slot)->discriminator;
}

/* Return TRUE if LOCUS1 and LOCUS2 refer to the same source line.  */

static bool
same_line_p (location_t locus1, expanded_location *from, location_t locus2)
{
  expanded_location to;

  if (locus1 == locus2)
    return true;

  to = expand_location (locus2);

  if (from->line != to.line)
    return false;
  if (from->file == to.file)
    return true;
  return (from->file != NULL
          && to.file != NULL
          && filename_cmp (from->file, to.file) == 0);
}

/* Assign discriminators to each basic block.  */

static void
assign_discriminators (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      edge e;
      edge_iterator ei;
      gimple *last = last_stmt (bb);
      location_t locus = last ? gimple_location (last) : UNKNOWN_LOCATION;

      if (locus == UNKNOWN_LOCATION)
        continue;

      expanded_location locus_e = expand_location (locus);

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          gimple *first = first_non_label_stmt (e->dest);
          gimple *last = last_stmt (e->dest);
          if ((first && same_line_p (locus, &locus_e,
                                     gimple_location (first)))
              || (last && same_line_p (locus, &locus_e,
                                       gimple_location (last))))
            {
              if (e->dest->discriminator != 0 && bb->discriminator == 0)
                bb->discriminator
                  = next_discriminator_for_locus (locus_e.line);
              else
                e->dest->discriminator
                  = next_discriminator_for_locus (locus_e.line);
            }
        }
    }
}

/* Create the edges for a GIMPLE_COND starting at block BB.  */

static void
make_cond_expr_edges (basic_block bb)
{
  gcond *entry = as_a <gcond *> (last_stmt (bb));
  gimple *then_stmt, *else_stmt;
  basic_block then_bb, else_bb;
  tree then_label, else_label;
  edge e;

  gcc_assert (entry);
  gcc_assert (gimple_code (entry) == GIMPLE_COND);

  /* Entry basic blocks for each component.  */
  then_label = gimple_cond_true_label (entry);
  else_label = gimple_cond_false_label (entry);
  then_bb = label_to_block (cfun, then_label);
  else_bb = label_to_block (cfun, else_label);
  then_stmt = first_stmt (then_bb);
  else_stmt = first_stmt (else_bb);

  e = make_edge (bb, then_bb, EDGE_TRUE_VALUE);
  e->goto_locus = gimple_location (then_stmt);
  e = make_edge (bb, else_bb, EDGE_FALSE_VALUE);
  if (e)
    e->goto_locus = gimple_location (else_stmt);

  /* We do not need the labels anymore.  */
  gimple_cond_set_true_label (entry, NULL_TREE);
  gimple_cond_set_false_label (entry, NULL_TREE);
}


/* Called for each element in the hash table (P) as we delete the
   edge to cases hash table.

   Clear all the CASE_CHAINs to prevent problems with copying of
   SWITCH_EXPRs and structure sharing rules, then free the hash table
   element.  */

bool
edge_to_cases_cleanup (edge const &, tree const &value, void *)
{
  tree t, next;

  for (t = value; t; t = next)
    {
      next = CASE_CHAIN (t);
      CASE_CHAIN (t) = NULL;
    }

  return true;
}

/* Start recording information mapping edges to case labels.  */

void
start_recording_case_labels (void)
{
  gcc_assert (edge_to_cases == NULL);
  edge_to_cases = new hash_map<edge, tree>;
  touched_switch_bbs = BITMAP_ALLOC (NULL);
}

/* Return nonzero if we are recording information for case labels.  */

static bool
recording_case_labels_p (void)
{
  return (edge_to_cases != NULL);
}

/* Stop recording information mapping edges to case labels and
   remove any information we have recorded.  */
void
end_recording_case_labels (void)
{
  bitmap_iterator bi;
  unsigned i;
  edge_to_cases->traverse<void *, edge_to_cases_cleanup> (NULL);
  delete edge_to_cases;
  edge_to_cases = NULL;
  EXECUTE_IF_SET_IN_BITMAP (touched_switch_bbs, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        {
          gimple *stmt = last_stmt (bb);
          if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
            group_case_labels_stmt (as_a <gswitch *> (stmt));
        }
    }
  BITMAP_FREE (touched_switch_bbs);
}

/* If we are inside a {start,end}_recording_cases block, then return
   a chain of CASE_LABEL_EXPRs from T which reference E.

   Otherwise return NULL.  */

static tree
get_cases_for_edge (edge e, gswitch *t)
{
  tree *slot;
  size_t i, n;

  /* If we are not recording cases, then we do not have CASE_LABEL_EXPR
     chains available.  Return NULL so the caller can detect this case.  */
  if (!recording_case_labels_p ())
    return NULL;

  slot = edge_to_cases->get (e);
  if (slot)
    return *slot;

  /* If we did not find E in the hash table, then this must be the first
     time we have been queried for information about E & T.  Add all the
     elements from T to the hash table then perform the query again.  */

  n = gimple_switch_num_labels (t);
  for (i = 0; i < n; i++)
    {
      tree elt = gimple_switch_label (t, i);
      tree lab = CASE_LABEL (elt);
      basic_block label_bb = label_to_block (cfun, lab);
      edge this_edge = find_edge (e->src, label_bb);

      /* Add it to the chain of CASE_LABEL_EXPRs referencing E, or create
         a new chain.  */
      tree &s = edge_to_cases->get_or_insert (this_edge);
      CASE_CHAIN (elt) = s;
      s = elt;
    }

  return *edge_to_cases->get (e);
}

/* Create the edges for a GIMPLE_SWITCH starting at block BB.  */

static void
make_gimple_switch_edges (gswitch *entry, basic_block bb)
{
  size_t i, n;

  n = gimple_switch_num_labels (entry);

  for (i = 0; i < n; ++i)
    {
      basic_block label_bb = gimple_switch_label_bb (cfun, entry, i);
      make_edge (bb, label_bb, 0);
    }
}


/* Return the basic block holding label DEST.  */

basic_block
label_to_block (struct function *ifun, tree dest)
{
  int uid = LABEL_DECL_UID (dest);

  /* We would die hard when faced by an undefined label.  Emit a label to
     the very first basic block.  This will hopefully make even the dataflow
     and undefined variable warnings quite right.  */
  if (seen_error () && uid < 0)
    {
      gimple_stmt_iterator gsi =
        gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS));
      gimple *stmt;

      stmt = gimple_build_label (dest);
      gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
      uid = LABEL_DECL_UID (dest);
    }
  if (vec_safe_length (ifun->cfg->x_label_to_block_map) <= (unsigned int) uid)
    return NULL;
  return (*ifun->cfg->x_label_to_block_map)[uid];
}

/* Create edges for a goto statement at block BB.  Returns true
   if abnormal edges should be created.  */

static bool
make_goto_expr_edges (basic_block bb)
{
  gimple_stmt_iterator last = gsi_last_bb (bb);
  gimple *goto_t = gsi_stmt (last);

  /* A simple GOTO creates normal edges.  */
  if (simple_goto_p (goto_t))
    {
      tree dest = gimple_goto_dest (goto_t);
      basic_block label_bb = label_to_block (cfun, dest);
      edge e = make_edge (bb, label_bb, EDGE_FALLTHRU);
      e->goto_locus = gimple_location (goto_t);
      gsi_remove (&last, true);
      return false;
    }

  /* A computed GOTO creates abnormal edges.  */
  return true;
}

/* Create edges for an asm statement with labels at block BB.  */

static void
make_gimple_asm_edges (basic_block bb)
{
  gasm *stmt = as_a <gasm *> (last_stmt (bb));
  int i, n = gimple_asm_nlabels (stmt);

  for (i = 0; i < n; ++i)
    {
      tree label = TREE_VALUE (gimple_asm_label_op (stmt, i));
      basic_block label_bb = label_to_block (cfun, label);
      make_edge (bb, label_bb, 0);
    }
}

/*---------------------------------------------------------------------------
                              Flowgraph analysis
---------------------------------------------------------------------------*/

/* Cleanup useless labels in basic blocks.  This is something we wish
   to do early because it allows us to group case labels before creating
   the edges for the CFG, and it speeds up block statement iterators in
   all passes later on.
   We rerun this pass after CFG is created, to get rid of the labels that
   are no longer referenced.  After then we do not run it any more, since
   (almost) no new labels should be created.  */
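
/* For example (hypothetical): if a block starts with "L1: L2: L3:" and
   only L2 is user-defined, L2 is chosen as the leading label, all
   references to L1 and L3 are redirected to L2, and the now-unused
   artificial labels are deleted.  */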

/* A map from basic block index to the leading label of that block.  */
static struct label_record
{
  /* The label.  */
  tree label;

  /* True if the label is referenced from somewhere.  */
  bool used;
} *label_for_bb;

/* Given LABEL return the first label in the same basic block.  */

static tree
main_block_label (tree label)
{
  basic_block bb = label_to_block (cfun, label);
  tree main_label = label_for_bb[bb->index].label;

  /* label_to_block possibly inserted undefined label into the chain.  */
  if (!main_label)
    {
      label_for_bb[bb->index].label = label;
      main_label = label;
    }

  label_for_bb[bb->index].used = true;
  return main_label;
}

/* Clean up redundant labels within the exception tree.  */

static void
cleanup_dead_labels_eh (void)
{
  eh_landing_pad lp;
  eh_region r;
  tree lab;
  int i;

  if (cfun->eh == NULL)
    return;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp && lp->post_landing_pad)
      {
        lab = main_block_label (lp->post_landing_pad);
        if (lab != lp->post_landing_pad)
          {
            EH_LANDING_PAD_NR (lp->post_landing_pad) = 0;
            EH_LANDING_PAD_NR (lab) = lp->index;
          }
      }

  FOR_ALL_EH_REGION (r)
    switch (r->type)
      {
      case ERT_CLEANUP:
      case ERT_MUST_NOT_THROW:
        break;

      case ERT_TRY:
        {
          eh_catch c;
          for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
            {
              lab = c->label;
              if (lab)
                c->label = main_block_label (lab);
            }
        }
        break;

      case ERT_ALLOWED_EXCEPTIONS:
        lab = r->u.allowed.label;
        if (lab)
          r->u.allowed.label = main_block_label (lab);
        break;
      }
}


/* Cleanup redundant labels.  This is a three-step process:
     1) Find the leading label for each block.
     2) Redirect all references to labels to the leading labels.
     3) Cleanup all useless labels.  */

void
cleanup_dead_labels (void)
{
  basic_block bb;
  label_for_bb = XCNEWVEC (struct label_record, last_basic_block_for_fn (cfun));

  /* Find a suitable label for each block.  We use the first user-defined
     label if there is one, or otherwise just the first label we see.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
          tree label;
          glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i));

          if (!label_stmt)
            break;

          label = gimple_label_label (label_stmt);

          /* If we have not yet seen a label for the current block,
             remember this one and see if there are more labels.  */
          if (!label_for_bb[bb->index].label)
            {
              label_for_bb[bb->index].label = label;
              continue;
            }

          /* If we did see a label for the current block already, but it
             is an artificially created label, replace it if the current
             label is a user defined label.  */
          if (!DECL_ARTIFICIAL (label)
              && DECL_ARTIFICIAL (label_for_bb[bb->index].label))
            {
              label_for_bb[bb->index].label = label;
              break;
            }
        }
    }

  /* Now redirect all jumps/branches to the selected label.
     First do so for each block ending in a control statement.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *stmt = last_stmt (bb);
      tree label, new_label;

      if (!stmt)
        continue;

      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          {
            gcond *cond_stmt = as_a <gcond *> (stmt);
            label = gimple_cond_true_label (cond_stmt);
            if (label)
              {
                new_label = main_block_label (label);
                if (new_label != label)
                  gimple_cond_set_true_label (cond_stmt, new_label);
              }

            label = gimple_cond_false_label (cond_stmt);
            if (label)
              {
                new_label = main_block_label (label);
                if (new_label != label)
                  gimple_cond_set_false_label (cond_stmt, new_label);
              }
          }
          break;

        case GIMPLE_SWITCH:
          {
            gswitch *switch_stmt = as_a <gswitch *> (stmt);
            size_t i, n = gimple_switch_num_labels (switch_stmt);

            /* Replace all destination labels.  */
            for (i = 0; i < n; ++i)
              {
                tree case_label = gimple_switch_label (switch_stmt, i);
                label = CASE_LABEL (case_label);
                new_label = main_block_label (label);
                if (new_label != label)
                  CASE_LABEL (case_label) = new_label;
              }
            break;
          }

        case GIMPLE_ASM:
          {
            gasm *asm_stmt = as_a <gasm *> (stmt);
            int i, n = gimple_asm_nlabels (asm_stmt);

            for (i = 0; i < n; ++i)
              {
                tree cons = gimple_asm_label_op (asm_stmt, i);
                tree label = main_block_label (TREE_VALUE (cons));
                TREE_VALUE (cons) = label;
              }
            break;
          }

        /* We have to handle gotos until they're removed, and we don't
           remove them until after we've created the CFG edges.  */
        case GIMPLE_GOTO:
          if (!computed_goto_p (stmt))
            {
              ggoto *goto_stmt = as_a <ggoto *> (stmt);
              label = gimple_goto_dest (goto_stmt);
              new_label = main_block_label (label);
              if (new_label != label)
                gimple_goto_set_dest (goto_stmt, new_label);
            }
          break;

        case GIMPLE_TRANSACTION:
          {
            gtransaction *txn = as_a <gtransaction *> (stmt);

            label = gimple_transaction_label_norm (txn);
            if (label)
              {
                new_label = main_block_label (label);
                if (new_label != label)
                  gimple_transaction_set_label_norm (txn, new_label);
              }

            label = gimple_transaction_label_uninst (txn);
            if (label)
              {
                new_label = main_block_label (label);
                if (new_label != label)
                  gimple_transaction_set_label_uninst (txn, new_label);
              }

            label = gimple_transaction_label_over (txn);
            if (label)
              {
                new_label = main_block_label (label);
                if (new_label != label)
                  gimple_transaction_set_label_over (txn, new_label);
              }
          }
          break;

        default:
          break;
        }
    }

  /* Do the same for the exception region tree labels.  */
  cleanup_dead_labels_eh ();

  /* Finally, purge dead labels.  All user-defined labels and labels that
     can be the target of non-local gotos and labels which have their
     address taken are preserved.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;
      tree label_for_this_bb = label_for_bb[bb->index].label;

      if (!label_for_this_bb)
        continue;

      /* If the main label of the block is unused, we may still remove it.  */
      if (!label_for_bb[bb->index].used)
        label_for_this_bb = NULL;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
        {
          tree label;
          glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i));

          if (!label_stmt)
            break;

          label = gimple_label_label (label_stmt);

          if (label == label_for_this_bb
              || !DECL_ARTIFICIAL (label)
              || DECL_NONLOCAL (label)
              || FORCED_LABEL (label))
            gsi_next (&i);
          else
            gsi_remove (&i, true);
        }
    }

  free (label_for_bb);
}

/* Scan the sorted vector of cases in STMT (a GIMPLE_SWITCH) and combine
   the ones jumping to the same label.
   E.g. three separate entries 1: 2: 3: become one entry 1..3:  */

bool
group_case_labels_stmt (gswitch *stmt)
{
  int old_size = gimple_switch_num_labels (stmt);
  int i, next_index, new_size;
  basic_block default_bb = NULL;
  hash_set<tree> *removed_labels = NULL;

  default_bb = gimple_switch_default_bb (cfun, stmt);

  /* Look for possible opportunities to merge cases.  */
  new_size = i = 1;
  while (i < old_size)
    {
      tree base_case, base_high;
      basic_block base_bb;

      base_case = gimple_switch_label (stmt, i);

      gcc_assert (base_case);
      base_bb = label_to_block (cfun, CASE_LABEL (base_case));

      /* Discard cases that have the same destination as the default case or
         whose destination blocks have already been removed as unreachable.  */
      if (base_bb == NULL
          || base_bb == default_bb
          || (removed_labels
              && removed_labels->contains (CASE_LABEL (base_case))))
        {
          i++;
          continue;
        }

      base_high = CASE_HIGH (base_case)
          ? CASE_HIGH (base_case)
          : CASE_LOW (base_case);
      next_index = i + 1;

      /* Try to merge case labels.  Break out when we reach the end
         of the label vector or when we cannot merge the next case
         label with the current one.  */
      while (next_index < old_size)
        {
          tree merge_case = gimple_switch_label (stmt, next_index);
          basic_block merge_bb = label_to_block (cfun, CASE_LABEL (merge_case));
          wide_int bhp1 = wi::to_wide (base_high) + 1;

          /* Merge the cases if they jump to the same place,
             and their ranges are consecutive.  */
          if (merge_bb == base_bb
              && (removed_labels == NULL
                  || !removed_labels->contains (CASE_LABEL (merge_case)))
              && wi::to_wide (CASE_LOW (merge_case)) == bhp1)
            {
              base_high
                = (CASE_HIGH (merge_case)
                   ? CASE_HIGH (merge_case) : CASE_LOW (merge_case));
              CASE_HIGH (base_case) = base_high;
              next_index++;
            }
          else
            break;
        }

      /* Discard cases that have an unreachable destination block.  */
      if (EDGE_COUNT (base_bb->succs) == 0
          && gimple_seq_unreachable_p (bb_seq (base_bb))
          /* Don't optimize this if __builtin_unreachable () is the
             implicitly added one by the C++ FE too early, before
             -Wreturn-type can be diagnosed.  We'll optimize it later
             during switchconv pass or any other cfg cleanup.  */
          && (gimple_in_ssa_p (cfun)
              || (LOCATION_LOCUS (gimple_location (last_stmt (base_bb)))
                  != BUILTINS_LOCATION)))
        {
          edge base_edge = find_edge (gimple_bb (stmt), base_bb);
          if (base_edge != NULL)
            {
              for (gimple_stmt_iterator gsi = gsi_start_bb (base_bb);
                   !gsi_end_p (gsi); gsi_next (&gsi))
                if (glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi)))
                  {
                    if (FORCED_LABEL (gimple_label_label (stmt))
                        || DECL_NONLOCAL (gimple_label_label (stmt)))
                      {
1808 /* Forced/non-local labels aren't going to be removed,
1809 but they will be moved to some neighbouring basic
1810 block. If some later case label refers to one of
1811 those labels, we should throw that case away rather
1812 than keeping it around and referring to some random
1813 other basic block without an edge to it. */
1814 if (removed_labels == NULL)
1815 removed_labels = new hash_set<tree>;
1816 removed_labels->add (gimple_label_label (stmt));
1817 }
1818 }
1819 else
1820 break;
1821 remove_edge_and_dominated_blocks (base_edge);
1822 }
1823 i = next_index;
1824 continue;
1825 }
1826
1827 if (new_size < i)
1828 gimple_switch_set_label (stmt, new_size,
1829 gimple_switch_label (stmt, i));
1830 i = next_index;
1831 new_size++;
1832 }
1833
1834 gcc_assert (new_size <= old_size);
1835
1836 if (new_size < old_size)
1837 gimple_switch_set_num_labels (stmt, new_size);
1838
1839 delete removed_labels;
1840 return new_size < old_size;
1841 }
1842
1843 /* Look for blocks ending in a multiway branch (a GIMPLE_SWITCH),
1844 and scan the sorted vector of cases. Combine the ones jumping to the
1845 same label. */
1846
1847 bool
1848 group_case_labels (void)
1849 {
1850 basic_block bb;
1851 bool changed = false;
1852
1853 FOR_EACH_BB_FN (bb, cfun)
1854 {
1855 gimple *stmt = last_stmt (bb);
1856 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
1857 changed |= group_case_labels_stmt (as_a <gswitch *> (stmt));
1858 }
1859
1860 return changed;
1861 }
1862
1863 /* Checks whether we can merge block B into block A. */
1864
1865 static bool
1866 gimple_can_merge_blocks_p (basic_block a, basic_block b)
1867 {
1868 gimple *stmt;
1869
1870 if (!single_succ_p (a))
1871 return false;
1872
1873 if (single_succ_edge (a)->flags & EDGE_COMPLEX)
1874 return false;
1875
1876 if (single_succ (a) != b)
1877 return false;
1878
1879 if (!single_pred_p (b))
1880 return false;
1881
1882 if (a == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1883 || b == EXIT_BLOCK_PTR_FOR_FN (cfun))
1884 return false;
1885
1886 /* If A ends with a statement causing exceptions or something similar, we
1887 cannot merge the blocks. */
1888 stmt = last_stmt (a);
1889 if (stmt && stmt_ends_bb_p (stmt))
1890 return false;
1891
1892 /* Do not allow a block with only a non-local label to be merged. */
1893 if (stmt)
1894 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
1895 if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
1896 return false;
1897
1898 /* Examine the labels at the beginning of B. */
1899 for (gimple_stmt_iterator gsi = gsi_start_bb (b); !gsi_end_p (gsi);
1900 gsi_next (&gsi))
1901 {
1902 tree lab;
1903 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
1904 if (!label_stmt)
1905 break;
1906 lab = gimple_label_label (label_stmt);
1907
1908 /* Do not remove user forced labels or, for -O0, any user labels. */
1909 if (!DECL_ARTIFICIAL (lab) && (!optimize || FORCED_LABEL (lab)))
1910 return false;
1911 }
1912
1913 /* Protect simple loop latches. We only want to avoid merging
1914 the latch with the loop header or with a block in another
1915 loop in this case. */
1916 if (current_loops
1917 && b->loop_father->latch == b
1918 && loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES)
1919 && (b->loop_father->header == a
1920 || b->loop_father != a->loop_father))
1921 return false;
1922
1923 /* It must be possible to eliminate all phi nodes in B. If ssa form
1924 is not up-to-date and a name-mapping is registered, we cannot eliminate
1925 any phis. Symbols marked for renaming are never a problem though. */
1926 for (gphi_iterator gsi = gsi_start_phis (b); !gsi_end_p (gsi);
1927 gsi_next (&gsi))
1928 {
1929 gphi *phi = gsi.phi ();
1930 /* Technically only new names matter. */
1931 if (name_registered_for_update_p (PHI_RESULT (phi)))
1932 return false;
1933 }
1934
1935 /* When not optimizing, don't merge if we'd lose goto_locus. */
1936 if (!optimize
1937 && single_succ_edge (a)->goto_locus != UNKNOWN_LOCATION)
1938 {
1939 location_t goto_locus = single_succ_edge (a)->goto_locus;
1940 gimple_stmt_iterator prev, next;
1941 prev = gsi_last_nondebug_bb (a);
1942 next = gsi_after_labels (b);
1943 if (!gsi_end_p (next) && is_gimple_debug (gsi_stmt (next)))
1944 gsi_next_nondebug (&next);
1945 if ((gsi_end_p (prev)
1946 || gimple_location (gsi_stmt (prev)) != goto_locus)
1947 && (gsi_end_p (next)
1948 || gimple_location (gsi_stmt (next)) != goto_locus))
1949 return false;
1950 }
1951
1952 return true;
1953 }
1954
1955 /* Replaces all uses of NAME by VAL. */
1956
1957 void
1958 replace_uses_by (tree name, tree val)
1959 {
1960 imm_use_iterator imm_iter;
1961 use_operand_p use;
1962 gimple *stmt;
1963 edge e;
1964
1965 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, name)
1966 {
1967 /* Mark the block if we change the last stmt in it. */
1968 if (cfgcleanup_altered_bbs
1969 && stmt_ends_bb_p (stmt))
1970 bitmap_set_bit (cfgcleanup_altered_bbs, gimple_bb (stmt)->index);
1971
1972 FOR_EACH_IMM_USE_ON_STMT (use, imm_iter)
1973 {
1974 replace_exp (use, val);
1975
1976 if (gimple_code (stmt) == GIMPLE_PHI)
1977 {
1978 e = gimple_phi_arg_edge (as_a <gphi *> (stmt),
1979 PHI_ARG_INDEX_FROM_USE (use));
1980 if (e->flags & EDGE_ABNORMAL
1981 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
1982 {
1983 /* This can only occur for virtual operands, since
1984 for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)
1985 would prevent replacement. */
1986 gcc_checking_assert (virtual_operand_p (name));
1987 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1;
1988 }
1989 }
1990 }
1991
1992 if (gimple_code (stmt) != GIMPLE_PHI)
1993 {
1994 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1995 gimple *orig_stmt = stmt;
1996 size_t i;
1997
1998 /* FIXME. It shouldn't be required to keep TREE_CONSTANT
1999 on ADDR_EXPRs up-to-date on GIMPLE. Propagation will
2000 only change something from non-invariant to invariant, and only
2001 when propagating constants. */
2002 if (is_gimple_min_invariant (val))
2003 for (i = 0; i < gimple_num_ops (stmt); i++)
2004 {
2005 tree op = gimple_op (stmt, i);
2006 /* Operands may be empty here. For example, the labels
2007 of a GIMPLE_COND are nulled out following the creation
2008 of the corresponding CFG edges. */
2009 if (op && TREE_CODE (op) == ADDR_EXPR)
2010 recompute_tree_invariant_for_addr_expr (op);
2011 }
2012
2013 if (fold_stmt (&gsi))
2014 stmt = gsi_stmt (gsi);
2015
2016 if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
2017 gimple_purge_dead_eh_edges (gimple_bb (stmt));
2018
2019 update_stmt (stmt);
2020 }
2021 }
2022
2023 gcc_checking_assert (has_zero_uses (name));
2024
2025 /* Also update the trees stored in loop structures. */
2026 if (current_loops)
2027 {
2028 struct loop *loop;
2029
2030 FOR_EACH_LOOP (loop, 0)
2031 {
2032 substitute_in_loop_info (loop, name, val);
2033 }
2034 }
2035 }
2036
2037 /* Merge block B into block A. */
2038
2039 static void
2040 gimple_merge_blocks (basic_block a, basic_block b)
2041 {
2042 gimple_stmt_iterator last, gsi;
2043 gphi_iterator psi;
2044
2045 if (dump_file)
2046 fprintf (dump_file, "Merging blocks %d and %d\n", a->index, b->index);
2047
2048 /* Remove all single-valued PHI nodes from block B of the form
2049 V_i = PHI <V_j> by propagating V_j to all the uses of V_i. */
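/* For instance (a sketch): a PHI v_2 = PHI <v_1(A)> in B either has
   v_1 propagated into every use of v_2 via replace_uses_by, or, when
   propagation is not allowed, is replaced by a copy v_2 = v_1
   emitted at the end of A. */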
2050 gsi = gsi_last_bb (a);
2051 for (psi = gsi_start_phis (b); !gsi_end_p (psi); )
2052 {
2053 gimple *phi = gsi_stmt (psi);
2054 tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);
2055 gimple *copy;
2056 bool may_replace_uses = (virtual_operand_p (def)
2057 || may_propagate_copy (def, use));
2058
2059 /* In case we maintain loop closed ssa form, do not propagate arguments
2060 of loop exit phi nodes. */
2061 if (current_loops
2062 && loops_state_satisfies_p (LOOP_CLOSED_SSA)
2063 && !virtual_operand_p (def)
2064 && TREE_CODE (use) == SSA_NAME
2065 && a->loop_father != b->loop_father)
2066 may_replace_uses = false;
2067
2068 if (!may_replace_uses)
2069 {
2070 gcc_assert (!virtual_operand_p (def));
2071
2072 /* Note that just emitting the copies is fine -- there is no problem
2073 with ordering of phi nodes. This is because A is the single
2074 predecessor of B, therefore results of the phi nodes cannot
2075 appear as arguments of the phi nodes. */
2076 copy = gimple_build_assign (def, use);
2077 gsi_insert_after (&gsi, copy, GSI_NEW_STMT);
2078 remove_phi_node (&psi, false);
2079 }
2080 else
2081 {
2082 /* If we deal with a PHI for virtual operands, we can simply
2083 propagate these without fussing with folding or updating
2084 the stmt. */
2085 if (virtual_operand_p (def))
2086 {
2087 imm_use_iterator iter;
2088 use_operand_p use_p;
2089 gimple *stmt;
2090
2091 FOR_EACH_IMM_USE_STMT (stmt, iter, def)
2092 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2093 SET_USE (use_p, use);
2094
2095 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def))
2096 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use) = 1;
2097 }
2098 else
2099 replace_uses_by (def, use);
2100
2101 remove_phi_node (&psi, true);
2102 }
2103 }
2104
2105 /* Ensure that B follows A. */
2106 move_block_after (b, a);
2107
2108 gcc_assert (single_succ_edge (a)->flags & EDGE_FALLTHRU);
2109 gcc_assert (!last_stmt (a) || !stmt_ends_bb_p (last_stmt (a)));
2110
2111 /* Remove labels from B and set gimple_bb to A for other statements. */
2112 for (gsi = gsi_start_bb (b); !gsi_end_p (gsi);)
2113 {
2114 gimple *stmt = gsi_stmt (gsi);
2115 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2116 {
2117 tree label = gimple_label_label (label_stmt);
2118 int lp_nr;
2119
2120 gsi_remove (&gsi, false);
2121
2122 /* Now that we can thread computed gotos, we might have
2123 a situation where we have a forced label in block B.
2124 However, the label at the start of block B might still be
2125 used in other ways (think about the runtime checking for
2126 Fortran assigned gotos). So we cannot just delete the
2127 label. Instead we move the label to the start of block A. */
2128 if (FORCED_LABEL (label))
2129 {
2130 gimple_stmt_iterator dest_gsi = gsi_start_bb (a);
2131 tree first_label = NULL_TREE;
2132 if (!gsi_end_p (dest_gsi))
2133 if (glabel *first_label_stmt
2134 = dyn_cast <glabel *> (gsi_stmt (dest_gsi)))
2135 first_label = gimple_label_label (first_label_stmt);
2136 if (first_label
2137 && (DECL_NONLOCAL (first_label)
2138 || EH_LANDING_PAD_NR (first_label) != 0))
2139 gsi_insert_after (&dest_gsi, stmt, GSI_NEW_STMT);
2140 else
2141 gsi_insert_before (&dest_gsi, stmt, GSI_NEW_STMT);
2142 }
2143 /* Other user labels are kept around in the form of a debug stmt. */
2144 else if (!DECL_ARTIFICIAL (label) && MAY_HAVE_DEBUG_BIND_STMTS)
2145 {
2146 gimple *dbg = gimple_build_debug_bind (label,
2147 integer_zero_node,
2148 stmt);
2149 gimple_debug_bind_reset_value (dbg);
2150 gsi_insert_before (&gsi, dbg, GSI_SAME_STMT);
2151 }
2152
2153 lp_nr = EH_LANDING_PAD_NR (label);
2154 if (lp_nr)
2155 {
2156 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
2157 lp->post_landing_pad = NULL;
2158 }
2159 }
2160 else
2161 {
2162 gimple_set_bb (stmt, a);
2163 gsi_next (&gsi);
2164 }
2165 }
2166
2167 /* When merging two BBs, if their counts are different, the larger count
2168 is selected as the new bb count. This is to handle inconsistent
2169 profiles. */
2170 if (a->loop_father == b->loop_father)
2171 {
2172 a->count = a->count.merge (b->count);
2173 }
2174
2175 /* Merge the sequences. */
2176 last = gsi_last_bb (a);
2177 gsi_insert_seq_after (&last, bb_seq (b), GSI_NEW_STMT);
2178 set_bb_seq (b, NULL);
2179
2180 if (cfgcleanup_altered_bbs)
2181 bitmap_set_bit (cfgcleanup_altered_bbs, a->index);
2182 }
2183
2184
2185 /* Return the one of two successors of BB that is not reachable by a
2186 complex edge, if there is one. Else, return BB. We use
2187 this in optimizations that use post-dominators for their heuristics,
2188 to catch the cases in C++ where function calls are involved. */
2189
2190 basic_block
2191 single_noncomplex_succ (basic_block bb)
2192 {
2193 edge e0, e1;
2194 if (EDGE_COUNT (bb->succs) != 2)
2195 return bb;
2196
2197 e0 = EDGE_SUCC (bb, 0);
2198 e1 = EDGE_SUCC (bb, 1);
2199 if (e0->flags & EDGE_COMPLEX)
2200 return e1->dest;
2201 if (e1->flags & EDGE_COMPLEX)
2202 return e0->dest;
2203
2204 return bb;
2205 }
2206
2207 /* CALL is a GIMPLE_CALL. Set the current_function_calls_* flags. */
2208
2209 void
2210 notice_special_calls (gcall *call)
2211 {
2212 int flags = gimple_call_flags (call);
2213
2214 if (flags & ECF_MAY_BE_ALLOCA)
2215 cfun->calls_alloca = true;
2216 if (flags & ECF_RETURNS_TWICE)
2217 cfun->calls_setjmp = true;
2218 }
2219
2220
2221 /* Clear flags set by notice_special_calls. Used by dead code removal
2222 to update the flags. */
2223
2224 void
2225 clear_special_calls (void)
2226 {
2227 cfun->calls_alloca = false;
2228 cfun->calls_setjmp = false;
2229 }
2230
2231 /* Remove PHI nodes associated with basic block BB and all edges out of BB. */
2232
2233 static void
2234 remove_phi_nodes_and_edges_for_unreachable_block (basic_block bb)
2235 {
2236 /* Since this block is no longer reachable, we can just delete all
2237 of its PHI nodes. */
2238 remove_phi_nodes (bb);
2239
2240 /* Remove edges to BB's successors. */
2241 while (EDGE_COUNT (bb->succs) > 0)
2242 remove_edge (EDGE_SUCC (bb, 0));
2243 }
2244
2245
2246 /* Remove statements of basic block BB. */
2247
2248 static void
2249 remove_bb (basic_block bb)
2250 {
2251 gimple_stmt_iterator i;
2252
2253 if (dump_file)
2254 {
2255 fprintf (dump_file, "Removing basic block %d\n", bb->index);
2256 if (dump_flags & TDF_DETAILS)
2257 {
2258 dump_bb (dump_file, bb, 0, TDF_BLOCKS);
2259 fprintf (dump_file, "\n");
2260 }
2261 }
2262
2263 if (current_loops)
2264 {
2265 struct loop *loop = bb->loop_father;
2266
2267 /* If a loop gets removed, clean up the information associated
2268 with it. */
2269 if (loop->latch == bb
2270 || loop->header == bb)
2271 free_numbers_of_iterations_estimates (loop);
2272 }
2273
2274 /* Remove all the instructions in the block. */
2275 if (bb_seq (bb) != NULL)
2276 {
2277 /* Walk backwards so as to get a chance to substitute all
2278 released DEFs into debug stmts. See
2279 eliminate_unnecessary_stmts() in tree-ssa-dce.c for more
2280 details. */
2281 for (i = gsi_last_bb (bb); !gsi_end_p (i);)
2282 {
2283 gimple *stmt = gsi_stmt (i);
2284 glabel *label_stmt = dyn_cast <glabel *> (stmt);
2285 if (label_stmt
2286 && (FORCED_LABEL (gimple_label_label (label_stmt))
2287 || DECL_NONLOCAL (gimple_label_label (label_stmt))))
2288 {
2289 basic_block new_bb;
2290 gimple_stmt_iterator new_gsi;
2291
2292 /* A non-reachable non-local label may still be referenced.
2293 But it no longer needs to carry the extra semantics of
2294 non-locality. */
2295 if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
2296 {
2297 DECL_NONLOCAL (gimple_label_label (label_stmt)) = 0;
2298 FORCED_LABEL (gimple_label_label (label_stmt)) = 1;
2299 }
2300
2301 new_bb = bb->prev_bb;
2302 /* Don't move any labels into ENTRY block. */
2303 if (new_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2304 {
2305 new_bb = single_succ (new_bb);
2306 gcc_assert (new_bb != bb);
2307 }
2308 new_gsi = gsi_after_labels (new_bb);
2309 gsi_remove (&i, false);
2310 gsi_insert_before (&new_gsi, stmt, GSI_NEW_STMT);
2311 }
2312 else
2313 {
2314 /* Release SSA definitions. */
2315 release_defs (stmt);
2316 gsi_remove (&i, true);
2317 }
2318
2319 if (gsi_end_p (i))
2320 i = gsi_last_bb (bb);
2321 else
2322 gsi_prev (&i);
2323 }
2324 }
2325
2326 remove_phi_nodes_and_edges_for_unreachable_block (bb);
2327 bb->il.gimple.seq = NULL;
2328 bb->il.gimple.phi_nodes = NULL;
2329 }
2330
2331
2332 /* Given a basic block BB and a value VAL for use in the final statement
2333 of the block (if a GIMPLE_COND, GIMPLE_SWITCH, or computed goto), return
2334 the edge that will be taken out of the block.
2335 If VAL is NULL_TREE, then the current value of the final statement's
2336 predicate or index is used.
2337 If the value does not match a unique edge, NULL is returned. */
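/* For example (a sketch): if BB ends in

   if (x_1 > 0) goto <bb 3>; else goto <bb 4>;

   then find_taken_edge (bb, integer_zero_node) returns the edge to
   <bb 4>, while find_taken_edge (bb, NULL_TREE) returns NULL unless
   the predicate has already been folded to a constant. */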
2338
2339 edge
2340 find_taken_edge (basic_block bb, tree val)
2341 {
2342 gimple *stmt;
2343
2344 stmt = last_stmt (bb);
2345
2346 /* Handle ENTRY and EXIT. */
2347 if (!stmt)
2348 return NULL;
2349
2350 if (gimple_code (stmt) == GIMPLE_COND)
2351 return find_taken_edge_cond_expr (as_a <gcond *> (stmt), val);
2352
2353 if (gimple_code (stmt) == GIMPLE_SWITCH)
2354 return find_taken_edge_switch_expr (as_a <gswitch *> (stmt), val);
2355
2356 if (computed_goto_p (stmt))
2357 {
2358 /* Only optimize if the argument is a label; if the argument is
2359 not a label then we cannot construct a proper CFG.
2360
2361 It may be the case that we only need to allow the LABEL_REF to
2362 appear inside an ADDR_EXPR, but we also allow the LABEL_REF to
2363 appear inside a LABEL_EXPR just to be safe. */
2364 if (val
2365 && (TREE_CODE (val) == ADDR_EXPR || TREE_CODE (val) == LABEL_EXPR)
2366 && TREE_CODE (TREE_OPERAND (val, 0)) == LABEL_DECL)
2367 return find_taken_edge_computed_goto (bb, TREE_OPERAND (val, 0));
2368 }
2369
2370 /* Otherwise we only know the taken successor edge if it's unique. */
2371 return single_succ_p (bb) ? single_succ_edge (bb) : NULL;
2372 }
2373
2374 /* Given a constant value VAL and the entry block BB to a GOTO_EXPR
2375 statement, determine which of the outgoing edges will be taken out of the
2376 block. Return NULL if any edge may be taken. */
2377
2378 static edge
2379 find_taken_edge_computed_goto (basic_block bb, tree val)
2380 {
2381 basic_block dest;
2382 edge e = NULL;
2383
2384 dest = label_to_block (cfun, val);
2385 if (dest)
2386 e = find_edge (bb, dest);
2387
2388 /* It's possible for find_edge to return NULL here on invalid code
2389 that abuses the labels-as-values extension (e.g. code that attempts to
2390 jump *between* functions via stored labels-as-values; PR 84136).
2391 If so, then we simply return that NULL for the edge.
2392 We don't currently have a way of detecting such invalid code, so we
2393 can't assert that it was the case when a NULL edge occurs here. */
2394
2395 return e;
2396 }
2397
2398 /* Given COND_STMT and a constant value VAL for use as the predicate,
2399 determine which of the two edges will be taken out of
2400 the statement's block. Return NULL if either edge may be taken.
2401 If VAL is NULL_TREE, then the current value of COND_STMT's predicate
2402 is used. */
2403
2404 static edge
2405 find_taken_edge_cond_expr (const gcond *cond_stmt, tree val)
2406 {
2407 edge true_edge, false_edge;
2408
2409 if (val == NULL_TREE)
2410 {
2411 /* Use the current value of the predicate. */
2412 if (gimple_cond_true_p (cond_stmt))
2413 val = integer_one_node;
2414 else if (gimple_cond_false_p (cond_stmt))
2415 val = integer_zero_node;
2416 else
2417 return NULL;
2418 }
2419 else if (TREE_CODE (val) != INTEGER_CST)
2420 return NULL;
2421
2422 extract_true_false_edges_from_block (gimple_bb (cond_stmt),
2423 &true_edge, &false_edge);
2424
2425 return (integer_zerop (val) ? false_edge : true_edge);
2426 }
2427
2428 /* Given SWITCH_STMT and an INTEGER_CST VAL for use as the index, determine
2429 which edge will be taken out of the statement's block. Return NULL if any
2430 edge may be taken.
2431 If VAL is NULL_TREE, then the current value of SWITCH_STMT's index
2432 is used. */
2433
2434 edge
2435 find_taken_edge_switch_expr (const gswitch *switch_stmt, tree val)
2436 {
2437 basic_block dest_bb;
2438 edge e;
2439 tree taken_case;
2440
2441 if (gimple_switch_num_labels (switch_stmt) == 1)
2442 taken_case = gimple_switch_default_label (switch_stmt);
2443 else
2444 {
2445 if (val == NULL_TREE)
2446 val = gimple_switch_index (switch_stmt);
2447 if (TREE_CODE (val) != INTEGER_CST)
2448 return NULL;
2449 else
2450 taken_case = find_case_label_for_value (switch_stmt, val);
2451 }
2452 dest_bb = label_to_block (cfun, CASE_LABEL (taken_case));
2453
2454 e = find_edge (gimple_bb (switch_stmt), dest_bb);
2455 gcc_assert (e);
2456 return e;
2457 }
2458
2459
2460 /* Return the CASE_LABEL_EXPR that SWITCH_STMT will take for VAL.
2461 We can make optimal use here of the fact that the case labels are
2462 sorted: We can do a binary search for a case matching VAL. */
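/* For example (a sketch): with the sorted label vector
   { default, 1, 5 ... 9, 12 }, looking up VAL == 7 probes index 2
   first and finds the range 5 ... 9, which contains 7, so that
   CASE_LABEL_EXPR is returned; VAL == 3 falls between the cases and
   the search terminates with the default label. */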
2463
2464 tree
2465 find_case_label_for_value (const gswitch *switch_stmt, tree val)
2466 {
2467 size_t low, high, n = gimple_switch_num_labels (switch_stmt);
2468 tree default_case = gimple_switch_default_label (switch_stmt);
2469
2470 for (low = 0, high = n; high - low > 1; )
2471 {
2472 size_t i = (high + low) / 2;
2473 tree t = gimple_switch_label (switch_stmt, i);
2474 int cmp;
2475
2476 /* Cache the result of comparing CASE_LOW and val. */
2477 cmp = tree_int_cst_compare (CASE_LOW (t), val);
2478
2479 if (cmp > 0)
2480 high = i;
2481 else
2482 low = i;
2483
2484 if (CASE_HIGH (t) == NULL)
2485 {
2486 /* A single-valued case label. */
2487 if (cmp == 0)
2488 return t;
2489 }
2490 else
2491 {
2492 /* A case range. We can only handle integer ranges. */
2493 if (cmp <= 0 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
2494 return t;
2495 }
2496 }
2497
2498 return default_case;
2499 }
2500
2501
2502 /* Dump a basic block on stderr. */
2503
2504 void
2505 gimple_debug_bb (basic_block bb)
2506 {
2507 dump_bb (stderr, bb, 0, TDF_VOPS|TDF_MEMSYMS|TDF_BLOCKS);
2508 }
2509
2510
2511 /* Dump basic block with index N on stderr. */
2512
2513 basic_block
2514 gimple_debug_bb_n (int n)
2515 {
2516 gimple_debug_bb (BASIC_BLOCK_FOR_FN (cfun, n));
2517 return BASIC_BLOCK_FOR_FN (cfun, n);
2518 }
2519
2520
2521 /* Dump the CFG on stderr.
2522
2523 FLAGS are the same as those used by the tree dumping functions
2524 (see TDF_* in dumpfile.h). */
2525
2526 void
2527 gimple_debug_cfg (dump_flags_t flags)
2528 {
2529 gimple_dump_cfg (stderr, flags);
2530 }
2531
2532
2533 /* Dump the program showing basic block boundaries on the given FILE.
2534
2535 FLAGS are the same as those used by the tree dumping functions (see TDF_* in
2536 tree.h). */
2537
2538 void
2539 gimple_dump_cfg (FILE *file, dump_flags_t flags)
2540 {
2541 if (flags & TDF_DETAILS)
2542 {
2543 dump_function_header (file, current_function_decl, flags);
2544 fprintf (file, ";; \n%d basic blocks, %d edges, last basic block %d.\n\n",
2545 n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
2546 last_basic_block_for_fn (cfun));
2547
2548 brief_dump_cfg (file, flags);
2549 fprintf (file, "\n");
2550 }
2551
2552 if (flags & TDF_STATS)
2553 dump_cfg_stats (file);
2554
2555 dump_function_to_file (current_function_decl, file, flags | TDF_BLOCKS);
2556 }
2557
2558
2559 /* Dump CFG statistics on FILE. */
2560
2561 void
2562 dump_cfg_stats (FILE *file)
2563 {
2564 static long max_num_merged_labels = 0;
2565 unsigned long size, total = 0;
2566 long num_edges;
2567 basic_block bb;
2568 const char * const fmt_str = "%-30s%-13s%12s\n";
2569 const char * const fmt_str_1 = "%-30s%13d" PRsa (11) "\n";
2570 const char * const fmt_str_2 = "%-30s%13ld" PRsa (11) "\n";
2571 const char * const fmt_str_3 = "%-43s" PRsa (11) "\n";
2572 const char *funcname = current_function_name ();
2573
2574 fprintf (file, "\nCFG Statistics for %s\n\n", funcname);
2575
2576 fprintf (file, "---------------------------------------------------------\n");
2577 fprintf (file, fmt_str, "", " Number of ", "Memory");
2578 fprintf (file, fmt_str, "", " instances ", "used ");
2579 fprintf (file, "---------------------------------------------------------\n");
2580
2581 size = n_basic_blocks_for_fn (cfun) * sizeof (struct basic_block_def);
2582 total += size;
2583 fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks_for_fn (cfun),
2584 SIZE_AMOUNT (size));
2585
2586 num_edges = 0;
2587 FOR_EACH_BB_FN (bb, cfun)
2588 num_edges += EDGE_COUNT (bb->succs);
2589 size = num_edges * sizeof (struct edge_def);
2590 total += size;
2591 fprintf (file, fmt_str_2, "Edges", num_edges, SIZE_AMOUNT (size));
2592
2593 fprintf (file, "---------------------------------------------------------\n");
2594 fprintf (file, fmt_str_3, "Total memory used by CFG data",
2595 SIZE_AMOUNT (total));
2596 fprintf (file, "---------------------------------------------------------\n");
2597 fprintf (file, "\n");
2598
2599 if (cfg_stats.num_merged_labels > max_num_merged_labels)
2600 max_num_merged_labels = cfg_stats.num_merged_labels;
2601
2602 fprintf (file, "Coalesced label blocks: %ld (Max so far: %ld)\n",
2603 cfg_stats.num_merged_labels, max_num_merged_labels);
2604
2605 fprintf (file, "\n");
2606 }
2607
2608
2609 /* Dump CFG statistics on stderr. Keep extern so that it's always
2610 linked in the final executable. */
2611
2612 DEBUG_FUNCTION void
2613 debug_cfg_stats (void)
2614 {
2615 dump_cfg_stats (stderr);
2616 }
2617
2618 /*---------------------------------------------------------------------------
2619 Miscellaneous helpers
2620 ---------------------------------------------------------------------------*/
2621
2622 /* Return true if T, a GIMPLE_CALL, can make an abnormal transfer of control
2623 flow. Transfers of control flow associated with EH are excluded. */
2624
2625 static bool
2626 call_can_make_abnormal_goto (gimple *t)
2627 {
2628 /* If the function has no non-local labels, then a call cannot make an
2629 abnormal transfer of control. */
2630 if (!cfun->has_nonlocal_label
2631 && !cfun->calls_setjmp)
2632 return false;
2633
2634 /* Likewise if the call has no side effects. */
2635 if (!gimple_has_side_effects (t))
2636 return false;
2637
2638 /* Likewise if the called function is leaf. */
2639 if (gimple_call_flags (t) & ECF_LEAF)
2640 return false;
2641
2642 return true;
2643 }
2644
2645
2646 /* Return true if T can make an abnormal transfer of control flow.
2647 Transfers of control flow associated with EH are excluded. */
2648
2649 bool
2650 stmt_can_make_abnormal_goto (gimple *t)
2651 {
2652 if (computed_goto_p (t))
2653 return true;
2654 if (is_gimple_call (t))
2655 return call_can_make_abnormal_goto (t);
2656 return false;
2657 }
2658
2659
2660 /* Return true if T represents a stmt that always transfers control. */
2661
2662 bool
2663 is_ctrl_stmt (gimple *t)
2664 {
2665 switch (gimple_code (t))
2666 {
2667 case GIMPLE_COND:
2668 case GIMPLE_SWITCH:
2669 case GIMPLE_GOTO:
2670 case GIMPLE_RETURN:
2671 case GIMPLE_RESX:
2672 return true;
2673 default:
2674 return false;
2675 }
2676 }
2677
2678
2679 /* Return true if T is a statement that may alter the flow of control
2680 (e.g., a call to a non-returning function). */
2681
2682 bool
2683 is_ctrl_altering_stmt (gimple *t)
2684 {
2685 gcc_assert (t);
2686
2687 switch (gimple_code (t))
2688 {
2689 case GIMPLE_CALL:
2690 /* Per stmt call flag indicates whether the call could alter
2691 control flow. */
2692 if (gimple_call_ctrl_altering_p (t))
2693 return true;
2694 break;
2695
2696 case GIMPLE_EH_DISPATCH:
2697 /* EH_DISPATCH branches to the individual catch handlers at
2698 this level of a try or allowed-exceptions region. It can
2699 fallthru to the next statement as well. */
2700 return true;
2701
2702 case GIMPLE_ASM:
2703 if (gimple_asm_nlabels (as_a <gasm *> (t)) > 0)
2704 return true;
2705 break;
2706
2707 CASE_GIMPLE_OMP:
2708 /* OpenMP directives alter control flow. */
2709 return true;
2710
2711 case GIMPLE_TRANSACTION:
2712 /* A transaction start alters control flow. */
2713 return true;
2714
2715 default:
2716 break;
2717 }
2718
2719 /* If a statement can throw, it alters control flow. */
2720 return stmt_can_throw_internal (cfun, t);
2721 }
2722
2723
2724 /* Return true if T is a simple local goto. */
2725
2726 bool
2727 simple_goto_p (gimple *t)
2728 {
2729 return (gimple_code (t) == GIMPLE_GOTO
2730 && TREE_CODE (gimple_goto_dest (t)) == LABEL_DECL);
2731 }
2732
2733
2734 /* Return true if STMT should start a new basic block. PREV_STMT is
2735 the statement preceding STMT. It is used when STMT is a label or a
2736 case label. Labels should only start a new basic block if their
2737 previous statement wasn't a label. Otherwise, a sequence of labels
2738 would generate unnecessary basic blocks that only contain a single
2739 label. */
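/* For instance (a sketch): in

   L1:
   L2:
   x_1 = 1;

   only L1 starts a new basic block; L2 is merged into the same block
   and counted in cfg_stats.num_merged_labels, unless it is nonlocal
   or a computed-goto target, which always forces a new block. */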
2740
2741 static inline bool
2742 stmt_starts_bb_p (gimple *stmt, gimple *prev_stmt)
2743 {
2744 if (stmt == NULL)
2745 return false;
2746
2747 /* PREV_STMT is only set to a debug stmt if the debug stmt is before
2748 any nondebug stmts in the block. We don't want to start another
2749 block in this case: the debug stmt will already have started the
2750 one STMT would start if we weren't outputting debug stmts. */
2751 if (prev_stmt && is_gimple_debug (prev_stmt))
2752 return false;
2753
2754 /* Labels start a new basic block only if the preceding statement
2755 wasn't a label of the same type. This prevents the creation of
2756 consecutive blocks that have nothing but a single label. */
2757 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2758 {
2759 /* Nonlocal and computed GOTO targets always start a new block. */
2760 if (DECL_NONLOCAL (gimple_label_label (label_stmt))
2761 || FORCED_LABEL (gimple_label_label (label_stmt)))
2762 return true;
2763
2764 if (prev_stmt && gimple_code (prev_stmt) == GIMPLE_LABEL)
2765 {
2766 if (DECL_NONLOCAL (gimple_label_label (
2767 as_a <glabel *> (prev_stmt))))
2768 return true;
2769
2770 cfg_stats.num_merged_labels++;
2771 return false;
2772 }
2773 else
2774 return true;
2775 }
2776 else if (gimple_code (stmt) == GIMPLE_CALL)
2777 {
2778 if (gimple_call_flags (stmt) & ECF_RETURNS_TWICE)
2779 /* setjmp acts similarly to a nonlocal GOTO target and thus should
2780 start a new block. */
2781 return true;
2782 if (gimple_call_internal_p (stmt, IFN_PHI)
2783 && prev_stmt
2784 && gimple_code (prev_stmt) != GIMPLE_LABEL
2785 && (gimple_code (prev_stmt) != GIMPLE_CALL
2786 || ! gimple_call_internal_p (prev_stmt, IFN_PHI)))
2787 /* PHI nodes start a new block unless preceded by a label
2788 or another PHI. */
2789 return true;
2790 }
2791
2792 return false;
2793 }
2794
2795
2796 /* Return true if T should end a basic block. */
2797
2798 bool
2799 stmt_ends_bb_p (gimple *t)
2800 {
2801 return is_ctrl_stmt (t) || is_ctrl_altering_stmt (t);
2802 }
2803
2804 /* Remove block annotations and other data structures. */
2805
2806 void
2807 delete_tree_cfg_annotations (struct function *fn)
2808 {
2809 vec_free (label_to_block_map_for_fn (fn));
2810 }
2811
2812 /* Return the virtual phi in BB. */
2813
2814 gphi *
2815 get_virtual_phi (basic_block bb)
2816 {
2817 for (gphi_iterator gsi = gsi_start_phis (bb);
2818 !gsi_end_p (gsi);
2819 gsi_next (&gsi))
2820 {
2821 gphi *phi = gsi.phi ();
2822
2823 if (virtual_operand_p (PHI_RESULT (phi)))
2824 return phi;
2825 }
2826
2827 return NULL;
2828 }
2829
2830 /* Return the first statement in basic block BB. */
2831
2832 gimple *
2833 first_stmt (basic_block bb)
2834 {
2835 gimple_stmt_iterator i = gsi_start_bb (bb);
2836 gimple *stmt = NULL;
2837
2838 while (!gsi_end_p (i) && is_gimple_debug ((stmt = gsi_stmt (i))))
2839 {
2840 gsi_next (&i);
2841 stmt = NULL;
2842 }
2843 return stmt;
2844 }
2845
2846 /* Return the first non-label statement in basic block BB. */
2847
2848 static gimple *
2849 first_non_label_stmt (basic_block bb)
2850 {
2851 gimple_stmt_iterator i = gsi_start_bb (bb);
2852 while (!gsi_end_p (i) && gimple_code (gsi_stmt (i)) == GIMPLE_LABEL)
2853 gsi_next (&i);
2854 return !gsi_end_p (i) ? gsi_stmt (i) : NULL;
2855 }
2856
2857 /* Return the last statement in basic block BB. */
2858
2859 gimple *
2860 last_stmt (basic_block bb)
2861 {
2862 gimple_stmt_iterator i = gsi_last_bb (bb);
2863 gimple *stmt = NULL;
2864
2865 while (!gsi_end_p (i) && is_gimple_debug ((stmt = gsi_stmt (i))))
2866 {
2867 gsi_prev (&i);
2868 stmt = NULL;
2869 }
2870 return stmt;
2871 }
2872
2873 /* Return the last statement of an otherwise empty block. Return NULL
2874 if the block is totally empty, or if it contains more than one
2875 statement. */
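/* For example (a sketch): for a block containing just

   L1:
   return x_1;

   this returns the GIMPLE_RETURN; for an empty block, or one with a
   second executable statement before the return, it returns NULL. */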
2876
2877 gimple *
2878 last_and_only_stmt (basic_block bb)
2879 {
2880 gimple_stmt_iterator i = gsi_last_nondebug_bb (bb);
2881 gimple *last, *prev;
2882
2883 if (gsi_end_p (i))
2884 return NULL;
2885
2886 last = gsi_stmt (i);
2887 gsi_prev_nondebug (&i);
2888 if (gsi_end_p (i))
2889 return last;
2890
2891 /* Empty statements should no longer appear in the instruction stream.
2892 Everything that might have appeared before should be deleted by
2893 remove_useless_stmts, and the optimizers should just gsi_remove
2894 instead of smashing with build_empty_stmt.
2895
2896 Thus the only thing that should appear here in a block containing
2897 one executable statement is a label. */
2898 prev = gsi_stmt (i);
2899 if (gimple_code (prev) == GIMPLE_LABEL)
2900 return last;
2901 else
2902 return NULL;
2903 }
2904
2905 /* Reinstall those PHI arguments queued in OLD_EDGE to NEW_EDGE. */
2906
2907 static void
2908 reinstall_phi_args (edge new_edge, edge old_edge)
2909 {
2910 edge_var_map *vm;
2911 int i;
2912 gphi_iterator phis;
2913
2914 vec<edge_var_map> *v = redirect_edge_var_map_vector (old_edge);
2915 if (!v)
2916 return;
2917
2918 for (i = 0, phis = gsi_start_phis (new_edge->dest);
2919 v->iterate (i, &vm) && !gsi_end_p (phis);
2920 i++, gsi_next (&phis))
2921 {
2922 gphi *phi = phis.phi ();
2923 tree result = redirect_edge_var_map_result (vm);
2924 tree arg = redirect_edge_var_map_def (vm);
2925
2926 gcc_assert (result == gimple_phi_result (phi));
2927
2928 add_phi_arg (phi, arg, new_edge, redirect_edge_var_map_location (vm));
2929 }
2930
2931 redirect_edge_var_map_clear (old_edge);
2932 }
2933
2934 /* Returns the basic block after which the new basic block created
2935 by splitting edge EDGE_IN should be placed. Tries to keep the new block
2936 near its "logical" location. This is of most help to humans looking
2937 at debugging dumps. */
2938
2939 basic_block
2940 split_edge_bb_loc (edge edge_in)
2941 {
2942 basic_block dest = edge_in->dest;
2943 basic_block dest_prev = dest->prev_bb;
2944
2945 if (dest_prev)
2946 {
2947 edge e = find_edge (dest_prev, dest);
2948 if (e && !(e->flags & EDGE_COMPLEX))
2949 return edge_in->src;
2950 }
2951 return dest_prev;
2952 }
2953
2954 /* Split a (typically critical) edge EDGE_IN. Return the new block.
2955 Abort on abnormal edges. */
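/* For instance (a sketch): splitting the edge E from block A to
   block B produces

   A -> NEW -> B

   where NEW is the empty fallthru block returned; PHI arguments that
   were queued when E was redirected are reinstalled on the NEW -> B
   edge by reinstall_phi_args. */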
2956
2957 static basic_block
2958 gimple_split_edge (edge edge_in)
2959 {
2960 basic_block new_bb, after_bb, dest;
2961 edge new_edge, e;
2962
2963 /* Abnormal edges cannot be split. */
2964 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
2965
2966 dest = edge_in->dest;
2967
2968 after_bb = split_edge_bb_loc (edge_in);
2969
2970 new_bb = create_empty_bb (after_bb);
2971 new_bb->count = edge_in->count ();
2972
2973 e = redirect_edge_and_branch (edge_in, new_bb);
2974 gcc_assert (e == edge_in);
2975
2976 new_edge = make_single_succ_edge (new_bb, dest, EDGE_FALLTHRU);
2977 reinstall_phi_args (new_edge, e);
2978
2979 return new_bb;
2980 }
2981
2982
2983 /* Verify properties of the address expression T whose base should be
2984 TREE_ADDRESSABLE if VERIFY_ADDRESSABLE is true. */
2985
2986 static bool
2987 verify_address (tree t, bool verify_addressable)
2988 {
2989 bool old_constant;
2990 bool old_side_effects;
2991 bool new_constant;
2992 bool new_side_effects;
2993
2994 old_constant = TREE_CONSTANT (t);
2995 old_side_effects = TREE_SIDE_EFFECTS (t);
2996
2997 recompute_tree_invariant_for_addr_expr (t);
2998 new_side_effects = TREE_SIDE_EFFECTS (t);
2999 new_constant = TREE_CONSTANT (t);
3000
3001 if (old_constant != new_constant)
3002 {
3003 error ("constant not recomputed when ADDR_EXPR changed");
3004 return true;
3005 }
3006 if (old_side_effects != new_side_effects)
3007 {
3008 error ("side effects not recomputed when ADDR_EXPR changed");
3009 return true;
3010 }
3011
3012 tree base = TREE_OPERAND (t, 0);
3013 while (handled_component_p (base))
3014 base = TREE_OPERAND (base, 0);
3015
3016 if (!(VAR_P (base)
3017 || TREE_CODE (base) == PARM_DECL
3018 || TREE_CODE (base) == RESULT_DECL))
3019 return false;
3020
3021 if (DECL_GIMPLE_REG_P (base))
3022 {
3023 error ("DECL_GIMPLE_REG_P set on a variable with address taken");
3024 return true;
3025 }
3026
3027 if (verify_addressable && !TREE_ADDRESSABLE (base))
3028 {
3029 error ("address taken, but ADDRESSABLE bit not set");
3030 return true;
3031 }
3032
3033 return false;
3034 }
3035
3036
3037 /* Verify if EXPR is either a GIMPLE ID or a GIMPLE indirect reference.
3038 Returns true if there is an error, otherwise false. */
3039
3040 static bool
3041 verify_types_in_gimple_min_lval (tree expr)
3042 {
3043 tree op;
3044
3045 if (is_gimple_id (expr))
3046 return false;
3047
3048 if (TREE_CODE (expr) != TARGET_MEM_REF
3049 && TREE_CODE (expr) != MEM_REF)
3050 {
3051 error ("invalid expression for min lvalue");
3052 return true;
3053 }
3054
3055 /* TARGET_MEM_REFs are strange beasts. */
3056 if (TREE_CODE (expr) == TARGET_MEM_REF)
3057 return false;
3058
3059 op = TREE_OPERAND (expr, 0);
3060 if (!is_gimple_val (op))
3061 {
3062 error ("invalid operand in indirect reference");
3063 debug_generic_stmt (op);
3064 return true;
3065 }
3066 /* Memory references now generally can involve a value conversion. */
3067
3068 return false;
3069 }
3070
3071 /* Verify if EXPR is a valid GIMPLE reference expression. If
3072 REQUIRE_LVALUE is true verifies it is an lvalue. Returns true
3073 if there is an error, otherwise false. */
3074
3075 static bool
3076 verify_types_in_gimple_reference (tree expr, bool require_lvalue)
3077 {
3078 if (TREE_CODE (expr) == REALPART_EXPR
3079 || TREE_CODE (expr) == IMAGPART_EXPR
3080 || TREE_CODE (expr) == BIT_FIELD_REF)
3081 {
3082 tree op = TREE_OPERAND (expr, 0);
3083 if (!is_gimple_reg_type (TREE_TYPE (expr)))
3084 {
3085 error ("non-scalar BIT_FIELD_REF, IMAGPART_EXPR or REALPART_EXPR");
3086 return true;
3087 }
3088
3089 if (TREE_CODE (expr) == BIT_FIELD_REF)
3090 {
3091 tree t1 = TREE_OPERAND (expr, 1);
3092 tree t2 = TREE_OPERAND (expr, 2);
3093 poly_uint64 size, bitpos;
3094 if (!poly_int_tree_p (t1, &size)
3095 || !poly_int_tree_p (t2, &bitpos)
3096 || !types_compatible_p (bitsizetype, TREE_TYPE (t1))
3097 || !types_compatible_p (bitsizetype, TREE_TYPE (t2)))
3098 {
3099 error ("invalid position or size operand to BIT_FIELD_REF");
3100 return true;
3101 }
3102 if (INTEGRAL_TYPE_P (TREE_TYPE (expr))
3103 && maybe_ne (TYPE_PRECISION (TREE_TYPE (expr)), size))
3104 {
3105 error ("integral result type precision does not match "
3106 "field size of BIT_FIELD_REF");
3107 return true;
3108 }
3109 else if (!INTEGRAL_TYPE_P (TREE_TYPE (expr))
3110 && TYPE_MODE (TREE_TYPE (expr)) != BLKmode
3111 && maybe_ne (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr))),
3112 size))
3113 {
3114 error ("mode size of non-integral result does not "
3115 "match field size of BIT_FIELD_REF");
3116 return true;
3117 }
3118 if (INTEGRAL_TYPE_P (TREE_TYPE (op))
3119 && !type_has_mode_precision_p (TREE_TYPE (op)))
3120 {
3121 error ("BIT_FIELD_REF of non-mode-precision operand");
3122 return true;
3123 }
3124 if (!AGGREGATE_TYPE_P (TREE_TYPE (op))
3125 && maybe_gt (size + bitpos,
3126 tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (op)))))
3127 {
3128 error ("position plus size exceeds size of referenced object in "
3129 "BIT_FIELD_REF");
3130 return true;
3131 }
3132 }
3133
3134 if ((TREE_CODE (expr) == REALPART_EXPR
3135 || TREE_CODE (expr) == IMAGPART_EXPR)
3136 && !useless_type_conversion_p (TREE_TYPE (expr),
3137 TREE_TYPE (TREE_TYPE (op))))
3138 {
3139 error ("type mismatch in real/imagpart reference");
3140 debug_generic_stmt (TREE_TYPE (expr));
3141 debug_generic_stmt (TREE_TYPE (TREE_TYPE (op)));
3142 return true;
3143 }
3144 expr = op;
3145 }
3146
3147 while (handled_component_p (expr))
3148 {
3149 if (TREE_CODE (expr) == REALPART_EXPR
3150 || TREE_CODE (expr) == IMAGPART_EXPR
3151 || TREE_CODE (expr) == BIT_FIELD_REF)
3152 {
3153 error ("non-top-level BIT_FIELD_REF, IMAGPART_EXPR or REALPART_EXPR");
3154 return true;
3155 }
3156
3157 tree op = TREE_OPERAND (expr, 0);
3158
3159 if (TREE_CODE (expr) == ARRAY_REF
3160 || TREE_CODE (expr) == ARRAY_RANGE_REF)
3161 {
3162 if (!is_gimple_val (TREE_OPERAND (expr, 1))
3163 || (TREE_OPERAND (expr, 2)
3164 && !is_gimple_val (TREE_OPERAND (expr, 2)))
3165 || (TREE_OPERAND (expr, 3)
3166 && !is_gimple_val (TREE_OPERAND (expr, 3))))
3167 {
3168 error ("invalid operands to array reference");
3169 debug_generic_stmt (expr);
3170 return true;
3171 }
3172 }
3173
3174 /* Verify if the reference array element types are compatible. */
3175 if (TREE_CODE (expr) == ARRAY_REF
3176 && !useless_type_conversion_p (TREE_TYPE (expr),
3177 TREE_TYPE (TREE_TYPE (op))))
3178 {
3179 error ("type mismatch in array reference");
3180 debug_generic_stmt (TREE_TYPE (expr));
3181 debug_generic_stmt (TREE_TYPE (TREE_TYPE (op)));
3182 return true;
3183 }
3184 if (TREE_CODE (expr) == ARRAY_RANGE_REF
3185 && !useless_type_conversion_p (TREE_TYPE (TREE_TYPE (expr)),
3186 TREE_TYPE (TREE_TYPE (op))))
3187 {
3188 error ("type mismatch in array range reference");
3189 debug_generic_stmt (TREE_TYPE (TREE_TYPE (expr)));
3190 debug_generic_stmt (TREE_TYPE (TREE_TYPE (op)));
3191 return true;
3192 }
3193
3194 if (TREE_CODE (expr) == COMPONENT_REF)
3195 {
3196 if (TREE_OPERAND (expr, 2)
3197 && !is_gimple_val (TREE_OPERAND (expr, 2)))
3198 {
3199 error ("invalid COMPONENT_REF offset operator");
3200 return true;
3201 }
3202 if (!useless_type_conversion_p (TREE_TYPE (expr),
3203 TREE_TYPE (TREE_OPERAND (expr, 1))))
3204 {
3205 error ("type mismatch in component reference");
3206 debug_generic_stmt (TREE_TYPE (expr));
3207 debug_generic_stmt (TREE_TYPE (TREE_OPERAND (expr, 1)));
3208 return true;
3209 }
3210 }
3211
3212 if (TREE_CODE (expr) == VIEW_CONVERT_EXPR)
3213 {
3214 /* For VIEW_CONVERT_EXPRs which are allowed here too, we only check
3215 that their operand is not an SSA name or an invariant when
3216 requiring an lvalue (this usually means there is a SRA or IPA-SRA
3217 bug). Otherwise there is nothing to verify, gross mismatches at
3218 most invoke undefined behavior. */
3219 if (require_lvalue
3220 && (TREE_CODE (op) == SSA_NAME
3221 || is_gimple_min_invariant (op)))
3222 {
3223 error ("conversion of an SSA_NAME on the left hand side");
3224 debug_generic_stmt (expr);
3225 return true;
3226 }
3227 else if (TREE_CODE (op) == SSA_NAME
3228 && TYPE_SIZE (TREE_TYPE (expr)) != TYPE_SIZE (TREE_TYPE (op)))
3229 {
3230 error ("conversion of register to a different size");
3231 debug_generic_stmt (expr);
3232 return true;
3233 }
3234 else if (!handled_component_p (op))
3235 return false;
3236 }
3237
3238 expr = op;
3239 }
3240
3241 if (TREE_CODE (expr) == MEM_REF)
3242 {
3243 if (!is_gimple_mem_ref_addr (TREE_OPERAND (expr, 0))
3244 || (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
3245 && verify_address (TREE_OPERAND (expr, 0), false)))
3246 {
3247 error ("invalid address operand in MEM_REF");
3248 debug_generic_stmt (expr);
3249 return true;
3250 }
3251 if (!poly_int_tree_p (TREE_OPERAND (expr, 1))
3252 || !POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1))))
3253 {
3254 error ("invalid offset operand in MEM_REF");
3255 debug_generic_stmt (expr);
3256 return true;
3257 }
3258 if (MR_DEPENDENCE_CLIQUE (expr) != 0
3259 && MR_DEPENDENCE_CLIQUE (expr) > cfun->last_clique)
3260 {
3261 error ("invalid clique in MEM_REF");
3262 debug_generic_stmt (expr);
3263 return true;
3264 }
3265 }
3266 else if (TREE_CODE (expr) == TARGET_MEM_REF)
3267 {
3268 if (!TMR_BASE (expr)
3269 || !is_gimple_mem_ref_addr (TMR_BASE (expr))
3270 || (TREE_CODE (TMR_BASE (expr)) == ADDR_EXPR
3271 && verify_address (TMR_BASE (expr), false)))
3272 {
3273 error ("invalid address operand in TARGET_MEM_REF");
3274 return true;
3275 }
3276 if (!TMR_OFFSET (expr)
3277 || !poly_int_tree_p (TMR_OFFSET (expr))
3278 || !POINTER_TYPE_P (TREE_TYPE (TMR_OFFSET (expr))))
3279 {
3280 error ("invalid offset operand in TARGET_MEM_REF");
3281 debug_generic_stmt (expr);
3282 return true;
3283 }
3284 if (MR_DEPENDENCE_CLIQUE (expr) != 0
3285 && MR_DEPENDENCE_CLIQUE (expr) > cfun->last_clique)
3286 {
3287 error ("invalid clique in TARGET_MEM_REF");
3288 debug_generic_stmt (expr);
3289 return true;
3290 }
3291 }
3292 else if (TREE_CODE (expr) == INDIRECT_REF)
3293 {
3294 error ("INDIRECT_REF in gimple IL");
3295 debug_generic_stmt (expr);
3296 return true;
3297 }
3298
3299 return ((require_lvalue || !is_gimple_min_invariant (expr))
3300 && verify_types_in_gimple_min_lval (expr));
3301 }
3302
3303 /* Returns true if there is a pointer type in the TYPE_POINTER_TO (SRC_OBJ)
3304 list of pointer-to types that is trivially convertible to DEST. */
3305
3306 static bool
3307 one_pointer_to_useless_type_conversion_p (tree dest, tree src_obj)
3308 {
3309 tree src;
3310
3311 if (!TYPE_POINTER_TO (src_obj))
3312 return true;
3313
3314 for (src = TYPE_POINTER_TO (src_obj); src; src = TYPE_NEXT_PTR_TO (src))
3315 if (useless_type_conversion_p (dest, src))
3316 return true;
3317
3318 return false;
3319 }
3320
3321 /* Return true if TYPE1 is a fixed-point type and if conversions to and
3322 from TYPE2 can be handled by FIXED_CONVERT_EXPR. */
3323
3324 static bool
3325 valid_fixed_convert_types_p (tree type1, tree type2)
3326 {
3327 return (FIXED_POINT_TYPE_P (type1)
3328 && (INTEGRAL_TYPE_P (type2)
3329 || SCALAR_FLOAT_TYPE_P (type2)
3330 || FIXED_POINT_TYPE_P (type2)));
3331 }
3332
3333 /* Verify the contents of a GIMPLE_CALL STMT. Returns true when there
3334 is a problem, otherwise false. */
3335
3336 static bool
3337 verify_gimple_call (gcall *stmt)
3338 {
3339 tree fn = gimple_call_fn (stmt);
3340 tree fntype, fndecl;
3341 unsigned i;
3342
3343 if (gimple_call_internal_p (stmt))
3344 {
3345 if (fn)
3346 {
3347 error ("gimple call has two targets");
3348 debug_generic_stmt (fn);
3349 return true;
3350 }
3351 }
3352 else
3353 {
3354 if (!fn)
3355 {
3356 error ("gimple call has no target");
3357 return true;
3358 }
3359 }
3360
3361 if (fn && !is_gimple_call_addr (fn))
3362 {
3363 error ("invalid function in gimple call");
3364 debug_generic_stmt (fn);
3365 return true;
3366 }
3367
3368 if (fn
3369 && (!POINTER_TYPE_P (TREE_TYPE (fn))
3370 || (TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != FUNCTION_TYPE
3371 && TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != METHOD_TYPE)))
3372 {
3373 error ("non-function in gimple call");
3374 return true;
3375 }
3376
3377 fndecl = gimple_call_fndecl (stmt);
3378 if (fndecl
3379 && TREE_CODE (fndecl) == FUNCTION_DECL
3380 && DECL_LOOPING_CONST_OR_PURE_P (fndecl)
3381 && !DECL_PURE_P (fndecl)
3382 && !TREE_READONLY (fndecl))
3383 {
3384 error ("invalid pure const state for function");
3385 return true;
3386 }
3387
3388 tree lhs = gimple_call_lhs (stmt);
3389 if (lhs
3390 && (!is_gimple_lvalue (lhs)
3391 || verify_types_in_gimple_reference (lhs, true)))
3392 {
3393 error ("invalid LHS in gimple call");
3394 return true;
3395 }
3396
3397 if (gimple_call_ctrl_altering_p (stmt)
3398 && gimple_call_noreturn_p (stmt)
3399 && should_remove_lhs_p (lhs))
3400 {
3401 error ("LHS in noreturn call");
3402 return true;
3403 }
3404
3405 fntype = gimple_call_fntype (stmt);
3406 if (fntype
3407 && lhs
3408 && !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (fntype))
3409 /* ??? At least C++ misses conversions at assignments from
3410 void * call results.
3411 For now simply allow arbitrary pointer type conversions. */
3412 && !(POINTER_TYPE_P (TREE_TYPE (lhs))
3413 && POINTER_TYPE_P (TREE_TYPE (fntype))))
3414 {
3415 error ("invalid conversion in gimple call");
3416 debug_generic_stmt (TREE_TYPE (lhs));
3417 debug_generic_stmt (TREE_TYPE (fntype));
3418 return true;
3419 }
3420
3421 if (gimple_call_chain (stmt)
3422 && !is_gimple_val (gimple_call_chain (stmt)))
3423 {
3424 error ("invalid static chain in gimple call");
3425 debug_generic_stmt (gimple_call_chain (stmt));
3426 return true;
3427 }
3428
3429 /* If there is a static chain argument, the call should either be
3430 indirect, or the decl should have DECL_STATIC_CHAIN set. */
3431 if (gimple_call_chain (stmt)
3432 && fndecl
3433 && !DECL_STATIC_CHAIN (fndecl))
3434 {
3435 error ("static chain with function that doesn%'t use one");
3436 return true;
3437 }
3438
3439 if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
3440 {
3441 switch (DECL_FUNCTION_CODE (fndecl))
3442 {
3443 case BUILT_IN_UNREACHABLE:
3444 case BUILT_IN_TRAP:
3445 if (gimple_call_num_args (stmt) > 0)
3446 {
3447 /* Built-in unreachable with parameters might not be caught by
3448 the undefined behavior sanitizer. Front ends do check that
3449 users do not call them that way, but we also produce calls to
3450 __builtin_unreachable internally, for example when IPA figures
3451 out a call cannot happen in a legal program. In such cases,
3452 we must make sure the arguments are stripped off. */
3453 error ("%<__builtin_unreachable%> or %<__builtin_trap%> call "
3454 "with arguments");
3455 return true;
3456 }
3457 break;
3458 default:
3459 break;
3460 }
3461 }
3462
3463 /* ??? The C frontend passes unpromoted arguments in case it
3464 didn't see a function declaration before the call. So for now
3465 leave the call arguments mostly unverified. Once we gimplify
3466 unit-at-a-time we have a chance to fix this. */
3467
3468 for (i = 0; i < gimple_call_num_args (stmt); ++i)
3469 {
3470 tree arg = gimple_call_arg (stmt, i);
3471 if ((is_gimple_reg_type (TREE_TYPE (arg))
3472 && !is_gimple_val (arg))
3473 || (!is_gimple_reg_type (TREE_TYPE (arg))
3474 && !is_gimple_lvalue (arg)))
3475 {
3476 error ("invalid argument to gimple call");
3477 debug_generic_expr (arg);
3478 return true;
3479 }
3480 }
3481
3482 return false;
3483 }
3484
3485 /* Verifies the gimple comparison with the result type TYPE, the
3486 operands OP0 and OP1, and the comparison code CODE. */
3487
3488 static bool
3489 verify_gimple_comparison (tree type, tree op0, tree op1, enum tree_code code)
3490 {
3491 tree op0_type = TREE_TYPE (op0);
3492 tree op1_type = TREE_TYPE (op1);
3493
3494 if (!is_gimple_val (op0) || !is_gimple_val (op1))
3495 {
3496 error ("invalid operands in gimple comparison");
3497 return true;
3498 }
3499
3500 /* For comparisons we do not have the operation's type as the
3501 effective type the comparison is carried out in. Instead
3502 we require that either the first operand is trivially
3503 convertible into the second, or the other way around.
3504 Because we special-case pointers to void we allow
3505 comparisons of pointers with the same mode as well. */
3506 if (!useless_type_conversion_p (op0_type, op1_type)
3507 && !useless_type_conversion_p (op1_type, op0_type)
3508 && (!POINTER_TYPE_P (op0_type)
3509 || !POINTER_TYPE_P (op1_type)
3510 || TYPE_MODE (op0_type) != TYPE_MODE (op1_type)))
3511 {
3512 error ("mismatching comparison operand types");
3513 debug_generic_expr (op0_type);
3514 debug_generic_expr (op1_type);
3515 return true;
3516 }
3517
3518 /* The resulting type of a comparison may be an effective boolean type. */
3519 if (INTEGRAL_TYPE_P (type)
3520 && (TREE_CODE (type) == BOOLEAN_TYPE
3521 || TYPE_PRECISION (type) == 1))
3522 {
3523 if ((TREE_CODE (op0_type) == VECTOR_TYPE
3524 || TREE_CODE (op1_type) == VECTOR_TYPE)
3525 && code != EQ_EXPR && code != NE_EXPR
3526 && !VECTOR_BOOLEAN_TYPE_P (op0_type)
3527 && !VECTOR_INTEGER_TYPE_P (op0_type))
3528 {
3529 error ("unsupported operation or type for vector comparison"
3530 " returning a boolean");
3531 debug_generic_expr (op0_type);
3532 debug_generic_expr (op1_type);
3533 return true;
3534 }
3535 }
3536 /* Or a boolean vector type with the same element count
3537 as the comparison operand types. */
3538 else if (TREE_CODE (type) == VECTOR_TYPE
3539 && TREE_CODE (TREE_TYPE (type)) == BOOLEAN_TYPE)
3540 {
3541 if (TREE_CODE (op0_type) != VECTOR_TYPE
3542 || TREE_CODE (op1_type) != VECTOR_TYPE)
3543 {
3544 error ("non-vector operands in vector comparison");
3545 debug_generic_expr (op0_type);
3546 debug_generic_expr (op1_type);
3547 return true;
3548 }
3549
3550 if (maybe_ne (TYPE_VECTOR_SUBPARTS (type),
3551 TYPE_VECTOR_SUBPARTS (op0_type)))
3552 {
3553 error ("invalid vector comparison resulting type");
3554 debug_generic_expr (type);
3555 return true;
3556 }
3557 }
3558 else
3559 {
3560 error ("bogus comparison result type");
3561 debug_generic_expr (type);
3562 return true;
3563 }
3564
3565 return false;
3566 }
3567
3568 /* Verify a gimple assignment statement STMT with an unary rhs.
3569 Returns true if anything is wrong. */
3570
3571 static bool
3572 verify_gimple_assign_unary (gassign *stmt)
3573 {
3574 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
3575 tree lhs = gimple_assign_lhs (stmt);
3576 tree lhs_type = TREE_TYPE (lhs);
3577 tree rhs1 = gimple_assign_rhs1 (stmt);
3578 tree rhs1_type = TREE_TYPE (rhs1);
3579
3580 if (!is_gimple_reg (lhs))
3581 {
3582 error ("non-register as LHS of unary operation");
3583 return true;
3584 }
3585
3586 if (!is_gimple_val (rhs1))
3587 {
3588 error ("invalid operand in unary operation");
3589 return true;
3590 }
3591
3592 /* First handle conversions. */
3593 switch (rhs_code)
3594 {
3595 CASE_CONVERT:
3596 {
3597 /* Allow conversions from pointer type to integral type only if
3598 there is no sign or zero extension involved.
3599 For targets where the precision of ptrofftype doesn't match that
3600 of pointers we need to allow arbitrary conversions to ptrofftype. */
3601 if ((POINTER_TYPE_P (lhs_type)
3602 && INTEGRAL_TYPE_P (rhs1_type))
3603 || (POINTER_TYPE_P (rhs1_type)
3604 && INTEGRAL_TYPE_P (lhs_type)
3605 && (TYPE_PRECISION (rhs1_type) >= TYPE_PRECISION (lhs_type)
3606 || ptrofftype_p (lhs_type))))
3607 return false;
3608
3609 /* Allow conversion from integral to offset type and vice versa. */
3610 if ((TREE_CODE (lhs_type) == OFFSET_TYPE
3611 && INTEGRAL_TYPE_P (rhs1_type))
3612 || (INTEGRAL_TYPE_P (lhs_type)
3613 && TREE_CODE (rhs1_type) == OFFSET_TYPE))
3614 return false;
3615
3616 /* Otherwise assert we are converting between types of the
3617 same kind. */
3618 if (INTEGRAL_TYPE_P (lhs_type) != INTEGRAL_TYPE_P (rhs1_type))
3619 {
3620 error ("invalid types in nop conversion");
3621 debug_generic_expr (lhs_type);
3622 debug_generic_expr (rhs1_type);
3623 return true;
3624 }
3625
3626 return false;
3627 }
3628
3629 case ADDR_SPACE_CONVERT_EXPR:
3630 {
3631 if (!POINTER_TYPE_P (rhs1_type) || !POINTER_TYPE_P (lhs_type)
3632 || (TYPE_ADDR_SPACE (TREE_TYPE (rhs1_type))
3633 == TYPE_ADDR_SPACE (TREE_TYPE (lhs_type))))
3634 {
3635 error ("invalid types in address space conversion");
3636 debug_generic_expr (lhs_type);
3637 debug_generic_expr (rhs1_type);
3638 return true;
3639 }
3640
3641 return false;
3642 }
3643
3644 case FIXED_CONVERT_EXPR:
3645 {
3646 if (!valid_fixed_convert_types_p (lhs_type, rhs1_type)
3647 && !valid_fixed_convert_types_p (rhs1_type, lhs_type))
3648 {
3649 error ("invalid types in fixed-point conversion");
3650 debug_generic_expr (lhs_type);
3651 debug_generic_expr (rhs1_type);
3652 return true;
3653 }
3654
3655 return false;
3656 }
3657
3658 case FLOAT_EXPR:
3659 {
3660 if ((!INTEGRAL_TYPE_P (rhs1_type) || !SCALAR_FLOAT_TYPE_P (lhs_type))
3661 && (!VECTOR_INTEGER_TYPE_P (rhs1_type)
3662 || !VECTOR_FLOAT_TYPE_P (lhs_type)))
3663 {
3664 error ("invalid types in conversion to floating point");
3665 debug_generic_expr (lhs_type);
3666 debug_generic_expr (rhs1_type);
3667 return true;
3668 }
3669
3670 return false;
3671 }
3672
3673 case FIX_TRUNC_EXPR:
3674 {
3675 if ((!INTEGRAL_TYPE_P (lhs_type) || !SCALAR_FLOAT_TYPE_P (rhs1_type))
3676 && (!VECTOR_INTEGER_TYPE_P (lhs_type)
3677 || !VECTOR_FLOAT_TYPE_P (rhs1_type)))
3678 {
3679 error ("invalid types in conversion to integer");
3680 debug_generic_expr (lhs_type);
3681 debug_generic_expr (rhs1_type);
3682 return true;
3683 }
3684
3685 return false;
3686 }
3687
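/* Vector unpacking takes the high or low half of the operand's
   elements and widens each of them, so the result has half as many
   elements, each (boolean vectors aside) twice as wide; e.g.
   unpacking a V8HI operand yields a V4SI result.  */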
3688 case VEC_UNPACK_HI_EXPR:
3689 case VEC_UNPACK_LO_EXPR:
3690 case VEC_UNPACK_FLOAT_HI_EXPR:
3691 case VEC_UNPACK_FLOAT_LO_EXPR:
3692 case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
3693 case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
3694 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
3695 || TREE_CODE (lhs_type) != VECTOR_TYPE
3696 || (!INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
3697 && !SCALAR_FLOAT_TYPE_P (TREE_TYPE (lhs_type)))
3698 || (!INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
3699 && !SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type)))
3700 || ((rhs_code == VEC_UNPACK_HI_EXPR
3701 || rhs_code == VEC_UNPACK_LO_EXPR)
3702 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
3703 != INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))))
3704 || ((rhs_code == VEC_UNPACK_FLOAT_HI_EXPR
3705 || rhs_code == VEC_UNPACK_FLOAT_LO_EXPR)
3706 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
3707 || SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type))))
3708 || ((rhs_code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
3709 || rhs_code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
3710 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
3711 || SCALAR_FLOAT_TYPE_P (TREE_TYPE (lhs_type))))
3712 || (maybe_ne (GET_MODE_SIZE (element_mode (lhs_type)),
3713 2 * GET_MODE_SIZE (element_mode (rhs1_type)))
3714 && (!VECTOR_BOOLEAN_TYPE_P (lhs_type)
3715 || !VECTOR_BOOLEAN_TYPE_P (rhs1_type)))
3716 || maybe_ne (2 * TYPE_VECTOR_SUBPARTS (lhs_type),
3717 TYPE_VECTOR_SUBPARTS (rhs1_type)))
3718 {
3719 error ("type mismatch in vector unpack expression");
3720 debug_generic_expr (lhs_type);
3721 debug_generic_expr (rhs1_type);
3722 return true;
3723 }
3724
3725 return false;
3726
3727 case NEGATE_EXPR:
3728 case ABS_EXPR:
3729 case BIT_NOT_EXPR:
3730 case PAREN_EXPR:
3731 case CONJ_EXPR:
3732 break;
3733
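/* ABSU_EXPR takes a signed (possibly vector) integer operand and
   yields its absolute value in the unsigned type of the same
   precision.  */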
3734 case ABSU_EXPR:
3735 if (!ANY_INTEGRAL_TYPE_P (lhs_type)
3736 || !TYPE_UNSIGNED (lhs_type)
3737 || !ANY_INTEGRAL_TYPE_P (rhs1_type)
3738 || TYPE_UNSIGNED (rhs1_type)
3739 || element_precision (lhs_type) != element_precision (rhs1_type))
3740 {
3741 error ("invalid types for ABSU_EXPR");
3742 debug_generic_expr (lhs_type);
3743 debug_generic_expr (rhs1_type);
3744 return true;
3745 }
3746 return false;
3747
3748 case VEC_DUPLICATE_EXPR:
3749 if (TREE_CODE (lhs_type) != VECTOR_TYPE
3750 || !useless_type_conversion_p (TREE_TYPE (lhs_type), rhs1_type))
3751 {
3752 error ("vec_duplicate should be from a scalar to a like vector");
3753 debug_generic_expr (lhs_type);
3754 debug_generic_expr (rhs1_type);
3755 return true;
3756 }
3757 return false;
3758
3759 default:
3760 gcc_unreachable ();
3761 }
3762
3763 /* For the remaining codes assert there is no conversion involved. */
3764 if (!useless_type_conversion_p (lhs_type, rhs1_type))
3765 {
3766 error ("non-trivial conversion in unary operation");
3767 debug_generic_expr (lhs_type);
3768 debug_generic_expr (rhs1_type);
3769 return true;
3770 }
3771
3772 return false;
3773 }
3774
3775 /* Verify a gimple assignment statement STMT with a binary rhs.
3776 Returns true if anything is wrong. */
3777
3778 static bool
3779 verify_gimple_assign_binary (gassign *stmt)
3780 {
3781 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
3782 tree lhs = gimple_assign_lhs (stmt);
3783 tree lhs_type = TREE_TYPE (lhs);
3784 tree rhs1 = gimple_assign_rhs1 (stmt);
3785 tree rhs1_type = TREE_TYPE (rhs1);
3786 tree rhs2 = gimple_assign_rhs2 (stmt);
3787 tree rhs2_type = TREE_TYPE (rhs2);
3788
3789 if (!is_gimple_reg (lhs))
3790 {
3791 error ("non-register as LHS of binary operation");
3792 return true;
3793 }
3794
3795 if (!is_gimple_val (rhs1)
3796 || !is_gimple_val (rhs2))
3797 {
3798 error ("invalid operands in binary operation");
3799 return true;
3800 }
3801
3802 /* First handle operations that involve different types. */
3803 switch (rhs_code)
3804 {
3805 case COMPLEX_EXPR:
3806 {
3807 if (TREE_CODE (lhs_type) != COMPLEX_TYPE
3808 || !(INTEGRAL_TYPE_P (rhs1_type)
3809 || SCALAR_FLOAT_TYPE_P (rhs1_type))
3810 || !(INTEGRAL_TYPE_P (rhs2_type)
3811 || SCALAR_FLOAT_TYPE_P (rhs2_type)))
3812 {
3813 error ("type mismatch in complex expression");
3814 debug_generic_expr (lhs_type);
3815 debug_generic_expr (rhs1_type);
3816 debug_generic_expr (rhs2_type);
3817 return true;
3818 }
3819
3820 return false;
3821 }
3822
3823 case LSHIFT_EXPR:
3824 case RSHIFT_EXPR:
3825 case LROTATE_EXPR:
3826 case RROTATE_EXPR:
3827 {
3828 /* Shifts and rotates are ok on integral types, fixed point
3829 types and integer vector types. */
3830 if ((!INTEGRAL_TYPE_P (rhs1_type)
3831 && !FIXED_POINT_TYPE_P (rhs1_type)
3832 && !(TREE_CODE (rhs1_type) == VECTOR_TYPE
3833 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))))
3834 || (!INTEGRAL_TYPE_P (rhs2_type)
3835 /* Vector shifts of vectors are also ok. */
3836 && !(TREE_CODE (rhs1_type) == VECTOR_TYPE
3837 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
3838 && TREE_CODE (rhs2_type) == VECTOR_TYPE
3839 && INTEGRAL_TYPE_P (TREE_TYPE (rhs2_type))))
3840 || !useless_type_conversion_p (lhs_type, rhs1_type))
3841 {
3842 error ("type mismatch in shift expression");
3843 debug_generic_expr (lhs_type);
3844 debug_generic_expr (rhs1_type);
3845 debug_generic_expr (rhs2_type);
3846 return true;
3847 }
3848
3849 return false;
3850 }
3851
3852 case WIDEN_LSHIFT_EXPR:
3853 {
3854 if (!INTEGRAL_TYPE_P (lhs_type)
3855 || !INTEGRAL_TYPE_P (rhs1_type)
3856 || TREE_CODE (rhs2) != INTEGER_CST
3857 || (2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type)))
3858 {
3859 error ("type mismatch in widening vector shift expression");
3860 debug_generic_expr (lhs_type);
3861 debug_generic_expr (rhs1_type);
3862 debug_generic_expr (rhs2_type);
3863 return true;
3864 }
3865
3866 return false;
3867 }
3868
3869 case VEC_WIDEN_LSHIFT_HI_EXPR:
3870 case VEC_WIDEN_LSHIFT_LO_EXPR:
3871 {
3872 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
3873 || TREE_CODE (lhs_type) != VECTOR_TYPE
3874 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
3875 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
3876 || TREE_CODE (rhs2) != INTEGER_CST
3877 || (2 * TYPE_PRECISION (TREE_TYPE (rhs1_type))
3878 > TYPE_PRECISION (TREE_TYPE (lhs_type))))
3879 {
3880 error ("type mismatch in widening vector shift expression");
3881 debug_generic_expr (lhs_type);
3882 debug_generic_expr (rhs1_type);
3883 debug_generic_expr (rhs2_type);
3884 return true;
3885 }
3886
3887 return false;
3888 }
3889
3890 case PLUS_EXPR:
3891 case MINUS_EXPR:
3892 {
3893 tree lhs_etype = lhs_type;
3894 tree rhs1_etype = rhs1_type;
3895 tree rhs2_etype = rhs2_type;
3896 if (TREE_CODE (lhs_type) == VECTOR_TYPE)
3897 {
3898 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
3899 || TREE_CODE (rhs2_type) != VECTOR_TYPE)
3900 {
3901 error ("invalid non-vector operands to vector valued plus");
3902 return true;
3903 }
3904 lhs_etype = TREE_TYPE (lhs_type);
3905 rhs1_etype = TREE_TYPE (rhs1_type);
3906 rhs2_etype = TREE_TYPE (rhs2_type);
3907 }
3908 if (POINTER_TYPE_P (lhs_etype)
3909 || POINTER_TYPE_P (rhs1_etype)
3910 || POINTER_TYPE_P (rhs2_etype))
3911 {
3912 error ("invalid (pointer) operands to plus/minus");
3913 return true;
3914 }
3915
3916 /* Continue with generic binary expression handling. */
3917 break;
3918 }
3919
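/* In GIMPLE all pointer arithmetic is expressed as POINTER_PLUS_EXPR:
   the first operand is the pointer and the second the byte offset,
   which must have ptrofftype, never a pointer type.  */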
3920 case POINTER_PLUS_EXPR:
3921 {
3922 if (!POINTER_TYPE_P (rhs1_type)
3923 || !useless_type_conversion_p (lhs_type, rhs1_type)
3924 || !ptrofftype_p (rhs2_type))
3925 {
3926 error ("type mismatch in pointer plus expression");
3927 debug_generic_stmt (lhs_type);
3928 debug_generic_stmt (rhs1_type);
3929 debug_generic_stmt (rhs2_type);
3930 return true;
3931 }
3932
3933 return false;
3934 }
3935
3936 case POINTER_DIFF_EXPR:
3937 {
3938 if (!POINTER_TYPE_P (rhs1_type)
3939 || !POINTER_TYPE_P (rhs2_type)
3940 /* Because we special-case pointers to void we allow difference
3941 of arbitrary pointers with the same mode. */
3942 || TYPE_MODE (rhs1_type) != TYPE_MODE (rhs2_type)
3943 || !INTEGRAL_TYPE_P (lhs_type)
3944 || TYPE_UNSIGNED (lhs_type)
3945 || TYPE_PRECISION (lhs_type) != TYPE_PRECISION (rhs1_type))
3946 {
3947 error ("type mismatch in pointer diff expression");
3948 debug_generic_stmt (lhs_type);
3949 debug_generic_stmt (rhs1_type);
3950 debug_generic_stmt (rhs2_type);
3951 return true;
3952 }
3953
3954 return false;
3955 }
3956
3957 case TRUTH_ANDIF_EXPR:
3958 case TRUTH_ORIF_EXPR:
3959 case TRUTH_AND_EXPR:
3960 case TRUTH_OR_EXPR:
3961 case TRUTH_XOR_EXPR:
3962
3963 gcc_unreachable ();
3964
3965 case LT_EXPR:
3966 case LE_EXPR:
3967 case GT_EXPR:
3968 case GE_EXPR:
3969 case EQ_EXPR:
3970 case NE_EXPR:
3971 case UNORDERED_EXPR:
3972 case ORDERED_EXPR:
3973 case UNLT_EXPR:
3974 case UNLE_EXPR:
3975 case UNGT_EXPR:
3976 case UNGE_EXPR:
3977 case UNEQ_EXPR:
3978 case LTGT_EXPR:
3979 /* Comparisons are also binary, but the result type is not
3980 connected to the operand types. */
3981 return verify_gimple_comparison (lhs_type, rhs1, rhs2, rhs_code);
3982
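/* WIDEN_MULT_EXPR multiplies two operands of equal precision into a
   result at least twice as wide.  Note that these checks report a
   failure without emitting a diagnostic.  */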
3983 case WIDEN_MULT_EXPR:
3984 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
3985 return true;
3986 return ((2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type))
3987 || (TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (rhs2_type)));
3988
3989 case WIDEN_SUM_EXPR:
3990 {
3991 if (((TREE_CODE (rhs1_type) != VECTOR_TYPE
3992 || TREE_CODE (lhs_type) != VECTOR_TYPE)
3993 && ((!INTEGRAL_TYPE_P (rhs1_type)
3994 && !SCALAR_FLOAT_TYPE_P (rhs1_type))
3995 || (!INTEGRAL_TYPE_P (lhs_type)
3996 && !SCALAR_FLOAT_TYPE_P (lhs_type))))
3997 || !useless_type_conversion_p (lhs_type, rhs2_type)
3998 || maybe_lt (GET_MODE_SIZE (element_mode (rhs2_type)),
3999 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
4000 {
4001 error ("type mismatch in widening sum reduction");
4002 debug_generic_expr (lhs_type);
4003 debug_generic_expr (rhs1_type);
4004 debug_generic_expr (rhs2_type);
4005 return true;
4006 }
4007 return false;
4008 }
4009
4010 case VEC_WIDEN_MULT_HI_EXPR:
4011 case VEC_WIDEN_MULT_LO_EXPR:
4012 case VEC_WIDEN_MULT_EVEN_EXPR:
4013 case VEC_WIDEN_MULT_ODD_EXPR:
4014 {
4015 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4016 || TREE_CODE (lhs_type) != VECTOR_TYPE
4017 || !types_compatible_p (rhs1_type, rhs2_type)
4018 || maybe_ne (GET_MODE_SIZE (element_mode (lhs_type)),
4019 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
4020 {
4021 error ("type mismatch in vector widening multiplication");
4022 debug_generic_expr (lhs_type);
4023 debug_generic_expr (rhs1_type);
4024 debug_generic_expr (rhs2_type);
4025 return true;
4026 }
4027 return false;
4028 }
4029
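/* Vector packing is the inverse of unpacking: the elements of the two
   operand vectors are narrowed to half their width and concatenated,
   e.g. two V4SI operands pack into one V8HI result.  */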
4030 case VEC_PACK_TRUNC_EXPR:
4031 /* ??? We currently use VEC_PACK_TRUNC_EXPR to simply concat
4032 vector boolean types. */
4033 if (VECTOR_BOOLEAN_TYPE_P (lhs_type)
4034 && VECTOR_BOOLEAN_TYPE_P (rhs1_type)
4035 && types_compatible_p (rhs1_type, rhs2_type)
4036 && known_eq (TYPE_VECTOR_SUBPARTS (lhs_type),
4037 2 * TYPE_VECTOR_SUBPARTS (rhs1_type)))
4038 return false;
4039
4040 /* Fallthru. */
4041 case VEC_PACK_SAT_EXPR:
4042 case VEC_PACK_FIX_TRUNC_EXPR:
4043 {
4044 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4045 || TREE_CODE (lhs_type) != VECTOR_TYPE
4046 || !((rhs_code == VEC_PACK_FIX_TRUNC_EXPR
4047 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type))
4048 && INTEGRAL_TYPE_P (TREE_TYPE (lhs_type)))
4049 || (INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
4050 == INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))))
4051 || !types_compatible_p (rhs1_type, rhs2_type)
4052 || maybe_ne (GET_MODE_SIZE (element_mode (rhs1_type)),
4053 2 * GET_MODE_SIZE (element_mode (lhs_type)))
4054 || maybe_ne (2 * TYPE_VECTOR_SUBPARTS (rhs1_type),
4055 TYPE_VECTOR_SUBPARTS (lhs_type)))
4056 {
4057 error ("type mismatch in vector pack expression");
4058 debug_generic_expr (lhs_type);
4059 debug_generic_expr (rhs1_type);
4060 debug_generic_expr (rhs2_type);
4061 return true;
4062 }
4063
4064 return false;
4065 }
4066
4067 case VEC_PACK_FLOAT_EXPR:
4068 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4069 || TREE_CODE (lhs_type) != VECTOR_TYPE
4070 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
4071 || !SCALAR_FLOAT_TYPE_P (TREE_TYPE (lhs_type))
4072 || !types_compatible_p (rhs1_type, rhs2_type)
4073 || maybe_ne (GET_MODE_SIZE (element_mode (rhs1_type)),
4074 2 * GET_MODE_SIZE (element_mode (lhs_type)))
4075 || maybe_ne (2 * TYPE_VECTOR_SUBPARTS (rhs1_type),
4076 TYPE_VECTOR_SUBPARTS (lhs_type)))
4077 {
4078 error ("type mismatch in vector pack expression");
4079 debug_generic_expr (lhs_type);
4080 debug_generic_expr (rhs1_type);
4081 debug_generic_expr (rhs2_type);
4082 return true;
4083 }
4084
4085 return false;
4086
4087 case MULT_EXPR:
4088 case MULT_HIGHPART_EXPR:
4089 case TRUNC_DIV_EXPR:
4090 case CEIL_DIV_EXPR:
4091 case FLOOR_DIV_EXPR:
4092 case ROUND_DIV_EXPR:
4093 case TRUNC_MOD_EXPR:
4094 case CEIL_MOD_EXPR:
4095 case FLOOR_MOD_EXPR:
4096 case ROUND_MOD_EXPR:
4097 case RDIV_EXPR:
4098 case EXACT_DIV_EXPR:
4099 case MIN_EXPR:
4100 case MAX_EXPR:
4101 case BIT_IOR_EXPR:
4102 case BIT_XOR_EXPR:
4103 case BIT_AND_EXPR:
4104 /* Continue with generic binary expression handling. */
4105 break;
4106
4107 case VEC_SERIES_EXPR:
4108 if (!useless_type_conversion_p (rhs1_type, rhs2_type))
4109 {
4110 error ("type mismatch in series expression");
4111 debug_generic_expr (rhs1_type);
4112 debug_generic_expr (rhs2_type);
4113 return true;
4114 }
4115 if (TREE_CODE (lhs_type) != VECTOR_TYPE
4116 || !useless_type_conversion_p (TREE_TYPE (lhs_type), rhs1_type))
4117 {
4118 error ("vector type expected in series expression");
4119 debug_generic_expr (lhs_type);
4120 return true;
4121 }
4122 return false;
4123
4124 default:
4125 gcc_unreachable ();
4126 }
4127
4128 if (!useless_type_conversion_p (lhs_type, rhs1_type)
4129 || !useless_type_conversion_p (lhs_type, rhs2_type))
4130 {
4131 error ("type mismatch in binary expression");
4132 debug_generic_stmt (lhs_type);
4133 debug_generic_stmt (rhs1_type);
4134 debug_generic_stmt (rhs2_type);
4135 return true;
4136 }
4137
4138 return false;
4139 }
4140
4141 /* Verify a gimple assignment statement STMT with a ternary rhs.
4142 Returns true if anything is wrong. */
4143
4144 static bool
4145 verify_gimple_assign_ternary (gassign *stmt)
4146 {
4147 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
4148 tree lhs = gimple_assign_lhs (stmt);
4149 tree lhs_type = TREE_TYPE (lhs);
4150 tree rhs1 = gimple_assign_rhs1 (stmt);
4151 tree rhs1_type = TREE_TYPE (rhs1);
4152 tree rhs2 = gimple_assign_rhs2 (stmt);
4153 tree rhs2_type = TREE_TYPE (rhs2);
4154 tree rhs3 = gimple_assign_rhs3 (stmt);
4155 tree rhs3_type = TREE_TYPE (rhs3);
4156
4157 if (!is_gimple_reg (lhs))
4158 {
4159 error ("non-register as LHS of ternary operation");
4160 return true;
4161 }
4162
4163 if (((rhs_code == VEC_COND_EXPR || rhs_code == COND_EXPR)
4164 ? !is_gimple_condexpr (rhs1) : !is_gimple_val (rhs1))
4165 || !is_gimple_val (rhs2)
4166 || !is_gimple_val (rhs3))
4167 {
4168 error ("invalid operands in ternary operation");
4169 return true;
4170 }
4171
4172 /* First handle operations that involve different types. */
4173 switch (rhs_code)
4174 {
4175 case WIDEN_MULT_PLUS_EXPR:
4176 case WIDEN_MULT_MINUS_EXPR:
4177 if ((!INTEGRAL_TYPE_P (rhs1_type)
4178 && !FIXED_POINT_TYPE_P (rhs1_type))
4179 || !useless_type_conversion_p (rhs1_type, rhs2_type)
4180 || !useless_type_conversion_p (lhs_type, rhs3_type)
4181 || 2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type)
4182 || TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (rhs2_type))
4183 {
4184 error ("type mismatch in widening multiply-accumulate expression");
4185 debug_generic_expr (lhs_type);
4186 debug_generic_expr (rhs1_type);
4187 debug_generic_expr (rhs2_type);
4188 debug_generic_expr (rhs3_type);
4189 return true;
4190 }
4191 break;
4192
4193 case VEC_COND_EXPR:
4194 if (!VECTOR_BOOLEAN_TYPE_P (rhs1_type)
4195 || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs1_type),
4196 TYPE_VECTOR_SUBPARTS (lhs_type)))
4197 {
4198 error ("the first argument of a VEC_COND_EXPR must be of a "
4199 "boolean vector type of the same number of elements "
4200 "as the result");
4201 debug_generic_expr (lhs_type);
4202 debug_generic_expr (rhs1_type);
4203 return true;
4204 }
4205 /* Fallthrough. */
4206 case COND_EXPR:
4207 if (!is_gimple_val (rhs1)
4208 && verify_gimple_comparison (TREE_TYPE (rhs1),
4209 TREE_OPERAND (rhs1, 0),
4210 TREE_OPERAND (rhs1, 1),
4211 TREE_CODE (rhs1)))
4212 return true;
4213 if (!useless_type_conversion_p (lhs_type, rhs2_type)
4214 || !useless_type_conversion_p (lhs_type, rhs3_type))
4215 {
4216 error ("type mismatch in conditional expression");
4217 debug_generic_expr (lhs_type);
4218 debug_generic_expr (rhs2_type);
4219 debug_generic_expr (rhs3_type);
4220 return true;
4221 }
4222 break;
4223
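/* VEC_PERM_EXPR <v0, v1, sel> builds a vector by selecting, for each
   element of SEL, the corresponding element from the concatenation of
   V0 and V1.  All four vectors must have the same number of elements,
   and a non-constant selector must use integer elements of the same
   width as the data elements.  */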
4224 case VEC_PERM_EXPR:
4225 if (!useless_type_conversion_p (lhs_type, rhs1_type)
4226 || !useless_type_conversion_p (lhs_type, rhs2_type))
4227 {
4228 error ("type mismatch in vector permute expression");
4229 debug_generic_expr (lhs_type);
4230 debug_generic_expr (rhs1_type);
4231 debug_generic_expr (rhs2_type);
4232 debug_generic_expr (rhs3_type);
4233 return true;
4234 }
4235
4236 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4237 || TREE_CODE (rhs2_type) != VECTOR_TYPE
4238 || TREE_CODE (rhs3_type) != VECTOR_TYPE)
4239 {
4240 error ("vector types expected in vector permute expression");
4241 debug_generic_expr (lhs_type);
4242 debug_generic_expr (rhs1_type);
4243 debug_generic_expr (rhs2_type);
4244 debug_generic_expr (rhs3_type);
4245 return true;
4246 }
4247
4248 if (maybe_ne (TYPE_VECTOR_SUBPARTS (rhs1_type),
4249 TYPE_VECTOR_SUBPARTS (rhs2_type))
4250 || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs2_type),
4251 TYPE_VECTOR_SUBPARTS (rhs3_type))
4252 || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs3_type),
4253 TYPE_VECTOR_SUBPARTS (lhs_type)))
4254 {
4255 error ("vectors with different element number found "
4256 "in vector permute expression");
4257 debug_generic_expr (lhs_type);
4258 debug_generic_expr (rhs1_type);
4259 debug_generic_expr (rhs2_type);
4260 debug_generic_expr (rhs3_type);
4261 return true;
4262 }
4263
4264 if (TREE_CODE (TREE_TYPE (rhs3_type)) != INTEGER_TYPE
4265 || (TREE_CODE (rhs3) != VECTOR_CST
4266 && (GET_MODE_BITSIZE (SCALAR_INT_TYPE_MODE
4267 (TREE_TYPE (rhs3_type)))
4268 != GET_MODE_BITSIZE (SCALAR_TYPE_MODE
4269 (TREE_TYPE (rhs1_type))))))
4270 {
4271 error ("invalid mask type in vector permute expression");
4272 debug_generic_expr (lhs_type);
4273 debug_generic_expr (rhs1_type);
4274 debug_generic_expr (rhs2_type);
4275 debug_generic_expr (rhs3_type);
4276 return true;
4277 }
4278
4279 return false;
4280
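/* SAD_EXPR <v0, v1, acc> adds the sum of absolute differences of V0
   and V1 to the accumulator ACC, whose elements must be at least
   twice as wide as the input elements.  */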
4281 case SAD_EXPR:
4282 if (!useless_type_conversion_p (rhs1_type, rhs2_type)
4283 || !useless_type_conversion_p (lhs_type, rhs3_type)
4284 || 2 * GET_MODE_UNIT_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1_type)))
4285 > GET_MODE_UNIT_BITSIZE (TYPE_MODE (TREE_TYPE (lhs_type))))
4286 {
4287 error ("type mismatch in sad expression");
4288 debug_generic_expr (lhs_type);
4289 debug_generic_expr (rhs1_type);
4290 debug_generic_expr (rhs2_type);
4291 debug_generic_expr (rhs3_type);
4292 return true;
4293 }
4294
4295 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4296 || TREE_CODE (rhs2_type) != VECTOR_TYPE
4297 || TREE_CODE (rhs3_type) != VECTOR_TYPE)
4298 {
4299 error ("vector types expected in sad expression");
4300 debug_generic_expr (lhs_type);
4301 debug_generic_expr (rhs1_type);
4302 debug_generic_expr (rhs2_type);
4303 debug_generic_expr (rhs3_type);
4304 return true;
4305 }
4306
4307 return false;
4308
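/* BIT_INSERT_EXPR <base, val, pos> yields BASE with VAL inserted at
   bit position POS.  For an integral BASE the inserted bits must lie
   within its precision; for a vector BASE, VAL must be a whole
   element stored at an element boundary.  */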
4309 case BIT_INSERT_EXPR:
4310 if (! useless_type_conversion_p (lhs_type, rhs1_type))
4311 {
4312 error ("type mismatch in BIT_INSERT_EXPR");
4313 debug_generic_expr (lhs_type);
4314 debug_generic_expr (rhs1_type);
4315 return true;
4316 }
4317 if (! ((INTEGRAL_TYPE_P (rhs1_type)
4318 && INTEGRAL_TYPE_P (rhs2_type))
4319 || (VECTOR_TYPE_P (rhs1_type)
4320 && types_compatible_p (TREE_TYPE (rhs1_type), rhs2_type))))
4321 {
4322 error ("not allowed type combination in BIT_INSERT_EXPR");
4323 debug_generic_expr (rhs1_type);
4324 debug_generic_expr (rhs2_type);
4325 return true;
4326 }
4327 if (! tree_fits_uhwi_p (rhs3)
4328 || ! types_compatible_p (bitsizetype, TREE_TYPE (rhs3))
4329 || ! tree_fits_uhwi_p (TYPE_SIZE (rhs2_type)))
4330 {
4331 error ("invalid position or size in BIT_INSERT_EXPR");
4332 return true;
4333 }
4334 if (INTEGRAL_TYPE_P (rhs1_type)
4335 && !type_has_mode_precision_p (rhs1_type))
4336 {
4337 error ("BIT_INSERT_EXPR into non-mode-precision operand");
4338 return true;
4339 }
4340 if (INTEGRAL_TYPE_P (rhs1_type))
4341 {
4342 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (rhs3);
4343 if (bitpos >= TYPE_PRECISION (rhs1_type)
4344 || (bitpos + TYPE_PRECISION (rhs2_type)
4345 > TYPE_PRECISION (rhs1_type)))
4346 {
4347 error ("insertion out of range in BIT_INSERT_EXPR");
4348 return true;
4349 }
4350 }
4351 else if (VECTOR_TYPE_P (rhs1_type))
4352 {
4353 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (rhs3);
4354 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TYPE_SIZE (rhs2_type));
4355 if (bitpos % bitsize != 0)
4356 {
4357 error ("vector insertion not at element boundary");
4358 return true;
4359 }
4360 }
4361 return false;
4362
4363 case DOT_PROD_EXPR:
4364 {
4365 if (((TREE_CODE (rhs1_type) != VECTOR_TYPE
4366 || TREE_CODE (lhs_type) != VECTOR_TYPE)
4367 && ((!INTEGRAL_TYPE_P (rhs1_type)
4368 && !SCALAR_FLOAT_TYPE_P (rhs1_type))
4369 || (!INTEGRAL_TYPE_P (lhs_type)
4370 && !SCALAR_FLOAT_TYPE_P (lhs_type))))
4371 || !types_compatible_p (rhs1_type, rhs2_type)
4372 || !useless_type_conversion_p (lhs_type, rhs3_type)
4373 || maybe_lt (GET_MODE_SIZE (element_mode (rhs3_type)),
4374 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
4375 {
4376 error ("type mismatch in dot product reduction");
4377 debug_generic_expr (lhs_type);
4378 debug_generic_expr (rhs1_type);
4379 debug_generic_expr (rhs2_type);
4380 return true;
4381 }
4382 return false;
4383 }
4384
4385 case REALIGN_LOAD_EXPR:
4386 /* FIXME. */
4387 return false;
4388
4389 default:
4390 gcc_unreachable ();
4391 }
4392 return false;
4393 }
4394
4395 /* Verify a gimple assignment statement STMT with a single rhs.
4396 Returns true if anything is wrong. */
4397
4398 static bool
4399 verify_gimple_assign_single (gassign *stmt)
4400 {
4401 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
4402 tree lhs = gimple_assign_lhs (stmt);
4403 tree lhs_type = TREE_TYPE (lhs);
4404 tree rhs1 = gimple_assign_rhs1 (stmt);
4405 tree rhs1_type = TREE_TYPE (rhs1);
4406 bool res = false;
4407
4408 if (!useless_type_conversion_p (lhs_type, rhs1_type))
4409 {
4410 error ("non-trivial conversion at assignment");
4411 debug_generic_expr (lhs_type);
4412 debug_generic_expr (rhs1_type);
4413 return true;
4414 }
4415
4416 if (gimple_clobber_p (stmt)
4417 && !(DECL_P (lhs) || TREE_CODE (lhs) == MEM_REF))
4418 {
4419 error ("non-decl/MEM_REF LHS in clobber statement");
4420 debug_generic_expr (lhs);
4421 return true;
4422 }
4423
4424 if (handled_component_p (lhs)
4425 || TREE_CODE (lhs) == MEM_REF
4426 || TREE_CODE (lhs) == TARGET_MEM_REF)
4427 res |= verify_types_in_gimple_reference (lhs, true);
4428
4429 /* Special codes we cannot handle via their class. */
4430 switch (rhs_code)
4431 {
4432 case ADDR_EXPR:
4433 {
4434 tree op = TREE_OPERAND (rhs1, 0);
4435 if (!is_gimple_addressable (op))
4436 {
4437 error ("invalid operand in unary expression");
4438 return true;
4439 }
4440
4441 /* Technically there is no longer a need for matching types, but
4442 gimple hygiene asks for this check. In LTO we can end up
4443 combining incompatible units and thus end up with addresses
4444 of globals that change their type to a common one. */
4445 if (!in_lto_p
4446 && !types_compatible_p (TREE_TYPE (op),
4447 TREE_TYPE (TREE_TYPE (rhs1)))
4448 && !one_pointer_to_useless_type_conversion_p (TREE_TYPE (rhs1),
4449 TREE_TYPE (op)))
4450 {
4451 error ("type mismatch in address expression");
4452 debug_generic_stmt (TREE_TYPE (rhs1));
4453 debug_generic_stmt (TREE_TYPE (op));
4454 return true;
4455 }
4456
4457 return (verify_address (rhs1, true)
4458 || verify_types_in_gimple_reference (op, true));
4459 }
4460
4461 /* tcc_reference */
4462 case INDIRECT_REF:
4463 error ("INDIRECT_REF in gimple IL");
4464 return true;
4465
4466 case COMPONENT_REF:
4467 case BIT_FIELD_REF:
4468 case ARRAY_REF:
4469 case ARRAY_RANGE_REF:
4470 case VIEW_CONVERT_EXPR:
4471 case REALPART_EXPR:
4472 case IMAGPART_EXPR:
4473 case TARGET_MEM_REF:
4474 case MEM_REF:
4475 if (!is_gimple_reg (lhs)
4476 && is_gimple_reg_type (TREE_TYPE (lhs)))
4477 {
4478 error ("invalid rhs for gimple memory store");
4479 debug_generic_stmt (lhs);
4480 debug_generic_stmt (rhs1);
4481 return true;
4482 }
4483 return res || verify_types_in_gimple_reference (rhs1, false);
4484
4485 /* tcc_constant */
4486 case SSA_NAME:
4487 case INTEGER_CST:
4488 case REAL_CST:
4489 case FIXED_CST:
4490 case COMPLEX_CST:
4491 case VECTOR_CST:
4492 case STRING_CST:
4493 return res;
4494
4495 /* tcc_declaration */
4496 case CONST_DECL:
4497 return res;
4498 case VAR_DECL:
4499 case PARM_DECL:
4500 if (!is_gimple_reg (lhs)
4501 && !is_gimple_reg (rhs1)
4502 && is_gimple_reg_type (TREE_TYPE (lhs)))
4503 {
4504 error ("invalid rhs for gimple memory store");
4505 debug_generic_stmt (lhs);
4506 debug_generic_stmt (rhs1);
4507 return true;
4508 }
4509 return res;
4510
4511 case CONSTRUCTOR:
4512 if (TREE_CODE (rhs1_type) == VECTOR_TYPE)
4513 {
4514 unsigned int i;
4515 tree elt_i, elt_v, elt_t = NULL_TREE;
4516
4517 if (CONSTRUCTOR_NELTS (rhs1) == 0)
4518 return res;
4519 /* For vector CONSTRUCTORs we require that either it is an empty
4520 CONSTRUCTOR, or it is a CONSTRUCTOR of smaller vector elements
4521 (then the element count must be correct to cover the whole
4522 outer vector and the index must be NULL on all elements), or it
4523 is a CONSTRUCTOR of scalar elements, where we as an exception
4524 allow a smaller number of elements (assuming zero filling) and
4525 consecutive indexes as compared to NULL indexes (such
4526 CONSTRUCTORs can appear in the IL from FEs). */
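/* E.g. for a V4SI-typed result both of the following forms are valid:
     { v2si_1, v2si_2 }   <-- two subvectors covering all elements
     { a_1, b_2 }         <-- scalars, trailing elements zero-filled  */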
4527 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (rhs1), i, elt_i, elt_v)
4528 {
4529 if (elt_t == NULL_TREE)
4530 {
4531 elt_t = TREE_TYPE (elt_v);
4532 if (TREE_CODE (elt_t) == VECTOR_TYPE)
4533 {
4534 tree elt_t = TREE_TYPE (elt_v);
4535 if (!useless_type_conversion_p (TREE_TYPE (rhs1_type),
4536 TREE_TYPE (elt_t)))
4537 {
4538 error ("incorrect type of vector CONSTRUCTOR"
4539 " elements");
4540 debug_generic_stmt (rhs1);
4541 return true;
4542 }
4543 else if (maybe_ne (CONSTRUCTOR_NELTS (rhs1)
4544 * TYPE_VECTOR_SUBPARTS (elt_t),
4545 TYPE_VECTOR_SUBPARTS (rhs1_type)))
4546 {
4547 error ("incorrect number of vector CONSTRUCTOR"
4548 " elements");
4549 debug_generic_stmt (rhs1);
4550 return true;
4551 }
4552 }
4553 else if (!useless_type_conversion_p (TREE_TYPE (rhs1_type),
4554 elt_t))
4555 {
4556 error ("incorrect type of vector CONSTRUCTOR elements");
4557 debug_generic_stmt (rhs1);
4558 return true;
4559 }
4560 else if (maybe_gt (CONSTRUCTOR_NELTS (rhs1),
4561 TYPE_VECTOR_SUBPARTS (rhs1_type)))
4562 {
4563 error ("incorrect number of vector CONSTRUCTOR elements");
4564 debug_generic_stmt (rhs1);
4565 return true;
4566 }
4567 }
4568 else if (!useless_type_conversion_p (elt_t, TREE_TYPE (elt_v)))
4569 {
4570 error ("incorrect type of vector CONSTRUCTOR elements");
4571 debug_generic_stmt (rhs1);
4572 return true;
4573 }
4574 if (elt_i != NULL_TREE
4575 && (TREE_CODE (elt_t) == VECTOR_TYPE
4576 || TREE_CODE (elt_i) != INTEGER_CST
4577 || compare_tree_int (elt_i, i) != 0))
4578 {
4579 error ("vector CONSTRUCTOR with non-NULL element index");
4580 debug_generic_stmt (rhs1);
4581 return true;
4582 }
4583 if (!is_gimple_val (elt_v))
4584 {
4585 error ("vector CONSTRUCTOR element is not a GIMPLE value");
4586 debug_generic_stmt (rhs1);
4587 return true;
4588 }
4589 }
4590 }
4591 else if (CONSTRUCTOR_NELTS (rhs1) != 0)
4592 {
4593 error ("non-vector CONSTRUCTOR with elements");
4594 debug_generic_stmt (rhs1);
4595 return true;
4596 }
4597 return res;
4598
4599 case ASSERT_EXPR:
4600 /* FIXME. */
4601 rhs1 = fold (ASSERT_EXPR_COND (rhs1));
4602 if (rhs1 == boolean_false_node)
4603 {
4604 error ("ASSERT_EXPR with an always-false condition");
4605 debug_generic_stmt (rhs1);
4606 return true;
4607 }
4608 break;
4609
4610 case OBJ_TYPE_REF:
4611 case WITH_SIZE_EXPR:
4612 /* FIXME. */
4613 return res;
4614
4615 default:;
4616 }
4617
4618 return res;
4619 }
4620
4621 /* Verify the contents of a GIMPLE_ASSIGN STMT. Returns true when there
4622 is a problem, otherwise false. */
4623
4624 static bool
4625 verify_gimple_assign (gassign *stmt)
4626 {
4627 switch (gimple_assign_rhs_class (stmt))
4628 {
4629 case GIMPLE_SINGLE_RHS:
4630 return verify_gimple_assign_single (stmt);
4631
4632 case GIMPLE_UNARY_RHS:
4633 return verify_gimple_assign_unary (stmt);
4634
4635 case GIMPLE_BINARY_RHS:
4636 return verify_gimple_assign_binary (stmt);
4637
4638 case GIMPLE_TERNARY_RHS:
4639 return verify_gimple_assign_ternary (stmt);
4640
4641 default:
4642 gcc_unreachable ();
4643 }
4644 }
4645
4646 /* Verify the contents of a GIMPLE_RETURN STMT. Returns true when there
4647 is a problem, otherwise false. */
4648
4649 static bool
4650 verify_gimple_return (greturn *stmt)
4651 {
4652 tree op = gimple_return_retval (stmt);
4653 tree restype = TREE_TYPE (TREE_TYPE (cfun->decl));
4654
4655 /* We cannot test for present return values as we do not fix up missing
4656 return values from the original source. */
4657 if (op == NULL)
4658 return false;
4659
4660 if (!is_gimple_val (op)
4661 && TREE_CODE (op) != RESULT_DECL)
4662 {
4663 error ("invalid operand in return statement");
4664 debug_generic_stmt (op);
4665 return true;
4666 }
4667
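/* When the result is returned by invisible reference, OP has pointer
   type; strip OP to its type here so that TREE_TYPE (op) below yields
   the pointed-to type, which is the actual return type.  */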
4668 if ((TREE_CODE (op) == RESULT_DECL
4669 && DECL_BY_REFERENCE (op))
4670 || (TREE_CODE (op) == SSA_NAME
4671 && SSA_NAME_VAR (op)
4672 && TREE_CODE (SSA_NAME_VAR (op)) == RESULT_DECL
4673 && DECL_BY_REFERENCE (SSA_NAME_VAR (op))))
4674 op = TREE_TYPE (op);
4675
4676 if (!useless_type_conversion_p (restype, TREE_TYPE (op)))
4677 {
4678 error ("invalid conversion in return statement");
4679 debug_generic_stmt (restype);
4680 debug_generic_stmt (TREE_TYPE (op));
4681 return true;
4682 }
4683
4684 return false;
4685 }
4686
4687
4688 /* Verify the contents of a GIMPLE_GOTO STMT. Returns true when there
4689 is a problem, otherwise false. */
4690
4691 static bool
4692 verify_gimple_goto (ggoto *stmt)
4693 {
4694 tree dest = gimple_goto_dest (stmt);
4695
4696 /* ??? We have two canonical forms of direct goto destinations, a
4697 bare LABEL_DECL and an ADDR_EXPR of a LABEL_DECL. */
4698 if (TREE_CODE (dest) != LABEL_DECL
4699 && (!is_gimple_val (dest)
4700 || !POINTER_TYPE_P (TREE_TYPE (dest))))
4701 {
4702 error ("goto destination is neither a label nor a pointer");
4703 return true;
4704 }
4705
4706 return false;
4707 }
4708
4709 /* Verify the contents of a GIMPLE_SWITCH STMT. Returns true when there
4710 is a problem, otherwise false. */
4711
4712 static bool
4713 verify_gimple_switch (gswitch *stmt)
4714 {
4715 unsigned int i, n;
4716 tree elt, prev_upper_bound = NULL_TREE;
4717 tree index_type, elt_type = NULL_TREE;
4718
4719 if (!is_gimple_val (gimple_switch_index (stmt)))
4720 {
4721 error ("invalid operand to switch statement");
4722 debug_generic_stmt (gimple_switch_index (stmt));
4723 return true;
4724 }
4725
4726 index_type = TREE_TYPE (gimple_switch_index (stmt));
4727 if (! INTEGRAL_TYPE_P (index_type))
4728 {
4729 error ("non-integral type switch statement");
4730 debug_generic_expr (index_type);
4731 return true;
4732 }
4733
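/* Label 0 is the default case and must carry no value; the remaining
   labels must have matching types and be sorted by their low value,
   with non-overlapping ranges.  */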
4734 elt = gimple_switch_label (stmt, 0);
4735 if (CASE_LOW (elt) != NULL_TREE
4736 || CASE_HIGH (elt) != NULL_TREE
4737 || CASE_CHAIN (elt) != NULL_TREE)
4738 {
4739 error ("invalid default case label in switch statement");
4740 debug_generic_expr (elt);
4741 return true;
4742 }
4743
4744 n = gimple_switch_num_labels (stmt);
4745 for (i = 1; i < n; i++)
4746 {
4747 elt = gimple_switch_label (stmt, i);
4748
4749 if (CASE_CHAIN (elt))
4750 {
4751 error ("invalid CASE_CHAIN");
4752 debug_generic_expr (elt);
4753 return true;
4754 }
4755 if (! CASE_LOW (elt))
4756 {
4757 error ("invalid case label in switch statement");
4758 debug_generic_expr (elt);
4759 return true;
4760 }
4761 if (CASE_HIGH (elt)
4762 && ! tree_int_cst_lt (CASE_LOW (elt), CASE_HIGH (elt)))
4763 {
4764 error ("invalid case range in switch statement");
4765 debug_generic_expr (elt);
4766 return true;
4767 }
4768
4769 if (elt_type)
4770 {
4771 if (TREE_TYPE (CASE_LOW (elt)) != elt_type
4772 || (CASE_HIGH (elt) && TREE_TYPE (CASE_HIGH (elt)) != elt_type))
4773 {
4774 error ("type mismatch for case label in switch statement");
4775 debug_generic_expr (elt);
4776 return true;
4777 }
4778 }
4779 else
4780 {
4781 elt_type = TREE_TYPE (CASE_LOW (elt));
4782 if (TYPE_PRECISION (index_type) < TYPE_PRECISION (elt_type))
4783 {
4784 error ("type precision mismatch in switch statement");
4785 return true;
4786 }
4787 }
4788
4789 if (prev_upper_bound)
4790 {
4791 if (! tree_int_cst_lt (prev_upper_bound, CASE_LOW (elt)))
4792 {
4793 error ("case labels not sorted in switch statement");
4794 return true;
4795 }
4796 }
4797
4798 prev_upper_bound = CASE_HIGH (elt);
4799 if (! prev_upper_bound)
4800 prev_upper_bound = CASE_LOW (elt);
4801 }
4802
4803 return false;
4804 }
4805
4806 /* Verify a gimple debug statement STMT.
4807 Returns true if anything is wrong. */
4808
4809 static bool
4810 verify_gimple_debug (gimple *stmt ATTRIBUTE_UNUSED)
4811 {
4812 /* There isn't much that could be wrong in a gimple debug stmt. A
4813 gimple debug bind stmt, for example, maps a tree, that's usually
4814 a VAR_DECL or a PARM_DECL, but that could also be some scalarized
4815 component or member of an aggregate type, to another tree, that
4816 can be an arbitrary expression. These stmts expand into debug
4817 insns, and are converted to debug notes by var-tracking.c. */
4818 return false;
4819 }
4820
4821 /* Verify a gimple label statement STMT.
4822 Returns true if anything is wrong. */
4823
4824 static bool
4825 verify_gimple_label (glabel *stmt)
4826 {
4827 tree decl = gimple_label_label (stmt);
4828 int uid;
4829 bool err = false;
4830
4831 if (TREE_CODE (decl) != LABEL_DECL)
4832 return true;
4833 if (!DECL_NONLOCAL (decl) && !FORCED_LABEL (decl)
4834 && DECL_CONTEXT (decl) != current_function_decl)
4835 {
4836 error ("label%'s context is not the current function decl");
4837 err |= true;
4838 }
4839
4840 uid = LABEL_DECL_UID (decl);
4841 if (cfun->cfg
4842 && (uid == -1
4843 || (*label_to_block_map_for_fn (cfun))[uid] != gimple_bb (stmt)))
4844 {
4845 error ("incorrect entry in label_to_block_map");
4846 err |= true;
4847 }
4848
4849 uid = EH_LANDING_PAD_NR (decl);
4850 if (uid)
4851 {
4852 eh_landing_pad lp = get_eh_landing_pad_from_number (uid);
4853 if (decl != lp->post_landing_pad)
4854 {
4855 error ("incorrect setting of landing pad number");
4856 err |= true;
4857 }
4858 }
4859
4860 return err;
4861 }
4862
4863 /* Verify a gimple cond statement STMT.
4864 Returns true if anything is wrong. */
4865
4866 static bool
4867 verify_gimple_cond (gcond *stmt)
4868 {
4869 if (TREE_CODE_CLASS (gimple_cond_code (stmt)) != tcc_comparison)
4870 {
4871 error ("invalid comparison code in gimple cond");
4872 return true;
4873 }
4874 if (!(!gimple_cond_true_label (stmt)
4875 || TREE_CODE (gimple_cond_true_label (stmt)) == LABEL_DECL)
4876 || !(!gimple_cond_false_label (stmt)
4877 || TREE_CODE (gimple_cond_false_label (stmt)) == LABEL_DECL))
4878 {
4879 error ("invalid labels in gimple cond");
4880 return true;
4881 }
4882
4883 return verify_gimple_comparison (boolean_type_node,
4884 gimple_cond_lhs (stmt),
4885 gimple_cond_rhs (stmt),
4886 gimple_cond_code (stmt));
4887 }
4888
4889 /* Verify the GIMPLE statement STMT. Returns true if there is an
4890 error, otherwise false. */
4891
4892 static bool
4893 verify_gimple_stmt (gimple *stmt)
4894 {
4895 switch (gimple_code (stmt))
4896 {
4897 case GIMPLE_ASSIGN:
4898 return verify_gimple_assign (as_a <gassign *> (stmt));
4899
4900 case GIMPLE_LABEL:
4901 return verify_gimple_label (as_a <glabel *> (stmt));
4902
4903 case GIMPLE_CALL:
4904 return verify_gimple_call (as_a <gcall *> (stmt));
4905
4906 case GIMPLE_COND:
4907 return verify_gimple_cond (as_a <gcond *> (stmt));
4908
4909 case GIMPLE_GOTO:
4910 return verify_gimple_goto (as_a <ggoto *> (stmt));
4911
4912 case GIMPLE_SWITCH:
4913 return verify_gimple_switch (as_a <gswitch *> (stmt));
4914
4915 case GIMPLE_RETURN:
4916 return verify_gimple_return (as_a <greturn *> (stmt));
4917
4918 case GIMPLE_ASM:
4919 return false;
4920
4921 case GIMPLE_TRANSACTION:
4922 return verify_gimple_transaction (as_a <gtransaction *> (stmt));
4923
4924 /* Tuples that do not have tree operands. */
4925 case GIMPLE_NOP:
4926 case GIMPLE_PREDICT:
4927 case GIMPLE_RESX:
4928 case GIMPLE_EH_DISPATCH:
4929 case GIMPLE_EH_MUST_NOT_THROW:
4930 return false;
4931
4932 CASE_GIMPLE_OMP:
4933 /* OpenMP directives are validated by the FE and never operated
4934 on by the optimizers. Furthermore, GIMPLE_OMP_FOR may contain
4935 non-gimple expressions when the main index variable has had
4936 its address taken. This does not affect the loop itself
4937 because the header of a GIMPLE_OMP_FOR is merely used to determine
4938 how to set up the parallel iteration. */
4939 return false;
4940
4941 case GIMPLE_DEBUG:
4942 return verify_gimple_debug (stmt);
4943
4944 default:
4945 gcc_unreachable ();
4946 }
4947 }
4948
4949 /* Verify the contents of a GIMPLE_PHI. Returns true if there is a problem,
4950 and false otherwise. */
4951
4952 static bool
4953 verify_gimple_phi (gphi *phi)
4954 {
4955 bool err = false;
4956 unsigned i;
4957 tree phi_result = gimple_phi_result (phi);
4958 bool virtual_p;
4959
4960 if (!phi_result)
4961 {
4962 error ("invalid PHI result");
4963 return true;
4964 }
4965
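/* A virtual PHI merges the virtual operand: its result and every one
   of its arguments must then be SSA names of the single virtual
   variable returned by gimple_vop.  */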
4966 virtual_p = virtual_operand_p (phi_result);
4967 if (TREE_CODE (phi_result) != SSA_NAME
4968 || (virtual_p
4969 && SSA_NAME_VAR (phi_result) != gimple_vop (cfun)))
4970 {
4971 error ("invalid PHI result");
4972 err = true;
4973 }
4974
4975 for (i = 0; i < gimple_phi_num_args (phi); i++)
4976 {
4977 tree t = gimple_phi_arg_def (phi, i);
4978
4979 if (!t)
4980 {
4981 error ("missing PHI def");
4982 err |= true;
4983 continue;
4984 }
4985 /* Addressable variables do have SSA_NAMEs but they
4986 are not considered gimple values. */
4987 else if ((TREE_CODE (t) == SSA_NAME
4988 && virtual_p != virtual_operand_p (t))
4989 || (virtual_p
4990 && (TREE_CODE (t) != SSA_NAME
4991 || SSA_NAME_VAR (t) != gimple_vop (cfun)))
4992 || (!virtual_p
4993 && !is_gimple_val (t)))
4994 {
4995 error ("invalid PHI argument");
4996 debug_generic_expr (t);
4997 err |= true;
4998 }
4999 #ifdef ENABLE_TYPES_CHECKING
5000 if (!useless_type_conversion_p (TREE_TYPE (phi_result), TREE_TYPE (t)))
5001 {
5002 error ("incompatible types in PHI argument %u", i);
5003 debug_generic_stmt (TREE_TYPE (phi_result));
5004 debug_generic_stmt (TREE_TYPE (t));
5005 err |= true;
5006 }
5007 #endif
5008 }
5009
5010 return err;
5011 }
5012
5013 /* Verify the GIMPLE statements inside the sequence STMTS. */
5014
5015 static bool
5016 verify_gimple_in_seq_2 (gimple_seq stmts)
5017 {
5018 gimple_stmt_iterator ittr;
5019 bool err = false;
5020
5021 for (ittr = gsi_start (stmts); !gsi_end_p (ittr); gsi_next (&ittr))
5022 {
5023 gimple *stmt = gsi_stmt (ittr);
5024
5025 switch (gimple_code (stmt))
5026 {
5027 case GIMPLE_BIND:
5028 err |= verify_gimple_in_seq_2 (
5029 gimple_bind_body (as_a <gbind *> (stmt)));
5030 break;
5031
5032 case GIMPLE_TRY:
5033 err |= verify_gimple_in_seq_2 (gimple_try_eval (stmt));
5034 err |= verify_gimple_in_seq_2 (gimple_try_cleanup (stmt));
5035 break;
5036
5037 case GIMPLE_EH_FILTER:
5038 err |= verify_gimple_in_seq_2 (gimple_eh_filter_failure (stmt));
5039 break;
5040
5041 case GIMPLE_EH_ELSE:
5042 {
5043 geh_else *eh_else = as_a <geh_else *> (stmt);
5044 err |= verify_gimple_in_seq_2 (gimple_eh_else_n_body (eh_else));
5045 err |= verify_gimple_in_seq_2 (gimple_eh_else_e_body (eh_else));
5046 }
5047 break;
5048
5049 case GIMPLE_CATCH:
5050 err |= verify_gimple_in_seq_2 (gimple_catch_handler (
5051 as_a <gcatch *> (stmt)));
5052 break;
5053
5054 case GIMPLE_TRANSACTION:
5055 err |= verify_gimple_transaction (as_a <gtransaction *> (stmt));
5056 break;
5057
5058 default:
5059 {
5060 bool err2 = verify_gimple_stmt (stmt);
5061 if (err2)
5062 debug_gimple_stmt (stmt);
5063 err |= err2;
5064 }
5065 }
5066 }
5067
5068 return err;
5069 }
5070
5071 /* Verify the contents of a GIMPLE_TRANSACTION. Returns true if there
5072 is a problem, otherwise false. */
5073
5074 static bool
5075 verify_gimple_transaction (gtransaction *stmt)
5076 {
5077 tree lab;
5078
5079 lab = gimple_transaction_label_norm (stmt);
5080 if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
5081 return true;
5082 lab = gimple_transaction_label_uninst (stmt);
5083 if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
5084 return true;
5085 lab = gimple_transaction_label_over (stmt);
5086 if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
5087 return true;
5088
5089 return verify_gimple_in_seq_2 (gimple_transaction_body (stmt));
5090 }
5091
5092
5093 /* Verify the GIMPLE statements inside the statement list STMTS. */
5094
5095 DEBUG_FUNCTION void
5096 verify_gimple_in_seq (gimple_seq stmts)
5097 {
5098 timevar_push (TV_TREE_STMT_VERIFY);
5099 if (verify_gimple_in_seq_2 (stmts))
5100 internal_error ("verify_gimple failed");
5101 timevar_pop (TV_TREE_STMT_VERIFY);
5102 }
5103
5104 /* Return true when T can be shared. */
5105
5106 static bool
5107 tree_node_can_be_shared (tree t)
5108 {
5109 if (IS_TYPE_OR_DECL_P (t)
5110 || TREE_CODE (t) == SSA_NAME
5111 || TREE_CODE (t) == IDENTIFIER_NODE
5112 || TREE_CODE (t) == CASE_LABEL_EXPR
5113 || is_gimple_min_invariant (t))
5114 return true;
5115
5116 if (t == error_mark_node)
5117 return true;
5118
5119 return false;
5120 }
5121
5122 /* Called via walk_tree. Verify tree sharing. */
5123
5124 static tree
5125 verify_node_sharing_1 (tree *tp, int *walk_subtrees, void *data)
5126 {
5127 hash_set<void *> *visited = (hash_set<void *> *) data;
5128
5129 if (tree_node_can_be_shared (*tp))
5130 {
5131 *walk_subtrees = false;
5132 return NULL;
5133 }
5134
5135 if (visited->add (*tp))
5136 return *tp;
5137
5138 return NULL;
5139 }
5140
5141 /* Called via walk_gimple_stmt. Verify tree sharing. */
5142
5143 static tree
5144 verify_node_sharing (tree *tp, int *walk_subtrees, void *data)
5145 {
5146 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5147 return verify_node_sharing_1 (tp, walk_subtrees, wi->info);
5148 }
5149
5150 static bool eh_error_found;
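/* Traversal callback for the EH throw statement table: flag entries
   whose statement was not seen while walking the IL.  VISITED is the
   set of throwing statements collected during the CFG walk.  Returns
   true so the traversal continues.  */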
5151 bool
5152 verify_eh_throw_stmt_node (gimple *const &stmt, const int &,
5153 hash_set<gimple *> *visited)
5154 {
5155 if (!visited->contains (stmt))
5156 {
5157 error ("dead STMT in EH table");
5158 debug_gimple_stmt (stmt);
5159 eh_error_found = true;
5160 }
5161 return true;
5162 }
5163
5164 /* Verify that the block of location LOC is in BLOCKS. */
5165
5166 static bool
5167 verify_location (hash_set<tree> *blocks, location_t loc)
5168 {
5169 tree block = LOCATION_BLOCK (loc);
5170 if (block != NULL_TREE
5171 && !blocks->contains (block))
5172 {
5173 error ("location references block not in block tree");
5174 return true;
5175 }
5176 if (block != NULL_TREE)
5177 return verify_location (blocks, BLOCK_SOURCE_LOCATION (block));
5178 return false;
5179 }
5180
5181 /* Called via walk_tree. Verify that expressions have no blocks. */
5182
5183 static tree
5184 verify_expr_no_block (tree *tp, int *walk_subtrees, void *)
5185 {
5186 if (!EXPR_P (*tp))
5187 {
5188 *walk_subtrees = false;
5189 return NULL;
5190 }
5191
5192 location_t loc = EXPR_LOCATION (*tp);
5193 if (LOCATION_BLOCK (loc) != NULL)
5194 return *tp;
5195
5196 return NULL;
5197 }
5198
5199 /* Called via walk_tree. Verify locations of expressions. */
5200
5201 static tree
5202 verify_expr_location_1 (tree *tp, int *walk_subtrees, void *data)
5203 {
5204 hash_set<tree> *blocks = (hash_set<tree> *) data;
5205 tree t = *tp;
5206
5207 /* ??? This doesn't really belong here but there's no good place to
5208 stick this remainder of old verify_expr. */
5209 /* ??? This barfs on debug stmts which contain binds to vars with
5210 different function context. */
5211 #if 0
5212 if (VAR_P (t)
5213 || TREE_CODE (t) == PARM_DECL
5214 || TREE_CODE (t) == RESULT_DECL)
5215 {
5216 tree context = decl_function_context (t);
5217 if (context != cfun->decl
5218 && !SCOPE_FILE_SCOPE_P (context)
5219 && !TREE_STATIC (t)
5220 && !DECL_EXTERNAL (t))
5221 {
5222 error ("local declaration from a different function");
5223 return t;
5224 }
5225 }
5226 #endif
5227
5228 if (VAR_P (t) && DECL_HAS_DEBUG_EXPR_P (t))
5229 {
5230 tree x = DECL_DEBUG_EXPR (t);
5231 tree addr = walk_tree (&x, verify_expr_no_block, NULL, NULL);
5232 if (addr)
5233 return addr;
5234 }
5235 if ((VAR_P (t)
5236 || TREE_CODE (t) == PARM_DECL
5237 || TREE_CODE (t) == RESULT_DECL)
5238 && DECL_HAS_VALUE_EXPR_P (t))
5239 {
5240 tree x = DECL_VALUE_EXPR (t);
5241 tree addr = walk_tree (&x, verify_expr_no_block, NULL, NULL);
5242 if (addr)
5243 return addr;
5244 }
5245
5246 if (!EXPR_P (t))
5247 {
5248 *walk_subtrees = false;
5249 return NULL;
5250 }
5251
5252 location_t loc = EXPR_LOCATION (t);
5253 if (verify_location (blocks, loc))
5254 return t;
5255
5256 return NULL;
5257 }
5258
5259 /* Called via walk_gimple_op. Verify locations of expressions. */
5260
5261 static tree
5262 verify_expr_location (tree *tp, int *walk_subtrees, void *data)
5263 {
5264 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5265 return verify_expr_location_1 (tp, walk_subtrees, wi->info);
5266 }
5267
5268 /* Insert all subblocks of BLOCK into BLOCKS and recurse. */
5269
5270 static void
5271 collect_subblocks (hash_set<tree> *blocks, tree block)
5272 {
5273 tree t;
5274 for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t))
5275 {
5276 blocks->add (t);
5277 collect_subblocks (blocks, t);
5278 }
5279 }
5280
5281 /* Verify the GIMPLE statements in the CFG of FN. */
5282
5283 DEBUG_FUNCTION void
5284 verify_gimple_in_cfg (struct function *fn, bool verify_nothrow)
5285 {
5286 basic_block bb;
5287 bool err = false;
5288
5289 timevar_push (TV_TREE_STMT_VERIFY);
5290 hash_set<void *> visited;
5291 hash_set<gimple *> visited_throwing_stmts;
5292
5293 /* Collect all BLOCKs referenced by the BLOCK tree of FN. */
5294 hash_set<tree> blocks;
5295 if (DECL_INITIAL (fn->decl))
5296 {
5297 blocks.add (DECL_INITIAL (fn->decl));
5298 collect_subblocks (&blocks, DECL_INITIAL (fn->decl));
5299 }
5300
5301 FOR_EACH_BB_FN (bb, fn)
5302 {
5303 gimple_stmt_iterator gsi;
5304 edge_iterator ei;
5305 edge e;
5306
5307 for (gphi_iterator gpi = gsi_start_phis (bb);
5308 !gsi_end_p (gpi);
5309 gsi_next (&gpi))
5310 {
5311 gphi *phi = gpi.phi ();
5312 bool err2 = false;
5313 unsigned i;
5314
5315 if (gimple_bb (phi) != bb)
5316 {
5317 error ("gimple_bb (phi) is set to a wrong basic block");
5318 err2 = true;
5319 }
5320
5321 err2 |= verify_gimple_phi (phi);
5322
5323 /* Only PHI arguments have locations. */
5324 if (gimple_location (phi) != UNKNOWN_LOCATION)
5325 {
5326 error ("PHI node with location");
5327 err2 = true;
5328 }
5329
5330 for (i = 0; i < gimple_phi_num_args (phi); i++)
5331 {
5332 tree arg = gimple_phi_arg_def (phi, i);
5333 tree addr = walk_tree (&arg, verify_node_sharing_1,
5334 &visited, NULL);
5335 if (addr)
5336 {
5337 error ("incorrect sharing of tree nodes");
5338 debug_generic_expr (addr);
5339 err2 |= true;
5340 }
5341 location_t loc = gimple_phi_arg_location (phi, i);
5342 if (virtual_operand_p (gimple_phi_result (phi))
5343 && loc != UNKNOWN_LOCATION)
5344 {
5345 error ("virtual PHI with argument locations");
5346 err2 = true;
5347 }
5348 addr = walk_tree (&arg, verify_expr_location_1, &blocks, NULL);
5349 if (addr)
5350 {
5351 debug_generic_expr (addr);
5352 err2 = true;
5353 }
5354 err2 |= verify_location (&blocks, loc);
5355 }
5356
5357 if (err2)
5358 debug_gimple_stmt (phi);
5359 err |= err2;
5360 }
5361
5362 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5363 {
5364 gimple *stmt = gsi_stmt (gsi);
5365 bool err2 = false;
5366 struct walk_stmt_info wi;
5367 tree addr;
5368 int lp_nr;
5369
5370 if (gimple_bb (stmt) != bb)
5371 {
5372 error ("gimple_bb (stmt) is set to a wrong basic block");
5373 err2 = true;
5374 }
5375
5376 err2 |= verify_gimple_stmt (stmt);
5377 err2 |= verify_location (&blocks, gimple_location (stmt));
5378
5379 memset (&wi, 0, sizeof (wi));
5380 wi.info = (void *) &visited;
5381 addr = walk_gimple_op (stmt, verify_node_sharing, &wi);
5382 if (addr)
5383 {
5384 error ("incorrect sharing of tree nodes");
5385 debug_generic_expr (addr);
5386 err2 |= true;
5387 }
5388
5389 memset (&wi, 0, sizeof (wi));
5390 wi.info = (void *) &blocks;
5391 addr = walk_gimple_op (stmt, verify_expr_location, &wi);
5392 if (addr)
5393 {
5394 debug_generic_expr (addr);
5395 err2 |= true;
5396 }
5397
5398 /* If the statement is marked as part of an EH region, then it is
5399 expected that the statement could throw. Verify that when an
5400 optimization simplifies a statement such that it provably
5401 cannot throw, the other data structures are updated to
5402 match. */
5403 lp_nr = lookup_stmt_eh_lp (stmt);
5404 if (lp_nr != 0)
5405 visited_throwing_stmts.add (stmt);
5406 if (lp_nr > 0)
5407 {
5408 if (!stmt_could_throw_p (cfun, stmt))
5409 {
5410 if (verify_nothrow)
5411 {
5412 error ("statement marked for throw, but doesn%'t");
5413 err2 |= true;
5414 }
5415 }
5416 else if (!gsi_one_before_end_p (gsi))
5417 {
5418 error ("statement marked for throw in middle of block");
5419 err2 |= true;
5420 }
5421 }
5422
5423 if (err2)
5424 debug_gimple_stmt (stmt);
5425 err |= err2;
5426 }
5427
5428 FOR_EACH_EDGE (e, ei, bb->succs)
5429 if (e->goto_locus != UNKNOWN_LOCATION)
5430 err |= verify_location (&blocks, e->goto_locus);
5431 }
5432
5433 hash_map<gimple *, int> *eh_table = get_eh_throw_stmt_table (cfun);
5434 eh_error_found = false;
5435 if (eh_table)
5436 eh_table->traverse<hash_set<gimple *> *, verify_eh_throw_stmt_node>
5437 (&visited_throwing_stmts);
5438
5439 if (err || eh_error_found)
5440 internal_error ("verify_gimple failed");
5441
5442 verify_histograms ();
5443 timevar_pop (TV_TREE_STMT_VERIFY);
5444 }
5445
5446
5447 /* Verifies that the flow information is OK. */
5448
5449 static int
5450 gimple_verify_flow_info (void)
5451 {
5452 int err = 0;
5453 basic_block bb;
5454 gimple_stmt_iterator gsi;
5455 gimple *stmt;
5456 edge e;
5457 edge_iterator ei;
5458
5459 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
5460 || ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
5461 {
5462 error ("ENTRY_BLOCK has IL associated with it");
5463 err = 1;
5464 }
5465
5466 if (EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
5467 || EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
5468 {
5469 error ("EXIT_BLOCK has IL associated with it");
5470 err = 1;
5471 }
5472
5473 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
5474 if (e->flags & EDGE_FALLTHRU)
5475 {
5476 error ("fallthru to exit from bb %d", e->src->index);
5477 err = 1;
5478 }
5479
5480 FOR_EACH_BB_FN (bb, cfun)
5481 {
5482 bool found_ctrl_stmt = false;
5483
5484 stmt = NULL;
5485
5486 /* Skip labels on the start of basic block. */
5487 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5488 {
5489 tree label;
5490 gimple *prev_stmt = stmt;
5491
5492 stmt = gsi_stmt (gsi);
5493
5494 if (gimple_code (stmt) != GIMPLE_LABEL)
5495 break;
5496
5497 label = gimple_label_label (as_a <glabel *> (stmt));
5498 if (prev_stmt && DECL_NONLOCAL (label))
5499 {
5500 error ("nonlocal label ");
5501 print_generic_expr (stderr, label);
5502 fprintf (stderr, " is not first in a sequence of labels in bb %d",
5503 bb->index);
5504 err = 1;
5505 }
5506
5507 if (prev_stmt && EH_LANDING_PAD_NR (label) != 0)
5508 {
5509 error ("EH landing pad label ");
5510 print_generic_expr (stderr, label);
5511 fprintf (stderr, " is not first in a sequence of labels in bb %d",
5512 bb->index);
5513 err = 1;
5514 }
5515
5516 if (label_to_block (cfun, label) != bb)
5517 {
5518 error ("label ");
5519 print_generic_expr (stderr, label);
5520 fprintf (stderr, " to block does not match in bb %d",
5521 bb->index);
5522 err = 1;
5523 }
5524
5525 if (decl_function_context (label) != current_function_decl)
5526 {
5527 error ("label ");
5528 print_generic_expr (stderr, label);
5529 fprintf (stderr, " has incorrect context in bb %d",
5530 bb->index);
5531 err = 1;
5532 }
5533 }
5534
5535 /* Verify that the body of basic block BB is free of control flow. */
5536 for (; !gsi_end_p (gsi); gsi_next (&gsi))
5537 {
5538 gimple *stmt = gsi_stmt (gsi);
5539
5540 if (found_ctrl_stmt)
5541 {
5542 error ("control flow in the middle of basic block %d",
5543 bb->index);
5544 err = 1;
5545 }
5546
5547 if (stmt_ends_bb_p (stmt))
5548 found_ctrl_stmt = true;
5549
5550 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
5551 {
5552 error ("label ");
5553 print_generic_expr (stderr, gimple_label_label (label_stmt));
5554 fprintf (stderr, " in the middle of basic block %d", bb->index);
5555 err = 1;
5556 }
5557 }
5558
5559 gsi = gsi_last_nondebug_bb (bb);
5560 if (gsi_end_p (gsi))
5561 continue;
5562
5563 stmt = gsi_stmt (gsi);
5564
5565 if (gimple_code (stmt) == GIMPLE_LABEL)
5566 continue;
5567
5568 err |= verify_eh_edges (stmt);
5569
5570 if (is_ctrl_stmt (stmt))
5571 {
5572 FOR_EACH_EDGE (e, ei, bb->succs)
5573 if (e->flags & EDGE_FALLTHRU)
5574 {
5575 error ("fallthru edge after a control statement in bb %d",
5576 bb->index);
5577 err = 1;
5578 }
5579 }
5580
5581 if (gimple_code (stmt) != GIMPLE_COND)
5582 {
5583 /* Verify that there are no edges with EDGE_TRUE/FALSE_FLAG set
5584 after anything other than a GIMPLE_COND statement. */
5585 FOR_EACH_EDGE (e, ei, bb->succs)
5586 if (e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))
5587 {
5588 error ("true/false edge after a non-GIMPLE_COND in bb %d",
5589 bb->index);
5590 err = 1;
5591 }
5592 }
5593
5594 switch (gimple_code (stmt))
5595 {
5596 case GIMPLE_COND:
5597 {
5598 edge true_edge;
5599 edge false_edge;
5600
5601 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
5602
5603 if (!true_edge
5604 || !false_edge
5605 || !(true_edge->flags & EDGE_TRUE_VALUE)
5606 || !(false_edge->flags & EDGE_FALSE_VALUE)
5607 || (true_edge->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL))
5608 || (false_edge->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL))
5609 || EDGE_COUNT (bb->succs) >= 3)
5610 {
5611 error ("wrong outgoing edge flags at end of bb %d",
5612 bb->index);
5613 err = 1;
5614 }
5615 }
5616 break;
5617
5618 case GIMPLE_GOTO:
5619 if (simple_goto_p (stmt))
5620 {
5621 error ("explicit goto at end of bb %d", bb->index);
5622 err = 1;
5623 }
5624 else
5625 {
5626 /* FIXME. We should double check that the labels in the
5627 destination blocks have their address taken. */
5628 FOR_EACH_EDGE (e, ei, bb->succs)
5629 if ((e->flags & (EDGE_FALLTHRU | EDGE_TRUE_VALUE
5630 | EDGE_FALSE_VALUE))
5631 || !(e->flags & EDGE_ABNORMAL))
5632 {
5633 error ("wrong outgoing edge flags at end of bb %d",
5634 bb->index);
5635 err = 1;
5636 }
5637 }
5638 break;
5639
5640 case GIMPLE_CALL:
5641 if (!gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
5642 break;
5643 /* fallthru */
5644 case GIMPLE_RETURN:
5645 if (!single_succ_p (bb)
5646 || (single_succ_edge (bb)->flags
5647 & (EDGE_FALLTHRU | EDGE_ABNORMAL
5648 | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
5649 {
5650 error ("wrong outgoing edge flags at end of bb %d", bb->index);
5651 err = 1;
5652 }
5653 if (single_succ (bb) != EXIT_BLOCK_PTR_FOR_FN (cfun))
5654 {
5655 error ("return edge does not point to exit in bb %d",
5656 bb->index);
5657 err = 1;
5658 }
5659 break;
5660
5661 case GIMPLE_SWITCH:
5662 {
5663 gswitch *switch_stmt = as_a <gswitch *> (stmt);
5664 tree prev;
5665 edge e;
5666 size_t i, n;
5667
5668 n = gimple_switch_num_labels (switch_stmt);
5669
5670 /* Mark all the destination basic blocks. */
5671 for (i = 0; i < n; ++i)
5672 {
5673 basic_block label_bb = gimple_switch_label_bb (cfun, switch_stmt, i);
5674 gcc_assert (!label_bb->aux || label_bb->aux == (void *)1);
5675 label_bb->aux = (void *)1;
5676 }
5677
5678 /* Verify that the case labels are sorted. */
5679 prev = gimple_switch_label (switch_stmt, 0);
5680 for (i = 1; i < n; ++i)
5681 {
5682 tree c = gimple_switch_label (switch_stmt, i);
5683 if (!CASE_LOW (c))
5684 {
5685 error ("found default case not at the start of "
5686 "case vector");
5687 err = 1;
5688 continue;
5689 }
5690 if (CASE_LOW (prev)
5691 && !tree_int_cst_lt (CASE_LOW (prev), CASE_LOW (c)))
5692 {
5693 error ("case labels not sorted: ");
5694 print_generic_expr (stderr, prev);
5695 fprintf (stderr, " is greater than ");
5696 print_generic_expr (stderr, c);
5697 fprintf (stderr, " but comes before it.\n");
5698 err = 1;
5699 }
5700 prev = c;
5701 }
5702 /* VRP will remove the default case if it can prove it will
5703 never be executed. So do not verify there always exists
5704 a default case here. */
5705
5706 FOR_EACH_EDGE (e, ei, bb->succs)
5707 {
5708 if (!e->dest->aux)
5709 {
5710 error ("extra outgoing edge %d->%d",
5711 bb->index, e->dest->index);
5712 err = 1;
5713 }
5714
5715 e->dest->aux = (void *)2;
5716 if ((e->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL
5717 | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
5718 {
5719 error ("wrong outgoing edge flags at end of bb %d",
5720 bb->index);
5721 err = 1;
5722 }
5723 }
5724
5725 /* Check that we have all of them. */
5726 for (i = 0; i < n; ++i)
5727 {
5728 basic_block label_bb = gimple_switch_label_bb (cfun,
5729 switch_stmt, i);
5730
5731 if (label_bb->aux != (void *)2)
5732 {
5733 error ("missing edge %i->%i", bb->index, label_bb->index);
5734 err = 1;
5735 }
5736 }
5737
5738 FOR_EACH_EDGE (e, ei, bb->succs)
5739 e->dest->aux = (void *)0;
5740 }
5741 break;
5742
5743 case GIMPLE_EH_DISPATCH:
5744 err |= verify_eh_dispatch_edge (as_a <geh_dispatch *> (stmt));
5745 break;
5746
5747 default:
5748 break;
5749 }
5750 }
5751
5752 if (dom_info_state (CDI_DOMINATORS) >= DOM_NO_FAST_QUERY)
5753 verify_dominators (CDI_DOMINATORS);
5754
5755 return err;
5756 }
5757
5758
5759 /* Updates phi nodes after creating a forwarder block joined
5760 by edge FALLTHRU. */
5761
5762 static void
5763 gimple_make_forwarder_block (edge fallthru)
5764 {
5765 edge e;
5766 edge_iterator ei;
5767 basic_block dummy, bb;
5768 tree var;
5769 gphi_iterator gsi;
5770
5771 dummy = fallthru->src;
5772 bb = fallthru->dest;
5773
5774 if (single_pred_p (bb))
5775 return;
5776
5777 /* If we redirected a branch we must create new PHI nodes at the
5778 start of BB. */
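/* Each PHI that lived in DUMMY keeps its original result in the new
PHI created at BB; the PHI in DUMMY is given a fresh SSA name, which
then reaches BB along FALLTHRU. */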
5779 for (gsi = gsi_start_phis (dummy); !gsi_end_p (gsi); gsi_next (&gsi))
5780 {
5781 gphi *phi, *new_phi;
5782
5783 phi = gsi.phi ();
5784 var = gimple_phi_result (phi);
5785 new_phi = create_phi_node (var, bb);
5786 gimple_phi_set_result (phi, copy_ssa_name (var, phi));
5787 add_phi_arg (new_phi, gimple_phi_result (phi), fallthru,
5788 UNKNOWN_LOCATION);
5789 }
5790
5791 /* Add the arguments we have stored on edges. */
5792 FOR_EACH_EDGE (e, ei, bb->preds)
5793 {
5794 if (e == fallthru)
5795 continue;
5796
5797 flush_pending_stmts (e);
5798 }
5799 }
5800
5801
5802 /* Return a non-special label in the head of basic block BB.
5803 Create one if it doesn't exist. */
5804
5805 tree
5806 gimple_block_label (basic_block bb)
5807 {
5808 gimple_stmt_iterator i, s = gsi_start_bb (bb);
5809 bool first = true;
5810 tree label;
5811 glabel *stmt;
5812
5813 for (i = s; !gsi_end_p (i); first = false, gsi_next (&i))
5814 {
5815 stmt = dyn_cast <glabel *> (gsi_stmt (i));
5816 if (!stmt)
5817 break;
5818 label = gimple_label_label (stmt);
5819 if (!DECL_NONLOCAL (label))
5820 {
5821 if (!first)
5822 gsi_move_before (&i, &s);
5823 return label;
5824 }
5825 }
5826
5827 label = create_artificial_label (UNKNOWN_LOCATION);
5828 stmt = gimple_build_label (label);
5829 gsi_insert_before (&s, stmt, GSI_NEW_STMT);
5830 return label;
5831 }
5832
5833
5834 /* Attempt to perform edge redirection by replacing a possibly complex
5835 jump instruction by a goto or by removing the jump completely.
5836 This can apply only if all edges now point to the same block. The
5837 parameters and return values are equivalent to
5838 redirect_edge_and_branch. */
5839
5840 static edge
5841 gimple_try_redirect_by_replacing_jump (edge e, basic_block target)
5842 {
5843 basic_block src = e->src;
5844 gimple_stmt_iterator i;
5845 gimple *stmt;
5846
5847 /* We can replace or remove a complex jump only when we have exactly
5848 two edges. */
5849 if (EDGE_COUNT (src->succs) != 2
5850 /* Verify that all targets will be TARGET. Specifically, the
5851 edge that is not E must also go to TARGET. */
5852 || EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target)
5853 return NULL;
5854
5855 i = gsi_last_bb (src);
5856 if (gsi_end_p (i))
5857 return NULL;
5858
5859 stmt = gsi_stmt (i);
5860
5861 if (gimple_code (stmt) == GIMPLE_COND || gimple_code (stmt) == GIMPLE_SWITCH)
5862 {
5863 gsi_remove (&i, true);
5864 e = ssa_redirect_edge (e, target);
5865 e->flags = EDGE_FALLTHRU;
5866 return e;
5867 }
5868
5869 return NULL;
5870 }
5871
5872
5873 /* Redirect E to DEST. Return NULL on failure. Otherwise, return the
5874 edge representing the redirected branch. */
5875
5876 static edge
5877 gimple_redirect_edge_and_branch (edge e, basic_block dest)
5878 {
5879 basic_block bb = e->src;
5880 gimple_stmt_iterator gsi;
5881 edge ret;
5882 gimple *stmt;
5883
5884 if (e->flags & EDGE_ABNORMAL)
5885 return NULL;
5886
5887 if (e->dest == dest)
5888 return NULL;
5889
5890 if (e->flags & EDGE_EH)
5891 return redirect_eh_edge (e, dest);
5892
5893 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
5894 {
5895 ret = gimple_try_redirect_by_replacing_jump (e, dest);
5896 if (ret)
5897 return ret;
5898 }
5899
5900 gsi = gsi_last_nondebug_bb (bb);
5901 stmt = gsi_end_p (gsi) ? NULL : gsi_stmt (gsi);
5902
5903 switch (stmt ? gimple_code (stmt) : GIMPLE_ERROR_MARK)
5904 {
5905 case GIMPLE_COND:
5906 /* For COND_EXPR, we only need to redirect the edge. */
5907 break;
5908
5909 case GIMPLE_GOTO:
5910 /* No non-abnormal edges should lead from a non-simple goto, and
5911 simple ones should be represented implicitly. */
5912 gcc_unreachable ();
5913
5914 case GIMPLE_SWITCH:
5915 {
5916 gswitch *switch_stmt = as_a <gswitch *> (stmt);
5917 tree label = gimple_block_label (dest);
5918 tree cases = get_cases_for_edge (e, switch_stmt);
5919
5920 /* If we have a list of cases associated with E, then use it
5921 as it's a lot faster than walking the entire case vector. */
5922 if (cases)
5923 {
5924 edge e2 = find_edge (e->src, dest);
5925 tree last, first;
5926
5927 first = cases;
5928 while (cases)
5929 {
5930 last = cases;
5931 CASE_LABEL (cases) = label;
5932 cases = CASE_CHAIN (cases);
5933 }
5934
5935 /* If there was already an edge in the CFG, then we need
5936 to move all the cases associated with E to E2. */
5937 if (e2)
5938 {
5939 tree cases2 = get_cases_for_edge (e2, switch_stmt);
5940
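/* Splice E's whole case chain into E2's list right after its
head element. */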
5941 CASE_CHAIN (last) = CASE_CHAIN (cases2);
5942 CASE_CHAIN (cases2) = first;
5943 }
5944 bitmap_set_bit (touched_switch_bbs, gimple_bb (stmt)->index);
5945 }
5946 else
5947 {
5948 size_t i, n = gimple_switch_num_labels (switch_stmt);
5949
5950 for (i = 0; i < n; i++)
5951 {
5952 tree elt = gimple_switch_label (switch_stmt, i);
5953 if (label_to_block (cfun, CASE_LABEL (elt)) == e->dest)
5954 CASE_LABEL (elt) = label;
5955 }
5956 }
5957 }
5958 break;
5959
5960 case GIMPLE_ASM:
5961 {
5962 gasm *asm_stmt = as_a <gasm *> (stmt);
5963 int i, n = gimple_asm_nlabels (asm_stmt);
5964 tree label = NULL;
5965
5966 for (i = 0; i < n; ++i)
5967 {
5968 tree cons = gimple_asm_label_op (asm_stmt, i);
5969 if (label_to_block (cfun, TREE_VALUE (cons)) == e->dest)
5970 {
5971 if (!label)
5972 label = gimple_block_label (dest);
5973 TREE_VALUE (cons) = label;
5974 }
5975 }
5976
5977 /* If we didn't find any label matching the former edge in the
5978 asm labels, we must be redirecting the fallthrough
5979 edge. */
5980 gcc_assert (label || (e->flags & EDGE_FALLTHRU));
5981 }
5982 break;
5983
5984 case GIMPLE_RETURN:
5985 gsi_remove (&gsi, true);
5986 e->flags |= EDGE_FALLTHRU;
5987 break;
5988
5989 case GIMPLE_OMP_RETURN:
5990 case GIMPLE_OMP_CONTINUE:
5991 case GIMPLE_OMP_SECTIONS_SWITCH:
5992 case GIMPLE_OMP_FOR:
5993 /* The edges from OMP constructs can be simply redirected. */
5994 break;
5995
5996 case GIMPLE_EH_DISPATCH:
5997 if (!(e->flags & EDGE_FALLTHRU))
5998 redirect_eh_dispatch_edge (as_a <geh_dispatch *> (stmt), e, dest);
5999 break;
6000
6001 case GIMPLE_TRANSACTION:
6002 if (e->flags & EDGE_TM_ABORT)
6003 gimple_transaction_set_label_over (as_a <gtransaction *> (stmt),
6004 gimple_block_label (dest));
6005 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
6006 gimple_transaction_set_label_uninst (as_a <gtransaction *> (stmt),
6007 gimple_block_label (dest));
6008 else
6009 gimple_transaction_set_label_norm (as_a <gtransaction *> (stmt),
6010 gimple_block_label (dest));
6011 break;
6012
6013 default:
6014 /* Otherwise it must be a fallthru edge, and we don't need to
6015 do anything besides redirecting it. */
6016 gcc_assert (e->flags & EDGE_FALLTHRU);
6017 break;
6018 }
6019
6020 /* Update/insert PHI nodes as necessary. */
6021
6022 /* Now update the edges in the CFG. */
6023 e = ssa_redirect_edge (e, dest);
6024
6025 return e;
6026 }
6027
6028 /* Returns true if it is possible to remove edge E by redirecting
6029 it to the destination of the other edge from E->src. */
6030
6031 static bool
6032 gimple_can_remove_branch_p (const_edge e)
6033 {
6034 if (e->flags & (EDGE_ABNORMAL | EDGE_EH))
6035 return false;
6036
6037 return true;
6038 }
6039
6040 /* Simple wrapper, as we can always redirect fallthru edges. */
6041
6042 static basic_block
6043 gimple_redirect_edge_and_branch_force (edge e, basic_block dest)
6044 {
6045 e = gimple_redirect_edge_and_branch (e, dest);
6046 gcc_assert (e);
6047
6048 return NULL;
6049 }
6050
6051
6052 /* Splits basic block BB after statement STMT (but at least after the
6053 labels). If STMT is NULL, BB is split just after the labels. */
6054
6055 static basic_block
6056 gimple_split_block (basic_block bb, void *stmt)
6057 {
6058 gimple_stmt_iterator gsi;
6059 gimple_stmt_iterator gsi_tgt;
6060 gimple_seq list;
6061 basic_block new_bb;
6062 edge e;
6063 edge_iterator ei;
6064
6065 new_bb = create_empty_bb (bb);
6066
6067 /* Redirect the outgoing edges. */
6068 new_bb->succs = bb->succs;
6069 bb->succs = NULL;
6070 FOR_EACH_EDGE (e, ei, new_bb->succs)
6071 e->src = new_bb;
6072
6073 /* Get a stmt iterator pointing to the first stmt to move. */
6074 if (!stmt || gimple_code ((gimple *) stmt) == GIMPLE_LABEL)
6075 gsi = gsi_after_labels (bb);
6076 else
6077 {
6078 gsi = gsi_for_stmt ((gimple *) stmt);
6079 gsi_next (&gsi);
6080 }
6081
6082 /* Move everything from GSI to the new basic block. */
6083 if (gsi_end_p (gsi))
6084 return new_bb;
6085
6086 /* Split the statement list; avoid creating new containers as this
6087 brings ugly quadratic memory consumption in the inliner.
6088 (We are still quadratic since we need to update stmt BB pointers,
6089 sadly.) */
6090 gsi_split_seq_before (&gsi, &list);
6091 set_bb_seq (new_bb, list);
6092 for (gsi_tgt = gsi_start (list);
6093 !gsi_end_p (gsi_tgt); gsi_next (&gsi_tgt))
6094 gimple_set_bb (gsi_stmt (gsi_tgt), new_bb);
6095
6096 return new_bb;
6097 }
6098
6099
6100 /* Moves basic block BB after block AFTER. */
6101
6102 static bool
6103 gimple_move_block_after (basic_block bb, basic_block after)
6104 {
6105 if (bb->prev_bb == after)
6106 return true;
6107
6108 unlink_block (bb);
6109 link_block (bb, after);
6110
6111 return true;
6112 }
6113
6114
6115 /* Return TRUE if block BB has no executable statements, otherwise return
6116 FALSE. */
6117
6118 static bool
6119 gimple_empty_block_p (basic_block bb)
6120 {
6121 /* BB must have no executable statements. */
6122 gimple_stmt_iterator gsi = gsi_after_labels (bb);
6123 if (phi_nodes (bb))
6124 return false;
6125 while (!gsi_end_p (gsi))
6126 {
6127 gimple *stmt = gsi_stmt (gsi);
6128 if (is_gimple_debug (stmt))
6129 ;
6130 else if (gimple_code (stmt) == GIMPLE_NOP
6131 || gimple_code (stmt) == GIMPLE_PREDICT)
6132 ;
6133 else
6134 return false;
6135 gsi_next (&gsi);
6136 }
6137 return true;
6138 }
6139
6140
6141 /* Split a basic block if it ends with a conditional branch and if the
6142 other part of the block is not empty. */
6143
6144 static basic_block
6145 gimple_split_block_before_cond_jump (basic_block bb)
6146 {
6147 gimple *last, *split_point;
6148 gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
6149 if (gsi_end_p (gsi))
6150 return NULL;
6151 last = gsi_stmt (gsi);
6152 if (gimple_code (last) != GIMPLE_COND
6153 && gimple_code (last) != GIMPLE_SWITCH)
6154 return NULL;
6155 gsi_prev (&gsi);
6156 split_point = gsi_stmt (gsi);
6157 return split_block (bb, split_point)->dest;
6158 }
6159
6160
6161 /* Return true if basic block BB can be duplicated. */
6162
6163 static bool
6164 gimple_can_duplicate_bb_p (const_basic_block bb ATTRIBUTE_UNUSED)
6165 {
6166 return true;
6167 }
6168
6169 /* Create a duplicate of the basic block BB. NOTE: This does not
6170 preserve SSA form. */
6171
6172 static basic_block
6173 gimple_duplicate_bb (basic_block bb, copy_bb_data *id)
6174 {
6175 basic_block new_bb;
6176 gimple_stmt_iterator gsi_tgt;
6177
6178 new_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
6179
6180 /* Copy the PHI nodes. We ignore PHI node arguments here because
6181 the incoming edges have not been set up yet. */
6182 for (gphi_iterator gpi = gsi_start_phis (bb);
6183 !gsi_end_p (gpi);
6184 gsi_next (&gpi))
6185 {
6186 gphi *phi, *copy;
6187 phi = gpi.phi ();
6188 copy = create_phi_node (NULL_TREE, new_bb);
6189 create_new_def_for (gimple_phi_result (phi), copy,
6190 gimple_phi_result_ptr (copy));
6191 gimple_set_uid (copy, gimple_uid (phi));
6192 }
6193
6194 gsi_tgt = gsi_start_bb (new_bb);
6195 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
6196 !gsi_end_p (gsi);
6197 gsi_next (&gsi))
6198 {
6199 def_operand_p def_p;
6200 ssa_op_iter op_iter;
6201 tree lhs;
6202 gimple *stmt, *copy;
6203
6204 stmt = gsi_stmt (gsi);
6205 if (gimple_code (stmt) == GIMPLE_LABEL)
6206 continue;
6207
6208 /* Don't duplicate label debug stmts. */
6209 if (gimple_debug_bind_p (stmt)
6210 && TREE_CODE (gimple_debug_bind_get_var (stmt))
6211 == LABEL_DECL)
6212 continue;
6213
6214 /* Create a new copy of STMT and duplicate STMT's virtual
6215 operands. */
6216 copy = gimple_copy (stmt);
6217 gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
6218
6219 maybe_duplicate_eh_stmt (copy, stmt);
6220 gimple_duplicate_stmt_histograms (cfun, copy, cfun, stmt);
6221
6222 /* When copying around a stmt writing into a local non-user
6223 aggregate, make sure it won't share a stack slot with other
6224 vars. */
6225 lhs = gimple_get_lhs (stmt);
6226 if (lhs && TREE_CODE (lhs) != SSA_NAME)
6227 {
6228 tree base = get_base_address (lhs);
6229 if (base
6230 && (VAR_P (base) || TREE_CODE (base) == RESULT_DECL)
6231 && DECL_IGNORED_P (base)
6232 && !TREE_STATIC (base)
6233 && !DECL_EXTERNAL (base)
6234 && (!VAR_P (base) || !DECL_HAS_VALUE_EXPR_P (base)))
6235 DECL_NONSHAREABLE (base) = 1;
6236 }
6237
6238 /* If requested, remap dependence info of cliques brought in
6239 via inlining. */
6240 if (id)
6241 for (unsigned i = 0; i < gimple_num_ops (copy); ++i)
6242 {
6243 tree op = gimple_op (copy, i);
6244 if (!op)
6245 continue;
6246 if (TREE_CODE (op) == ADDR_EXPR
6247 || TREE_CODE (op) == WITH_SIZE_EXPR)
6248 op = TREE_OPERAND (op, 0);
6249 while (handled_component_p (op))
6250 op = TREE_OPERAND (op, 0);
6251 if ((TREE_CODE (op) == MEM_REF
6252 || TREE_CODE (op) == TARGET_MEM_REF)
6253 && MR_DEPENDENCE_CLIQUE (op) > 1
6254 && MR_DEPENDENCE_CLIQUE (op) != bb->loop_father->owned_clique)
6255 {
6256 if (!id->dependence_map)
6257 id->dependence_map = new hash_map<dependence_hash,
6258 unsigned short>;
6259 bool existed;
6260 unsigned short &newc = id->dependence_map->get_or_insert
6261 (MR_DEPENDENCE_CLIQUE (op), &existed);
6262 if (!existed)
6263 {
6264 gcc_assert (MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
6265 newc = ++cfun->last_clique;
6266 }
6267 MR_DEPENDENCE_CLIQUE (op) = newc;
6268 }
6269 }
6270
6271 /* Create new names for all the definitions created by COPY and
6272 add replacement mappings for each new name. */
6273 FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_ALL_DEFS)
6274 create_new_def_for (DEF_FROM_PTR (def_p), copy, def_p);
6275 }
6276
6277 return new_bb;
6278 }
6279
6280 /* Adds phi node arguments for edge E_COPY after basic block duplication. */
6281
6282 static void
6283 add_phi_args_after_copy_edge (edge e_copy)
6284 {
6285 basic_block bb, bb_copy = e_copy->src, dest;
6286 edge e;
6287 edge_iterator ei;
6288 gphi *phi, *phi_copy;
6289 tree def;
6290 gphi_iterator psi, psi_copy;
6291
6292 if (gimple_seq_empty_p (phi_nodes (e_copy->dest)))
6293 return;
6294
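/* E_COPY->src may itself be a block that was not duplicated; use its
original only when the BB_DUPLICATED flag says a copy was made. */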
6295 bb = bb_copy->flags & BB_DUPLICATED ? get_bb_original (bb_copy) : bb_copy;
6296
6297 if (e_copy->dest->flags & BB_DUPLICATED)
6298 dest = get_bb_original (e_copy->dest);
6299 else
6300 dest = e_copy->dest;
6301
6302 e = find_edge (bb, dest);
6303 if (!e)
6304 {
6305 /* During loop unrolling the target of the latch edge is copied.
6306 In this case we are not looking for the edge to DEST, but for
6307 the edge to the duplicated block whose original was DEST. */
6308 FOR_EACH_EDGE (e, ei, bb->succs)
6309 {
6310 if ((e->dest->flags & BB_DUPLICATED)
6311 && get_bb_original (e->dest) == dest)
6312 break;
6313 }
6314
6315 gcc_assert (e != NULL);
6316 }
6317
6318 for (psi = gsi_start_phis (e->dest),
6319 psi_copy = gsi_start_phis (e_copy->dest);
6320 !gsi_end_p (psi);
6321 gsi_next (&psi), gsi_next (&psi_copy))
6322 {
6323 phi = psi.phi ();
6324 phi_copy = psi_copy.phi ();
6325 def = PHI_ARG_DEF_FROM_EDGE (phi, e);
6326 add_phi_arg (phi_copy, def, e_copy,
6327 gimple_phi_arg_location_from_edge (phi, e));
6328 }
6329 }
6330
6331
6332 /* Basic block BB_COPY was created by code duplication. Add phi node
6333 arguments for edges going out of BB_COPY. The blocks that were
6334 duplicated have BB_DUPLICATED set. */
6335
6336 void
6337 add_phi_args_after_copy_bb (basic_block bb_copy)
6338 {
6339 edge e_copy;
6340 edge_iterator ei;
6341
6342 FOR_EACH_EDGE (e_copy, ei, bb_copy->succs)
6343 {
6344 add_phi_args_after_copy_edge (e_copy);
6345 }
6346 }
6347
6348 /* Blocks in REGION_COPY array of length N_REGION were created by
6349 duplication of basic blocks. Add phi node arguments for edges
6350 going from these blocks. If E_COPY is not NULL, also add
6351 phi node arguments for its destination. */
6352
6353 void
6354 add_phi_args_after_copy (basic_block *region_copy, unsigned n_region,
6355 edge e_copy)
6356 {
6357 unsigned i;
6358
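/* Temporarily mark the new blocks so the PHI-argument copying code
can map each copy back to its original block. */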
6359 for (i = 0; i < n_region; i++)
6360 region_copy[i]->flags |= BB_DUPLICATED;
6361
6362 for (i = 0; i < n_region; i++)
6363 add_phi_args_after_copy_bb (region_copy[i]);
6364 if (e_copy)
6365 add_phi_args_after_copy_edge (e_copy);
6366
6367 for (i = 0; i < n_region; i++)
6368 region_copy[i]->flags &= ~BB_DUPLICATED;
6369 }
6370
6371 /* Duplicates a REGION (set of N_REGION basic blocks) with just a single
6372 important exit edge EXIT. By important we mean that no SSA name defined
6373 inside the region is live over the other exit edges of the region. All entry
6374 edges to the region must go to ENTRY->dest. The edge ENTRY is redirected
6375 to the duplicate of the region. Dominance and loop information is
6376 updated if UPDATE_DOMINANCE is true, but not the SSA web. If
6377 UPDATE_DOMINANCE is false then we assume that the caller will update the
6378 dominance information after calling this function. The new basic
6379 blocks are stored to REGION_COPY in the same order as they had in REGION,
6380 provided that REGION_COPY is not NULL.
6381 The function returns false if it is unable to copy the region,
6382 true otherwise. */
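/* A minimal caller sketch for the primary use, loop-header copying
(hypothetical and simplified; real callers choose the region blocks
carefully and validate the exit conditions first):

  edge entry = loop_preheader_edge (loop);
  edge exit = single_exit (loop);
  basic_block bbs[1] = { loop->header };
  if (gimple_duplicate_sese_region (entry, exit, bbs, 1, NULL, true))
    update_ssa (TODO_update_ssa); */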
6383
6384 bool
6385 gimple_duplicate_sese_region (edge entry, edge exit,
6386 basic_block *region, unsigned n_region,
6387 basic_block *region_copy,
6388 bool update_dominance)
6389 {
6390 unsigned i;
6391 bool free_region_copy = false, copying_header = false;
6392 struct loop *loop = entry->dest->loop_father;
6393 edge exit_copy;
6394 vec<basic_block> doms = vNULL;
6395 edge redirected;
6396 profile_count total_count = profile_count::uninitialized ();
6397 profile_count entry_count = profile_count::uninitialized ();
6398
6399 if (!can_copy_bbs_p (region, n_region))
6400 return false;
6401
6402 /* Some sanity checking. Note that we do not check for all possible
6403 misuses of the function. That is, if you ask to copy something weird,
6404 it will work, but the state of the data structures probably will not be
6405 correct. */
6406 for (i = 0; i < n_region; i++)
6407 {
6408 /* We do not handle subloops, i.e. all the blocks must belong to the
6409 same loop. */
6410 if (region[i]->loop_father != loop)
6411 return false;
6412
6413 if (region[i] != entry->dest
6414 && region[i] == loop->header)
6415 return false;
6416 }
6417
6418 /* In case the function is used for loop header copying (which is the primary
6419 use), ensure that EXIT and its copy will be the new latch and entry edges. */
6420 if (loop->header == entry->dest)
6421 {
6422 copying_header = true;
6423
6424 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
6425 return false;
6426
6427 for (i = 0; i < n_region; i++)
6428 if (region[i] != exit->src
6429 && dominated_by_p (CDI_DOMINATORS, region[i], exit->src))
6430 return false;
6431 }
6432
6433 initialize_original_copy_tables ();
6434
6435 if (copying_header)
6436 set_loop_copy (loop, loop_outer (loop));
6437 else
6438 set_loop_copy (loop, loop);
6439
6440 if (!region_copy)
6441 {
6442 region_copy = XNEWVEC (basic_block, n_region);
6443 free_region_copy = true;
6444 }
6445
6446 /* Record blocks outside the region that are dominated by something
6447 inside. */
6448 if (update_dominance)
6449 {
6450 doms.create (0);
6451 doms = get_dominated_by_region (CDI_DOMINATORS, region, n_region);
6452 }
6453
6454 if (entry->dest->count.initialized_p ())
6455 {
6456 total_count = entry->dest->count;
6457 entry_count = entry->count ();
6458 /* Fix up corner cases, to avoid division by zero or creation of negative
6459 frequencies. */
6460 if (entry_count > total_count)
6461 entry_count = total_count;
6462 }
6463
6464 copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
6465 split_edge_bb_loc (entry), update_dominance);
6466 if (total_count.initialized_p () && entry_count.initialized_p ())
6467 {
6468 scale_bbs_frequencies_profile_count (region, n_region,
6469 total_count - entry_count,
6470 total_count);
6471 scale_bbs_frequencies_profile_count (region_copy, n_region, entry_count,
6472 total_count);
6473 }
6474
6475 if (copying_header)
6476 {
6477 loop->header = exit->dest;
6478 loop->latch = exit->src;
6479 }
6480
6481 /* Redirect the entry and add the phi node arguments. */
6482 redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
6483 gcc_assert (redirected != NULL);
6484 flush_pending_stmts (entry);
6485
6486 /* Concerning updating of dominators: We must recount dominators
6487 for entry block and its copy. Anything that is outside of the
6488 region, but was dominated by something inside needs recounting as
6489 well. */
6490 if (update_dominance)
6491 {
6492 set_immediate_dominator (CDI_DOMINATORS, entry->dest, entry->src);
6493 doms.safe_push (get_bb_original (entry->dest));
6494 iterate_fix_dominators (CDI_DOMINATORS, doms, false);
6495 doms.release ();
6496 }
6497
6498 /* Add the other PHI node arguments. */
6499 add_phi_args_after_copy (region_copy, n_region, NULL);
6500
6501 if (free_region_copy)
6502 free (region_copy);
6503
6504 free_original_copy_tables ();
6505 return true;
6506 }
6507
6508 /* Checks if BB is part of the region defined by the N_REGION blocks in BBS. */
6509 static bool
6510 bb_part_of_region_p (basic_block bb, basic_block* bbs, unsigned n_region)
6511 {
6512 unsigned int n;
6513
6514 for (n = 0; n < n_region; n++)
6515 {
6516 if (bb == bbs[n])
6517 return true;
6518 }
6519 return false;
6520 }
6521
6522 /* Duplicates REGION consisting of N_REGION blocks. The new blocks
6523 are stored to REGION_COPY in the same order in which they appear
6524 in REGION, if REGION_COPY is not NULL. ENTRY is the entry to
6525 the region, EXIT an exit from it. The condition guarding EXIT
6526 is moved to ENTRY. Returns true if duplication succeeds, false
6527 otherwise.
6528
6529 For example,
6530
6531 some_code;
6532 if (cond)
6533 A;
6534 else
6535 B;
6536
6537 is transformed to
6538
6539 if (cond)
6540 {
6541 some_code;
6542 A;
6543 }
6544 else
6545 {
6546 some_code;
6547 B;
6548 }
6549 */
6550
6551 bool
6552 gimple_duplicate_sese_tail (edge entry, edge exit,
6553 basic_block *region, unsigned n_region,
6554 basic_block *region_copy)
6555 {
6556 unsigned i;
6557 bool free_region_copy = false;
6558 struct loop *loop = exit->dest->loop_father;
6559 struct loop *orig_loop = entry->dest->loop_father;
6560 basic_block switch_bb, entry_bb, nentry_bb;
6561 vec<basic_block> doms;
6562 profile_count total_count = profile_count::uninitialized (),
6563 exit_count = profile_count::uninitialized ();
6564 edge exits[2], nexits[2], e;
6565 gimple_stmt_iterator gsi;
6566 gimple *cond_stmt;
6567 edge sorig, snew;
6568 basic_block exit_bb;
6569 gphi_iterator psi;
6570 gphi *phi;
6571 tree def;
6572 struct loop *target, *aloop, *cloop;
6573
6574 gcc_assert (EDGE_COUNT (exit->src->succs) == 2);
6575 exits[0] = exit;
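/* The comparison yields 0 or 1, picking whichever successor edge of
EXIT->src is not EXIT itself. */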
6576 exits[1] = EDGE_SUCC (exit->src, EDGE_SUCC (exit->src, 0) == exit);
6577
6578 if (!can_copy_bbs_p (region, n_region))
6579 return false;
6580
6581 initialize_original_copy_tables ();
6582 set_loop_copy (orig_loop, loop);
6583
6584 target = loop;
6585 for (aloop = orig_loop->inner; aloop; aloop = aloop->next)
6586 {
6587 if (bb_part_of_region_p (aloop->header, region, n_region))
6588 {
6589 cloop = duplicate_loop (aloop, target);
6590 duplicate_subloops (aloop, cloop);
6591 }
6592 }
6593
6594 if (!region_copy)
6595 {
6596 region_copy = XNEWVEC (basic_block, n_region);
6597 free_region_copy = true;
6598 }
6599
6600 gcc_assert (!need_ssa_update_p (cfun));
6601
6602 /* Record blocks outside the region that are dominated by something
6603 inside. */
6604 doms = get_dominated_by_region (CDI_DOMINATORS, region, n_region);
6605
6606 total_count = exit->src->count;
6607 exit_count = exit->count ();
6608 /* Fix up corner cases, to avoid division by zero or creation of negative
6609 frequencies. */
6610 if (exit_count > total_count)
6611 exit_count = total_count;
6612
6613 copy_bbs (region, n_region, region_copy, exits, 2, nexits, orig_loop,
6614 split_edge_bb_loc (exit), true);
6615 if (total_count.initialized_p () && exit_count.initialized_p ())
6616 {
6617 scale_bbs_frequencies_profile_count (region, n_region,
6618 total_count - exit_count,
6619 total_count);
6620 scale_bbs_frequencies_profile_count (region_copy, n_region, exit_count,
6621 total_count);
6622 }
6623
6624 /* Create the switch block, and put the exit condition to it. */
6625 entry_bb = entry->dest;
6626 nentry_bb = get_bb_copy (entry_bb);
6627 if (!last_stmt (entry->src)
6628 || !stmt_ends_bb_p (last_stmt (entry->src)))
6629 switch_bb = entry->src;
6630 else
6631 switch_bb = split_edge (entry);
6632 set_immediate_dominator (CDI_DOMINATORS, nentry_bb, switch_bb);
6633
6634 gsi = gsi_last_bb (switch_bb);
6635 cond_stmt = last_stmt (exit->src);
6636 gcc_assert (gimple_code (cond_stmt) == GIMPLE_COND);
6637 cond_stmt = gimple_copy (cond_stmt);
6638
6639 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
6640
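/* SORIG keeps control flowing into the original region with the role
EXITS[1] had; SNEW enters the copied region with EXITS[0]'s flags. */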
6641 sorig = single_succ_edge (switch_bb);
6642 sorig->flags = exits[1]->flags;
6643 sorig->probability = exits[1]->probability;
6644 snew = make_edge (switch_bb, nentry_bb, exits[0]->flags);
6645 snew->probability = exits[0]->probability;
6646
6648 /* Register the new edge from SWITCH_BB in loop exit lists. */
6649 rescan_loop_exit (snew, true, false);
6650
6651 /* Add the PHI node arguments. */
6652 add_phi_args_after_copy (region_copy, n_region, snew);
6653
6654 /* Get rid of now superfluous conditions and associated edges (and phi node
6655 arguments). */
6656 exit_bb = exit->dest;
6657
6658 e = redirect_edge_and_branch (exits[0], exits[1]->dest);
6659 PENDING_STMT (e) = NULL;
6660
6661 /* The latch of ORIG_LOOP was copied, and so was the backedge
6662 to the original header. We redirect this backedge to EXIT_BB. */
6663 for (i = 0; i < n_region; i++)
6664 if (get_bb_original (region_copy[i]) == orig_loop->latch)
6665 {
6666 gcc_assert (single_succ_edge (region_copy[i]));
6667 e = redirect_edge_and_branch (single_succ_edge (region_copy[i]), exit_bb);
6668 PENDING_STMT (e) = NULL;
6669 for (psi = gsi_start_phis (exit_bb);
6670 !gsi_end_p (psi);
6671 gsi_next (&psi))
6672 {
6673 phi = psi.phi ();
6674 def = PHI_ARG_DEF (phi, nexits[0]->dest_idx);
6675 add_phi_arg (phi, def, e, gimple_phi_arg_location_from_edge (phi, e));
6676 }
6677 }
6678 e = redirect_edge_and_branch (nexits[1], nexits[0]->dest);
6679 PENDING_STMT (e) = NULL;
6680
6681 /* Anything outside of the region that was dominated by something
6682 inside needs its dominance info updated. */
6683 iterate_fix_dominators (CDI_DOMINATORS, doms, false);
6684 doms.release ();
6685 /* Update the SSA web. */
6686 update_ssa (TODO_update_ssa);
6687
6688 if (free_region_copy)
6689 free (region_copy);
6690
6691 free_original_copy_tables ();
6692 return true;
6693 }
6694
6695 /* Add all the blocks dominated by ENTRY to the array BBS_P. Stop
6696 adding blocks when the dominator traversal reaches EXIT. This
6697 function silently assumes that ENTRY strictly dominates EXIT. */
6698
6699 void
6700 gather_blocks_in_sese_region (basic_block entry, basic_block exit,
6701 vec<basic_block> *bbs_p)
6702 {
6703 basic_block son;
6704
6705 for (son = first_dom_son (CDI_DOMINATORS, entry);
6706 son;
6707 son = next_dom_son (CDI_DOMINATORS, son))
6708 {
6709 bbs_p->safe_push (son);
6710 if (son != exit)
6711 gather_blocks_in_sese_region (son, exit, bbs_p);
6712 }
6713 }
6714
6715 /* Replaces *TP with a duplicate (belonging to function TO_CONTEXT).
6716 The duplicates are recorded in VARS_MAP. */
6717
6718 static void
6719 replace_by_duplicate_decl (tree *tp, hash_map<tree, tree> *vars_map,
6720 tree to_context)
6721 {
6722 tree t = *tp, new_t;
6723 struct function *f = DECL_STRUCT_FUNCTION (to_context);
6724
6725 if (DECL_CONTEXT (t) == to_context)
6726 return;
6727
6728 bool existed;
6729 tree &loc = vars_map->get_or_insert (t, &existed);
6730
6731 if (!existed)
6732 {
6733 if (SSA_VAR_P (t))
6734 {
6735 new_t = copy_var_decl (t, DECL_NAME (t), TREE_TYPE (t));
6736 add_local_decl (f, new_t);
6737 }
6738 else
6739 {
6740 gcc_assert (TREE_CODE (t) == CONST_DECL);
6741 new_t = copy_node (t);
6742 }
6743 DECL_CONTEXT (new_t) = to_context;
6744
6745 loc = new_t;
6746 }
6747 else
6748 new_t = loc;
6749
6750 *tp = new_t;
6751 }
6752
6753
6754 /* Creates an ssa name in TO_CONTEXT equivalent to NAME.
6755 VARS_MAP maps old ssa names and var_decls to the new ones. */
6756
6757 static tree
6758 replace_ssa_name (tree name, hash_map<tree, tree> *vars_map,
6759 tree to_context)
6760 {
6761 tree new_name;
6762
6763 gcc_assert (!virtual_operand_p (name));
6764
6765 tree *loc = vars_map->get (name);
6766
6767 if (!loc)
6768 {
6769 tree decl = SSA_NAME_VAR (name);
6770 if (decl)
6771 {
6772 gcc_assert (!SSA_NAME_IS_DEFAULT_DEF (name));
6773 replace_by_duplicate_decl (&decl, vars_map, to_context);
6774 new_name = make_ssa_name_fn (DECL_STRUCT_FUNCTION (to_context),
6775 decl, SSA_NAME_DEF_STMT (name));
6776 }
6777 else
6778 new_name = copy_ssa_name_fn (DECL_STRUCT_FUNCTION (to_context),
6779 name, SSA_NAME_DEF_STMT (name));
6780
6781 /* Now that we've used the def stmt to define new_name, make sure it
6782 doesn't define name anymore. */
6783 SSA_NAME_DEF_STMT (name) = NULL;
6784
6785 vars_map->put (name, new_name);
6786 }
6787 else
6788 new_name = *loc;
6789
6790 return new_name;
6791 }
6792
6793 struct move_stmt_d
6794 {
6795 tree orig_block;
6796 tree new_block;
6797 tree from_context;
6798 tree to_context;
6799 hash_map<tree, tree> *vars_map;
6800 htab_t new_label_map;
6801 hash_map<void *, void *> *eh_map;
6802 bool remap_decls_p;
6803 };
6804
6805 /* Helper for move_block_to_fn. Set TREE_BLOCK in every expression
6806 contained in *TP if it has been ORIG_BLOCK previously and change the
6807 DECL_CONTEXT of every local variable referenced in *TP. */
6808
6809 static tree
6810 move_stmt_op (tree *tp, int *walk_subtrees, void *data)
6811 {
6812 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6813 struct move_stmt_d *p = (struct move_stmt_d *) wi->info;
6814 tree t = *tp;
6815
6816 if (EXPR_P (t))
6817 {
6818 tree block = TREE_BLOCK (t);
6819 if (block == NULL_TREE)
6820 ;
6821 else if (block == p->orig_block
6822 || p->orig_block == NULL_TREE)
6823 {
6824 /* tree_node_can_be_shared says we can share invariant
6825 addresses but unshare_expr copies them anyway. Make sure
6826 to unshare before adjusting the block in place - we do not
6827 always see a copy here. */
6828 if (TREE_CODE (t) == ADDR_EXPR
6829 && is_gimple_min_invariant (t))
6830 *tp = t = unshare_expr (t);
6831 TREE_SET_BLOCK (t, p->new_block);
6832 }
6833 else if (flag_checking)
6834 {
6835 while (block && TREE_CODE (block) == BLOCK && block != p->orig_block)
6836 block = BLOCK_SUPERCONTEXT (block);
6837 gcc_assert (block == p->orig_block);
6838 }
6839 }
6840 else if (DECL_P (t) || TREE_CODE (t) == SSA_NAME)
6841 {
6842 if (TREE_CODE (t) == SSA_NAME)
6843 *tp = replace_ssa_name (t, p->vars_map, p->to_context);
6844 else if (TREE_CODE (t) == PARM_DECL
6845 && gimple_in_ssa_p (cfun))
6846 *tp = *(p->vars_map->get (t));
6847 else if (TREE_CODE (t) == LABEL_DECL)
6848 {
6849 if (p->new_label_map)
6850 {
6851 struct tree_map in, *out;
6852 in.base.from = t;
6853 out = (struct tree_map *)
6854 htab_find_with_hash (p->new_label_map, &in, DECL_UID (t));
6855 if (out)
6856 *tp = t = out->to;
6857 }
6858
6859 /* For FORCED_LABELs we can end up with references from other
6860 functions if some SESE regions are outlined. It is UB to
6861 jump in between them, but they could be used just for printing
6862 addresses etc. In that case, DECL_CONTEXT on the label should
6863 be the function containing the glabel stmt with that LABEL_DECL,
6864 rather than whatever function in which a reference to the label
6865 was last seen. */
6866 if (!FORCED_LABEL (t) && !DECL_NONLOCAL (t))
6867 DECL_CONTEXT (t) = p->to_context;
6868 }
6869 else if (p->remap_decls_p)
6870 {
6871 /* Replace T with its duplicate. T should no longer appear in the
6872 parent function, so this looks wasteful; however, it may appear
6873 in referenced_vars, and more importantly, as virtual operands of
6874 statements, and in alias lists of other variables. It would be
6875 quite difficult to expunge it from all those places. ??? It might
6876 suffice to do this for addressable variables. */
6877 if ((VAR_P (t) && !is_global_var (t))
6878 || TREE_CODE (t) == CONST_DECL)
6879 replace_by_duplicate_decl (tp, p->vars_map, p->to_context);
6880 }
6881 *walk_subtrees = 0;
6882 }
6883 else if (TYPE_P (t))
6884 *walk_subtrees = 0;
6885
6886 return NULL_TREE;
6887 }
6888
6889 /* Helper for move_stmt_r. Given an EH region number for the source
6890 function, map that to the duplicate EH region number in the dest. */
6891
6892 static int
6893 move_stmt_eh_region_nr (int old_nr, struct move_stmt_d *p)
6894 {
6895 eh_region old_r, new_r;
6896
6897 old_r = get_eh_region_from_number (old_nr);
6898 new_r = static_cast<eh_region> (*p->eh_map->get (old_r));
6899
6900 return new_r->index;
6901 }
6902
6903 /* Similar, but operate on INTEGER_CSTs. */
6904
6905 static tree
6906 move_stmt_eh_region_tree_nr (tree old_t_nr, struct move_stmt_d *p)
6907 {
6908 int old_nr, new_nr;
6909
6910 old_nr = tree_to_shwi (old_t_nr);
6911 new_nr = move_stmt_eh_region_nr (old_nr, p);
6912
6913 return build_int_cst (integer_type_node, new_nr);
6914 }
6915
6916 /* Like move_stmt_op, but for gimple statements.
6917
6918 Helper for move_block_to_fn. Set GIMPLE_BLOCK in every expression
6919 contained in the current statement in *GSI_P and change the
6920 DECL_CONTEXT of every local variable referenced in the current
6921 statement. */
6922
6923 static tree
6924 move_stmt_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6925 struct walk_stmt_info *wi)
6926 {
6927 struct move_stmt_d *p = (struct move_stmt_d *) wi->info;
6928 gimple *stmt = gsi_stmt (*gsi_p);
6929 tree block = gimple_block (stmt);
6930
6931 if (block == p->orig_block
6932 || (p->orig_block == NULL_TREE
6933 && block != NULL_TREE))
6934 gimple_set_block (stmt, p->new_block);
6935
6936 switch (gimple_code (stmt))
6937 {
6938 case GIMPLE_CALL:
6939 /* Remap the region numbers for __builtin_eh_{pointer,filter}. */
6940 {
6941 tree r, fndecl = gimple_call_fndecl (stmt);
6942 if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
6943 switch (DECL_FUNCTION_CODE (fndecl))
6944 {
6945 case BUILT_IN_EH_COPY_VALUES:
6946 r = gimple_call_arg (stmt, 1);
6947 r = move_stmt_eh_region_tree_nr (r, p);
6948 gimple_call_set_arg (stmt, 1, r);
6949 /* FALLTHRU */
6950
6951 case BUILT_IN_EH_POINTER:
6952 case BUILT_IN_EH_FILTER:
6953 r = gimple_call_arg (stmt, 0);
6954 r = move_stmt_eh_region_tree_nr (r, p);
6955 gimple_call_set_arg (stmt, 0, r);
6956 break;
6957
6958 default:
6959 break;
6960 }
6961 }
6962 break;
6963
6964 case GIMPLE_RESX:
6965 {
6966 gresx *resx_stmt = as_a <gresx *> (stmt);
6967 int r = gimple_resx_region (resx_stmt);
6968 r = move_stmt_eh_region_nr (r, p);
6969 gimple_resx_set_region (resx_stmt, r);
6970 }
6971 break;
6972
6973 case GIMPLE_EH_DISPATCH:
6974 {
6975 geh_dispatch *eh_dispatch_stmt = as_a <geh_dispatch *> (stmt);
6976 int r = gimple_eh_dispatch_region (eh_dispatch_stmt);
6977 r = move_stmt_eh_region_nr (r, p);
6978 gimple_eh_dispatch_set_region (eh_dispatch_stmt, r);
6979 }
6980 break;
6981
6982 case GIMPLE_OMP_RETURN:
6983 case GIMPLE_OMP_CONTINUE:
6984 break;
6985
6986 case GIMPLE_LABEL:
6987 {
6988 /* For FORCED_LABEL, move_stmt_op doesn't adjust DECL_CONTEXT,
6989 so that such labels can be referenced from other regions.
6990 Make sure to update it when seeing a GIMPLE_LABEL though,
6991 since that statement is the owner of the label. */
6992 walk_gimple_op (stmt, move_stmt_op, wi);
6993 *handled_ops_p = true;
6994 tree label = gimple_label_label (as_a <glabel *> (stmt));
6995 if (FORCED_LABEL (label) || DECL_NONLOCAL (label))
6996 DECL_CONTEXT (label) = p->to_context;
6997 }
6998 break;
6999
7000 default:
7001 if (is_gimple_omp (stmt))
7002 {
7003 /* Do not remap variables inside OMP directives. Variables
7004 referenced in clauses and directive header belong to the
7005 parent function and should not be moved into the child
7006 function. */
7007 bool save_remap_decls_p = p->remap_decls_p;
7008 p->remap_decls_p = false;
7009 *handled_ops_p = true;
7010
7011 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), move_stmt_r,
7012 move_stmt_op, wi);
7013
7014 p->remap_decls_p = save_remap_decls_p;
7015 }
7016 break;
7017 }
7018
7019 return NULL_TREE;
7020 }
7021
7022 /* Move basic block BB from function CFUN to function DEST_FN. The
7023 block is moved out of the original linked list and placed after
7024 block AFTER in the new list. Also, the block is removed from the
7025 original array of blocks and placed in DEST_FN's array of blocks.
7026 If UPDATE_EDGE_COUNT_P is true, the edge counts on both CFGs are
7027 updated to reflect the moved edges.
7028
7029 The local variables are remapped to new instances; VARS_MAP is used
7030 to record the mapping. */
7031
7032 static void
7033 move_block_to_fn (struct function *dest_cfun, basic_block bb,
7034 basic_block after, bool update_edge_count_p,
7035 struct move_stmt_d *d)
7036 {
7037 struct control_flow_graph *cfg;
7038 edge_iterator ei;
7039 edge e;
7040 gimple_stmt_iterator si;
7041 unsigned old_len, new_len;
7042
7043 /* Remove BB from dominance structures. */
7044 delete_from_dominance_info (CDI_DOMINATORS, bb);
7045
7046 /* Move BB from its current loop to the copy in the new function. */
7047 if (current_loops)
7048 {
7049 struct loop *new_loop = (struct loop *)bb->loop_father->aux;
7050 if (new_loop)
7051 bb->loop_father = new_loop;
7052 }
7053
7054 /* Link BB to the new linked list. */
7055 move_block_after (bb, after);
7056
7057 /* Update the edge count in the corresponding flowgraphs. */
7058 if (update_edge_count_p)
7059 FOR_EACH_EDGE (e, ei, bb->succs)
7060 {
7061 cfun->cfg->x_n_edges--;
7062 dest_cfun->cfg->x_n_edges++;
7063 }
7064
7065 /* Remove BB from the original basic block array. */
7066 (*cfun->cfg->x_basic_block_info)[bb->index] = NULL;
7067 cfun->cfg->x_n_basic_blocks--;
7068
7069 /* Grow DEST_CFUN's basic block array if needed. */
7070 cfg = dest_cfun->cfg;
7071 cfg->x_n_basic_blocks++;
7072 if (bb->index >= cfg->x_last_basic_block)
7073 cfg->x_last_basic_block = bb->index + 1;
7074
7075 old_len = vec_safe_length (cfg->x_basic_block_info);
7076 if ((unsigned) cfg->x_last_basic_block >= old_len)
7077 {
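/* Grow by an extra 25% to amortize the cost of repeated moves. */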
7078 new_len = cfg->x_last_basic_block + (cfg->x_last_basic_block + 3) / 4;
7079 vec_safe_grow_cleared (cfg->x_basic_block_info, new_len);
7080 }
7081
7082 (*cfg->x_basic_block_info)[bb->index] = bb;
7083
7084 /* Remap the variables in phi nodes. */
7085 for (gphi_iterator psi = gsi_start_phis (bb);
7086 !gsi_end_p (psi); )
7087 {
7088 gphi *phi = psi.phi ();
7089 use_operand_p use;
7090 tree op = PHI_RESULT (phi);
7091 ssa_op_iter oi;
7092 unsigned i;
7093
7094 if (virtual_operand_p (op))
7095 {
7096 /* Remove the phi nodes for virtual operands (alias analysis will be
7097 run for the new function, anyway). But replace all uses that
7098 might be outside of the region we move. */
7099 use_operand_p use_p;
7100 imm_use_iterator iter;
7101 gimple *use_stmt;
7102 FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
7103 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
7104 SET_USE (use_p, SSA_NAME_VAR (op));
7105 remove_phi_node (&psi, true);
7106 continue;
7107 }
7108
7109 SET_PHI_RESULT (phi,
7110 replace_ssa_name (op, d->vars_map, dest_cfun->decl));
7111 FOR_EACH_PHI_ARG (use, phi, oi, SSA_OP_USE)
7112 {
7113 op = USE_FROM_PTR (use);
7114 if (TREE_CODE (op) == SSA_NAME)
7115 SET_USE (use, replace_ssa_name (op, d->vars_map, dest_cfun->decl));
7116 }
7117
7118 for (i = 0; i < EDGE_COUNT (bb->preds); i++)
7119 {
7120 location_t locus = gimple_phi_arg_location (phi, i);
7121 tree block = LOCATION_BLOCK (locus);
7122
7123 if (locus == UNKNOWN_LOCATION)
7124 continue;
7125 if (d->orig_block == NULL_TREE || block == d->orig_block)
7126 {
7127 locus = set_block (locus, d->new_block);
7128 gimple_phi_arg_set_location (phi, i, locus);
7129 }
7130 }
7131
7132 gsi_next (&psi);
7133 }
7134
7135 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
7136 {
7137 gimple *stmt = gsi_stmt (si);
7138 struct walk_stmt_info wi;
7139
7140 memset (&wi, 0, sizeof (wi));
7141 wi.info = d;
7142 walk_gimple_stmt (&si, move_stmt_r, move_stmt_op, &wi);
7143
7144 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
7145 {
7146 tree label = gimple_label_label (label_stmt);
7147 int uid = LABEL_DECL_UID (label);
7148
7149 gcc_assert (uid > -1);
7150
7151 old_len = vec_safe_length (cfg->x_label_to_block_map);
7152 if (old_len <= (unsigned) uid)
7153 {
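/* Grow to 1.5 * UID + 1 to amortize repeated moves. */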
7154 new_len = 3 * uid / 2 + 1;
7155 vec_safe_grow_cleared (cfg->x_label_to_block_map, new_len);
7156 }
7157
7158 (*cfg->x_label_to_block_map)[uid] = bb;
7159 (*cfun->cfg->x_label_to_block_map)[uid] = NULL;
7160
7161 gcc_assert (DECL_CONTEXT (label) == dest_cfun->decl);
7162
7163 if (uid >= dest_cfun->cfg->last_label_uid)
7164 dest_cfun->cfg->last_label_uid = uid + 1;
7165 }
7166
7167 maybe_duplicate_eh_stmt_fn (dest_cfun, stmt, cfun, stmt, d->eh_map, 0);
7168 remove_stmt_from_eh_lp_fn (cfun, stmt);
7169
7170 gimple_duplicate_stmt_histograms (dest_cfun, stmt, cfun, stmt);
7171 gimple_remove_stmt_histograms (cfun, stmt);
7172
7173 /* We cannot leave any operands allocated from the operand caches of
7174 the current function. */
7175 free_stmt_operands (cfun, stmt);
7176 push_cfun (dest_cfun);
7177 update_stmt (stmt);
7178 if (is_gimple_call (stmt))
7179 notice_special_calls (as_a <gcall *> (stmt));
7180 pop_cfun ();
7181 }
7182
7183 FOR_EACH_EDGE (e, ei, bb->succs)
7184 if (e->goto_locus != UNKNOWN_LOCATION)
7185 {
7186 tree block = LOCATION_BLOCK (e->goto_locus);
7187 if (d->orig_block == NULL_TREE
7188 || block == d->orig_block)
7189 e->goto_locus = set_block (e->goto_locus, d->new_block);
7190 }
7191 }
7192
7193 /* Examine the statements in BB (which is in SRC_CFUN); find and return
7194 the outermost EH region. Use REGION as the incoming base EH region.
7195 If there is no single outermost region, return NULL and set *ALL to
7196 true. */
7197
7198 static eh_region
7199 find_outermost_region_in_block (struct function *src_cfun,
7200 basic_block bb, eh_region region,
7201 bool *all)
7202 {
7203 gimple_stmt_iterator si;
7204
7205 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
7206 {
7207 gimple *stmt = gsi_stmt (si);
7208 eh_region stmt_region;
7209 int lp_nr;
7210
7211 lp_nr = lookup_stmt_eh_lp_fn (src_cfun, stmt);
7212 stmt_region = get_eh_region_from_lp_number_fn (src_cfun, lp_nr);
7213 if (stmt_region)
7214 {
7215 if (region == NULL)
7216 region = stmt_region;
7217 else if (stmt_region != region)
7218 {
7219 region = eh_region_outermost (src_cfun, stmt_region, region);
7220 if (region == NULL)
7221 {
7222 *all = true;
7223 return NULL;
7224 }
7225 }
7226 }
7227 }
7228
7229 return region;
7230 }
7231
7232 static tree
7233 new_label_mapper (tree decl, void *data)
7234 {
7235 htab_t hash = (htab_t) data;
7236 struct tree_map *m;
7237 void **slot;
7238
7239 gcc_assert (TREE_CODE (decl) == LABEL_DECL);
7240
7241 m = XNEW (struct tree_map);
7242 m->hash = DECL_UID (decl);
7243 m->base.from = decl;
7244 m->to = create_artificial_label (UNKNOWN_LOCATION);
7245 LABEL_DECL_UID (m->to) = LABEL_DECL_UID (decl);
7246 if (LABEL_DECL_UID (m->to) >= cfun->cfg->last_label_uid)
7247 cfun->cfg->last_label_uid = LABEL_DECL_UID (m->to) + 1;
7248
7249 slot = htab_find_slot_with_hash (hash, m, m->hash, INSERT);
7250 gcc_assert (*slot == NULL);
7251
7252 *slot = m;
7253
7254 return m->to;
7255 }
7256
7257 /* Tree walker to replace the decls used inside value expressions by
7258 duplicates. */
7259
7260 static tree
7261 replace_block_vars_by_duplicates_1 (tree *tp, int *walk_subtrees, void *data)
7262 {
7263 struct replace_decls_d *rd = (struct replace_decls_d *)data;
7264
7265 switch (TREE_CODE (*tp))
7266 {
7267 case VAR_DECL:
7268 case PARM_DECL:
7269 case RESULT_DECL:
7270 replace_by_duplicate_decl (tp, rd->vars_map, rd->to_context);
7271 break;
7272 default:
7273 break;
7274 }
7275
7276 if (IS_TYPE_OR_DECL_P (*tp))
7277 *walk_subtrees = false;
7278
7279 return NULL;
7280 }
7281
7282 /* Change DECL_CONTEXT of all BLOCK_VARS in block, including
7283 subblocks. */
7284
7285 static void
7286 replace_block_vars_by_duplicates (tree block, hash_map<tree, tree> *vars_map,
7287 tree to_context)
7288 {
7289 tree *tp, t;
7290
7291 for (tp = &BLOCK_VARS (block); *tp; tp = &DECL_CHAIN (*tp))
7292 {
7293 t = *tp;
7294 if (!VAR_P (t) && TREE_CODE (t) != CONST_DECL)
7295 continue;
7296 replace_by_duplicate_decl (&t, vars_map, to_context);
7297 if (t != *tp)
7298 {
7299 if (VAR_P (*tp) && DECL_HAS_VALUE_EXPR_P (*tp))
7300 {
7301 tree x = DECL_VALUE_EXPR (*tp);
7302 struct replace_decls_d rd = { vars_map, to_context };
7303 x = unshare_expr (x);
7304 walk_tree (&x, replace_block_vars_by_duplicates_1, &rd, NULL);
7305 SET_DECL_VALUE_EXPR (t, x);
7306 DECL_HAS_VALUE_EXPR_P (t) = 1;
7307 }
7308 DECL_CHAIN (t) = DECL_CHAIN (*tp);
7309 *tp = t;
7310 }
7311 }
7312
7313 for (block = BLOCK_SUBBLOCKS (block); block; block = BLOCK_CHAIN (block))
7314 replace_block_vars_by_duplicates (block, vars_map, to_context);
7315 }
7316
7317 /* Fixup the loop arrays and numbers after moving LOOP and its subloops
7318 from FN1 to FN2. */
7319
7320 static void
7321 fixup_loop_arrays_after_move (struct function *fn1, struct function *fn2,
7322 struct loop *loop)
7323 {
7324 /* Discard it from the old loop array. */
7325 (*get_loops (fn1))[loop->num] = NULL;
7326
7327 /* Place it in the new loop array, assigning it a new number. */
7328 loop->num = number_of_loops (fn2);
7329 vec_safe_push (loops_for_fn (fn2)->larray, loop);
7330
7331 /* Recurse to children. */
7332 for (loop = loop->inner; loop; loop = loop->next)
7333 fixup_loop_arrays_after_move (fn1, fn2, loop);
7334 }
7335
7336 /* Verify that the blocks in BBS_P are a single-entry, single-exit region
7337 delimited by ENTRY_BB and EXIT_BB, possibly containing noreturn blocks. */
7338
7339 DEBUG_FUNCTION void
7340 verify_sese (basic_block entry, basic_block exit, vec<basic_block> *bbs_p)
7341 {
7342 basic_block bb;
7343 edge_iterator ei;
7344 edge e;
7345 bitmap bbs = BITMAP_ALLOC (NULL);
7346 int i;
7347
7348 gcc_assert (entry != NULL);
7349 gcc_assert (entry != exit);
7350 gcc_assert (bbs_p != NULL);
7351
7352 gcc_assert (bbs_p->length () > 0);
7353
7354 FOR_EACH_VEC_ELT (*bbs_p, i, bb)
7355 bitmap_set_bit (bbs, bb->index);
7356
7357 gcc_assert (bitmap_bit_p (bbs, entry->index));
7358 gcc_assert (exit == NULL || bitmap_bit_p (bbs, exit->index));
7359
7360 FOR_EACH_VEC_ELT (*bbs_p, i, bb)
7361 {
7362 if (bb == entry)
7363 {
7364 gcc_assert (single_pred_p (entry));
7365 gcc_assert (!bitmap_bit_p (bbs, single_pred (entry)->index));
7366 }
7367 else
7368 for (ei = ei_start (bb->preds); !ei_end_p (ei); ei_next (&ei))
7369 {
7370 e = ei_edge (ei);
7371 gcc_assert (bitmap_bit_p (bbs, e->src->index));
7372 }
7373
7374 if (bb == exit)
7375 {
7376 gcc_assert (single_succ_p (exit));
7377 gcc_assert (!bitmap_bit_p (bbs, single_succ (exit)->index));
7378 }
7379 else
7380 for (ei = ei_start (bb->succs); !ei_end_p (ei); ei_next (&ei))
7381 {
7382 e = ei_edge (ei);
7383 gcc_assert (bitmap_bit_p (bbs, e->dest->index));
7384 }
7385 }
7386
7387 BITMAP_FREE (bbs);
7388 }
7389
7390 /* If FROM is an SSA_NAME, mark the version in bitmap DATA. */
7391
7392 bool
7393 gather_ssa_name_hash_map_from (tree const &from, tree const &, void *data)
7394 {
7395 bitmap release_names = (bitmap)data;
7396
7397 if (TREE_CODE (from) != SSA_NAME)
7398 return true;
7399
7400 bitmap_set_bit (release_names, SSA_NAME_VERSION (from));
7401 return true;
7402 }
7403
7404 /* Return the LOOP_DIST_ALIAS internal call if present in BB. */
7405
7406 static gimple *
7407 find_loop_dist_alias (basic_block bb)
7408 {
7409 gimple *g = last_stmt (bb);
7410 if (g == NULL || gimple_code (g) != GIMPLE_COND)
7411 return NULL;
7412
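/* If present, the call is expected immediately before the ending
GIMPLE_COND. */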
7413 gimple_stmt_iterator gsi = gsi_for_stmt (g);
7414 gsi_prev (&gsi);
7415 if (gsi_end_p (gsi))
7416 return NULL;
7417
7418 g = gsi_stmt (gsi);
7419 if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS))
7420 return g;
7421 return NULL;
7422 }
7423
7424 /* Fold loop internal call G like IFN_LOOP_VECTORIZED/IFN_LOOP_DIST_ALIAS
7425 to VALUE and update any immediate uses of its LHS. */
7426
7427 void
7428 fold_loop_internal_call (gimple *g, tree value)
7429 {
7430 tree lhs = gimple_call_lhs (g);
7431 use_operand_p use_p;
7432 imm_use_iterator iter;
7433 gimple *use_stmt;
7434 gimple_stmt_iterator gsi = gsi_for_stmt (g);
7435
7436 update_call_from_tree (&gsi, value);
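/* Replace every use of the call's LHS with VALUE directly, so that
subsequent CFG cleanup can fold the guarding condition. */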
7437 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
7438 {
7439 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
7440 SET_USE (use_p, value);
7441 update_stmt (use_stmt);
7442 }
7443 }
7444
7445 /* Move a single-entry, single-exit region delimited by ENTRY_BB and
7446 EXIT_BB to function DEST_CFUN. The whole region is replaced by a
7447 single basic block in the original CFG and the new basic block is
7448 returned. DEST_CFUN must not have a CFG yet.
7449
7450 Note that the region need not be a pure SESE region. Blocks inside
7451 the region may contain calls to abort/exit. The only restriction
7452 is that ENTRY_BB should be the only entry point and it must
7453 dominate EXIT_BB.
7454
7455 Change TREE_BLOCK of all statements in ORIG_BLOCK to the new
7456 function's outermost BLOCK, and move all subblocks of ORIG_BLOCK
7457 to the new function.
7458
7459 All local variables referenced in the region are assumed to be in
7460 the corresponding BLOCK_VARS and unexpanded variable lists
7461 associated with DEST_CFUN.
7462
7463 TODO: investigate whether we can reuse gimple_duplicate_sese_region to
7464 reimplement move_sese_region_to_fn by duplicating the region rather than
7465 moving it. */
7466
7467 basic_block
7468 move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
7469 basic_block exit_bb, tree orig_block)
7470 {
7471 vec<basic_block> bbs, dom_bbs;
7472 basic_block dom_entry = get_immediate_dominator (CDI_DOMINATORS, entry_bb);
7473 basic_block after, bb, *entry_pred, *exit_succ, abb;
7474 struct function *saved_cfun = cfun;
7475 int *entry_flag, *exit_flag;
7476 profile_probability *entry_prob, *exit_prob;
7477 unsigned i, num_entry_edges, num_exit_edges, num_nodes;
7478 edge e;
7479 edge_iterator ei;
7480 htab_t new_label_map;
7481 hash_map<void *, void *> *eh_map;
7482 struct loop *loop = entry_bb->loop_father;
7483 struct loop *loop0 = get_loop (saved_cfun, 0);
7484 struct move_stmt_d d;
7485
7486 /* If ENTRY does not strictly dominate EXIT, this cannot be an SESE
7487 region. */
7488 gcc_assert (entry_bb != exit_bb
7489 && (!exit_bb
7490 || dominated_by_p (CDI_DOMINATORS, exit_bb, entry_bb)));
7491
7492 /* Collect all the blocks in the region. Manually add ENTRY_BB
7493 because it won't be added by dfs_enumerate_from. */
7494 bbs.create (0);
7495 bbs.safe_push (entry_bb);
7496 gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);
7497
7498 if (flag_checking)
7499 verify_sese (entry_bb, exit_bb, &bbs);
7500
7501 /* The blocks that used to be dominated by something in BBS will now be
7502 dominated by the new block. */
7503 dom_bbs = get_dominated_by_region (CDI_DOMINATORS,
7504 bbs.address (),
7505 bbs.length ());
7506
7507 /* Detach ENTRY_BB and EXIT_BB from CFUN->CFG. We need to remember
7508 the predecessor edges to ENTRY_BB and the successor edges to
7509 EXIT_BB so that we can re-attach them to the new basic block that
7510 will replace the region. */
7511 num_entry_edges = EDGE_COUNT (entry_bb->preds);
7512 entry_pred = XNEWVEC (basic_block, num_entry_edges);
7513 entry_flag = XNEWVEC (int, num_entry_edges);
7514 entry_prob = XNEWVEC (profile_probability, num_entry_edges);
7515 i = 0;
7516 for (ei = ei_start (entry_bb->preds); (e = ei_safe_edge (ei)) != NULL;)
7517 {
7518 entry_prob[i] = e->probability;
7519 entry_flag[i] = e->flags;
7520 entry_pred[i++] = e->src;
7521 remove_edge (e);
7522 }
7523
7524 if (exit_bb)
7525 {
7526 num_exit_edges = EDGE_COUNT (exit_bb->succs);
7527 exit_succ = XNEWVEC (basic_block, num_exit_edges);
7528 exit_flag = XNEWVEC (int, num_exit_edges);
7529 exit_prob = XNEWVEC (profile_probability, num_exit_edges);
7530 i = 0;
7531 for (ei = ei_start (exit_bb->succs); (e = ei_safe_edge (ei)) != NULL;)
7532 {
7533 exit_prob[i] = e->probability;
7534 exit_flag[i] = e->flags;
7535 exit_succ[i++] = e->dest;
7536 remove_edge (e);
7537 }
7538 }
7539 else
7540 {
7541 num_exit_edges = 0;
7542 exit_succ = NULL;
7543 exit_flag = NULL;
7544 exit_prob = NULL;
7545 }
7546
7547 /* Switch context to the child function to initialize DEST_FN's CFG. */
7548 gcc_assert (dest_cfun->cfg == NULL);
7549 push_cfun (dest_cfun);
7550
7551 init_empty_tree_cfg ();
7552
7553 /* Initialize EH information for the new function. */
7554 eh_map = NULL;
7555 new_label_map = NULL;
7556 if (saved_cfun->eh)
7557 {
7558 eh_region region = NULL;
7559 bool all = false;
7560
7561 FOR_EACH_VEC_ELT (bbs, i, bb)
7562 {
7563 region = find_outermost_region_in_block (saved_cfun, bb, region, &all);
7564 if (all)
7565 break;
7566 }
7567
7568 init_eh_for_function ();
7569 if (region != NULL || all)
7570 {
7571 new_label_map = htab_create (17, tree_map_hash, tree_map_eq, free);
7572 eh_map = duplicate_eh_regions (saved_cfun, region, 0,
7573 new_label_mapper, new_label_map);
7574 }
7575 }
7576
7577 /* Initialize an empty loop tree. */
7578 struct loops *loops = ggc_cleared_alloc<struct loops> ();
7579 init_loops_structure (dest_cfun, loops, 1);
7580 loops->state = LOOPS_MAY_HAVE_MULTIPLE_LATCHES;
7581 set_loops_for_fn (dest_cfun, loops);
7582
7583 vec<loop_p, va_gc> *larray = get_loops (saved_cfun)->copy ();
7584
7585 /* Move the outlined loop tree part. */
7586 num_nodes = bbs.length ();
7587 FOR_EACH_VEC_ELT (bbs, i, bb)
7588 {
7589 if (bb->loop_father->header == bb)
7590 {
7591 struct loop *this_loop = bb->loop_father;
7592 struct loop *outer = loop_outer (this_loop);
7593 if (outer == loop
7594 /* If the SESE region contains some bbs ending with
7595 a noreturn call, those are considered to belong
7596 to the outermost loop in saved_cfun, rather than
7597 the entry_bb's loop_father. */
7598 || outer == loop0)
7599 {
7600 if (outer != loop)
7601 num_nodes -= this_loop->num_nodes;
7602 flow_loop_tree_node_remove (bb->loop_father);
7603 flow_loop_tree_node_add (get_loop (dest_cfun, 0), this_loop);
7604 fixup_loop_arrays_after_move (saved_cfun, cfun, this_loop);
7605 }
7606 }
7607 else if (bb->loop_father == loop0 && loop0 != loop)
7608 num_nodes--;
7609
7610 /* Remove loop exits from the outlined region. */
7611 if (loops_for_fn (saved_cfun)->exits)
7612 FOR_EACH_EDGE (e, ei, bb->succs)
7613 {
7614 struct loops *l = loops_for_fn (saved_cfun);
7615 loop_exit **slot
7616 = l->exits->find_slot_with_hash (e, htab_hash_pointer (e),
7617 NO_INSERT);
7618 if (slot)
7619 l->exits->clear_slot (slot);
7620 }
7621 }
7622
7623 /* Adjust the number of blocks in the tree root of the outlined part. */
7624 get_loop (dest_cfun, 0)->num_nodes = bbs.length () + 2;
7625
7626 /* Set up a mapping to be used by move_block_to_fn. */
7627 loop->aux = current_loops->tree_root;
7628 loop0->aux = current_loops->tree_root;
7629
7630 /* Fix up orig_loop_num. If the block referenced in it has been moved
7631 to dest_cfun, update orig_loop_num field, otherwise clear it. */
7632 struct loop *dloop;
7633 signed char *moved_orig_loop_num = NULL;
7634 FOR_EACH_LOOP_FN (dest_cfun, dloop, 0)
7635 if (dloop->orig_loop_num)
7636 {
7637 if (moved_orig_loop_num == NULL)
7638 moved_orig_loop_num
7639 = XCNEWVEC (signed char, vec_safe_length (larray));
7640 if ((*larray)[dloop->orig_loop_num] != NULL
7641 && get_loop (saved_cfun, dloop->orig_loop_num) == NULL)
7642 {
7643 if (moved_orig_loop_num[dloop->orig_loop_num] >= 0
7644 && moved_orig_loop_num[dloop->orig_loop_num] < 2)
7645 moved_orig_loop_num[dloop->orig_loop_num]++;
7646 dloop->orig_loop_num = (*larray)[dloop->orig_loop_num]->num;
7647 }
7648 else
7649 {
7650 moved_orig_loop_num[dloop->orig_loop_num] = -1;
7651 dloop->orig_loop_num = 0;
7652 }
7653 }
7654 pop_cfun ();
7655
7656 if (moved_orig_loop_num)
7657 {
7658 FOR_EACH_VEC_ELT (bbs, i, bb)
7659 {
7660 gimple *g = find_loop_dist_alias (bb);
7661 if (g == NULL)
7662 continue;
7663
7664 int orig_loop_num = tree_to_shwi (gimple_call_arg (g, 0));
7665 gcc_assert (orig_loop_num
7666 && (unsigned) orig_loop_num < vec_safe_length (larray));
7667 if (moved_orig_loop_num[orig_loop_num] == 2)
7668 {
7669 /* If we have moved both loops with this orig_loop_num into
7670 dest_cfun and the LOOP_DIST_ALIAS call is being moved there
7671 too, update the first argument. */
7672 gcc_assert ((*larray)[orig_loop_num] != NULL
7673 && (get_loop (saved_cfun, orig_loop_num)
7674 == NULL));
7675 tree t = build_int_cst (integer_type_node,
7676 (*larray)[orig_loop_num]->num);
7677 gimple_call_set_arg (g, 0, t);
7678 update_stmt (g);
7679 /* Make sure the following loop will not update it. */
7680 moved_orig_loop_num[orig_loop_num] = 0;
7681 }
7682 else
7683 /* Otherwise at least one of the loops stayed in saved_cfun.
7684 Remove the LOOP_DIST_ALIAS call. */
7685 fold_loop_internal_call (g, gimple_call_arg (g, 1));
7686 }
7687 FOR_EACH_BB_FN (bb, saved_cfun)
7688 {
7689 gimple *g = find_loop_dist_alias (bb);
7690 if (g == NULL)
7691 continue;
7692 int orig_loop_num = tree_to_shwi (gimple_call_arg (g, 0));
7693 gcc_assert (orig_loop_num
7694 && (unsigned) orig_loop_num < vec_safe_length (larray));
7695 if (moved_orig_loop_num[orig_loop_num])
7696 /* LOOP_DIST_ALIAS call remained in saved_cfun, if at least one
7697 of the corresponding loops was moved, remove it. */
7698 fold_loop_internal_call (g, gimple_call_arg (g, 1));
7699 }
7700 XDELETEVEC (moved_orig_loop_num);
7701 }
7702 ggc_free (larray);
7703
7704 /* Move blocks from BBS into DEST_CFUN. */
7705 gcc_assert (bbs.length () >= 2);
7706 after = dest_cfun->cfg->x_entry_block_ptr;
7707 hash_map<tree, tree> vars_map;
7708
7709 memset (&d, 0, sizeof (d));
7710 d.orig_block = orig_block;
7711 d.new_block = DECL_INITIAL (dest_cfun->decl);
7712 d.from_context = cfun->decl;
7713 d.to_context = dest_cfun->decl;
7714 d.vars_map = &vars_map;
7715 d.new_label_map = new_label_map;
7716 d.eh_map = eh_map;
7717 d.remap_decls_p = true;
7718
7719 if (gimple_in_ssa_p (cfun))
7720 for (tree arg = DECL_ARGUMENTS (d.to_context); arg; arg = DECL_CHAIN (arg))
7721 {
7722 tree narg = make_ssa_name_fn (dest_cfun, arg, gimple_build_nop ());
7723 set_ssa_default_def (dest_cfun, arg, narg);
7724 vars_map.put (arg, narg);
7725 }
7726
7727 FOR_EACH_VEC_ELT (bbs, i, bb)
7728 {
7729 /* No need to update edge counts on the last block. It has
7730 already been updated earlier when we detached the region from
7731 the original CFG. */
7732 move_block_to_fn (dest_cfun, bb, after, bb != exit_bb, &d);
7733 after = bb;
7734 }
7735
7736 /* Adjust the maximum clique used. */
7737 dest_cfun->last_clique = saved_cfun->last_clique;
7738
7739 loop->aux = NULL;
7740 loop0->aux = NULL;
7741 /* Loop sizes are no longer correct, fix them up. */
7742 loop->num_nodes -= num_nodes;
7743 for (struct loop *outer = loop_outer (loop);
7744 outer; outer = loop_outer (outer))
7745 outer->num_nodes -= num_nodes;
7746 loop0->num_nodes -= bbs.length () - num_nodes;
7747
7748 if (saved_cfun->has_simduid_loops || saved_cfun->has_force_vectorize_loops)
7749 {
7750 struct loop *aloop;
7751 for (i = 0; vec_safe_iterate (loops->larray, i, &aloop); i++)
7752 if (aloop != NULL)
7753 {
7754 if (aloop->simduid)
7755 {
7756 replace_by_duplicate_decl (&aloop->simduid, d.vars_map,
7757 d.to_context);
7758 dest_cfun->has_simduid_loops = true;
7759 }
7760 if (aloop->force_vectorize)
7761 dest_cfun->has_force_vectorize_loops = true;
7762 }
7763 }
7764
7765 /* Rewire BLOCK_SUBBLOCKS of orig_block. */
7766 if (orig_block)
7767 {
7768 tree block;
7769 gcc_assert (BLOCK_SUBBLOCKS (DECL_INITIAL (dest_cfun->decl))
7770 == NULL_TREE);
7771 BLOCK_SUBBLOCKS (DECL_INITIAL (dest_cfun->decl))
7772 = BLOCK_SUBBLOCKS (orig_block);
7773 for (block = BLOCK_SUBBLOCKS (orig_block);
7774 block; block = BLOCK_CHAIN (block))
7775 BLOCK_SUPERCONTEXT (block) = DECL_INITIAL (dest_cfun->decl);
7776 BLOCK_SUBBLOCKS (orig_block) = NULL_TREE;
7777 }
7778
7779 replace_block_vars_by_duplicates (DECL_INITIAL (dest_cfun->decl),
7780 &vars_map, dest_cfun->decl);
7781
7782 if (new_label_map)
7783 htab_delete (new_label_map);
7784 if (eh_map)
7785 delete eh_map;
7786
7787 if (gimple_in_ssa_p (cfun))
7788 {
7789 /* We need to release ssa-names in a defined order, so first find them,
7790 and then iterate in ascending version order. */
7791 bitmap release_names = BITMAP_ALLOC (NULL);
7792 vars_map.traverse<void *, gather_ssa_name_hash_map_from> (release_names);
7793 bitmap_iterator bi;
7794 unsigned i;
7795 EXECUTE_IF_SET_IN_BITMAP (release_names, 0, i, bi)
7796 release_ssa_name (ssa_name (i));
7797 BITMAP_FREE (release_names);
7798 }
7799
7800 /* Rewire the entry and exit blocks. The successor to the entry
7801 block turns into the successor of DEST_FN's ENTRY_BLOCK_PTR in
7802 the child function. Similarly, the predecessor of DEST_FN's
7803 EXIT_BLOCK_PTR turns into the predecessor of EXIT_BLOCK_PTR. We
7804 need to switch CFUN between DEST_CFUN and SAVED_CFUN so that the
7805 various CFG manipulation functions get to the right CFG.
7806
7807 FIXME, this is silly. The CFG ought to become a parameter to
7808 these helpers. */
7809 push_cfun (dest_cfun);
7810 ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = entry_bb->count;
7811 make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), entry_bb, EDGE_FALLTHRU);
7812 if (exit_bb)
7813 {
7814 make_single_succ_edge (exit_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
7815 EXIT_BLOCK_PTR_FOR_FN (cfun)->count = exit_bb->count;
7816 }
7817 else
7818 EXIT_BLOCK_PTR_FOR_FN (cfun)->count = profile_count::zero ();
7819 pop_cfun ();
7820
7821 /* Back in the original function, the SESE region has disappeared,
7822 create a new basic block in its place. */
7823 bb = create_empty_bb (entry_pred[0]);
7824 if (current_loops)
7825 add_bb_to_loop (bb, loop);
7826 for (i = 0; i < num_entry_edges; i++)
7827 {
7828 e = make_edge (entry_pred[i], bb, entry_flag[i]);
7829 e->probability = entry_prob[i];
7830 }
7831
7832 for (i = 0; i < num_exit_edges; i++)
7833 {
7834 e = make_edge (bb, exit_succ[i], exit_flag[i]);
7835 e->probability = exit_prob[i];
7836 }
7837
7838 set_immediate_dominator (CDI_DOMINATORS, bb, dom_entry);
7839 FOR_EACH_VEC_ELT (dom_bbs, i, abb)
7840 set_immediate_dominator (CDI_DOMINATORS, abb, bb);
7841 dom_bbs.release ();
7842
7843 if (exit_bb)
7844 {
7845 free (exit_prob);
7846 free (exit_flag);
7847 free (exit_succ);
7848 }
7849 free (entry_prob);
7850 free (entry_flag);
7851 free (entry_pred);
7852 bbs.release ();
7853
7854 return bb;
7855 }
7856
7857 /* Dump default def DEF to file FILE using FLAGS and indentation
7858 SPC. */
7859
7860 static void
7861 dump_default_def (FILE *file, tree def, int spc, dump_flags_t flags)
7862 {
7863 for (int i = 0; i < spc; ++i)
7864 fprintf (file, " ");
7865 dump_ssaname_info_to_file (file, def, spc);
7866
7867 print_generic_expr (file, TREE_TYPE (def), flags);
7868 fprintf (file, " ");
7869 print_generic_expr (file, def, flags);
7870 fprintf (file, " = ");
7871 print_generic_expr (file, SSA_NAME_VAR (def), flags);
7872 fprintf (file, ";\n");
7873 }
7874
7875 /* Print no_sanitize attribute to FILE for a given attribute VALUE. */
7876
7877 static void
7878 print_no_sanitize_attr_value (FILE *file, tree value)
7879 {
7880 unsigned int flags = tree_to_uhwi (value);
7881 bool first = true;
7882 for (int i = 0; sanitizer_opts[i].name != NULL; ++i)
7883 {
7884 if ((sanitizer_opts[i].flag & flags) == sanitizer_opts[i].flag)
7885 {
7886 if (!first)
7887 fprintf (file, " | ");
7888 fprintf (file, "%s", sanitizer_opts[i].name);
7889 first = false;
7890 }
7891 }
7892 }
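
/* For example, a decl carrying no_sanitize ("address", "undefined")
   would be printed roughly as "address | undefined"; the exact names
   come from the sanitizer_opts table in opts.c.  */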
7893
7894 /* Dump FUNCTION_DECL FN to file FILE using FLAGS (see TDF_* in dumpfile.h). */
7896
7897 void
7898 dump_function_to_file (tree fndecl, FILE *file, dump_flags_t flags)
7899 {
7900 tree arg, var, old_current_fndecl = current_function_decl;
7901 struct function *dsf;
7902 bool ignore_topmost_bind = false, any_var = false;
7903 basic_block bb;
7904 tree chain;
7905 bool tmclone = (TREE_CODE (fndecl) == FUNCTION_DECL
7906 && decl_is_tm_clone (fndecl));
7907 struct function *fun = DECL_STRUCT_FUNCTION (fndecl);
7908
7909 if (DECL_ATTRIBUTES (fndecl) != NULL_TREE)
7910 {
7911 fprintf (file, "__attribute__((");
7912
7913 bool first = true;
7914 tree chain;
7915 for (chain = DECL_ATTRIBUTES (fndecl); chain;
7916 first = false, chain = TREE_CHAIN (chain))
7917 {
7918 if (!first)
7919 fprintf (file, ", ");
7920
7921 tree name = get_attribute_name (chain);
7922 print_generic_expr (file, name, dump_flags);
7923 if (TREE_VALUE (chain) != NULL_TREE)
7924 {
7925 fprintf (file, " (");
7926
7927 if (strstr (IDENTIFIER_POINTER (name), "no_sanitize"))
7928 print_no_sanitize_attr_value (file, TREE_VALUE (chain));
7929 else
7930 print_generic_expr (file, TREE_VALUE (chain), dump_flags);
7931 fprintf (file, ")");
7932 }
7933 }
7934
7935 fprintf (file, "))\n");
7936 }
7937
7938 current_function_decl = fndecl;
7939 if (flags & TDF_GIMPLE)
7940 {
7941 print_generic_expr (file, TREE_TYPE (TREE_TYPE (fndecl)),
7942 dump_flags | TDF_SLIM);
7943 fprintf (file, " __GIMPLE (%s)\n%s (",
7944 (fun->curr_properties & PROP_ssa) ? "ssa"
7945 : (fun->curr_properties & PROP_cfg) ? "cfg"
7946 : "",
7947 function_name (fun));
7948 }
7949 else
7950 fprintf (file, "%s %s(", function_name (fun), tmclone ? "[tm-clone] " : "");
7951
7952 arg = DECL_ARGUMENTS (fndecl);
7953 while (arg)
7954 {
7955 print_generic_expr (file, TREE_TYPE (arg), dump_flags);
7956 fprintf (file, " ");
7957 print_generic_expr (file, arg, dump_flags);
7958 if (DECL_CHAIN (arg))
7959 fprintf (file, ", ");
7960 arg = DECL_CHAIN (arg);
7961 }
7962 fprintf (file, ")\n");
7963
7964 dsf = DECL_STRUCT_FUNCTION (fndecl);
7965 if (dsf && (flags & TDF_EH))
7966 dump_eh_tree (file, dsf);
7967
7968 if (flags & TDF_RAW && !gimple_has_body_p (fndecl))
7969 {
7970 dump_node (fndecl, TDF_SLIM | flags, file);
7971 current_function_decl = old_current_fndecl;
7972 return;
7973 }
7974
7975 /* When GIMPLE is lowered, the variables are no longer available in
7976 BIND_EXPRs, so display them separately. */
7977 if (fun && fun->decl == fndecl && (fun->curr_properties & PROP_gimple_lcf))
7978 {
7979 unsigned ix;
7980 ignore_topmost_bind = true;
7981
7982 fprintf (file, "{\n");
7983 if (gimple_in_ssa_p (fun)
7984 && (flags & TDF_ALIAS))
7985 {
7986 for (arg = DECL_ARGUMENTS (fndecl); arg != NULL;
7987 arg = DECL_CHAIN (arg))
7988 {
7989 tree def = ssa_default_def (fun, arg);
7990 if (def)
7991 dump_default_def (file, def, 2, flags);
7992 }
7993
7994 tree res = DECL_RESULT (fun->decl);
7995 if (res != NULL_TREE
7996 && DECL_BY_REFERENCE (res))
7997 {
7998 tree def = ssa_default_def (fun, res);
7999 if (def)
8000 dump_default_def (file, def, 2, flags);
8001 }
8002
8003 tree static_chain = fun->static_chain_decl;
8004 if (static_chain != NULL_TREE)
8005 {
8006 tree def = ssa_default_def (fun, static_chain);
8007 if (def)
8008 dump_default_def (file, def, 2, flags);
8009 }
8010 }
8011
8012 if (!vec_safe_is_empty (fun->local_decls))
8013 FOR_EACH_LOCAL_DECL (fun, ix, var)
8014 {
8015 print_generic_decl (file, var, flags);
8016 fprintf (file, "\n");
8017
8018 any_var = true;
8019 }
8020
8021 tree name;
8022
8023 if (gimple_in_ssa_p (cfun))
8024 FOR_EACH_SSA_NAME (ix, name, cfun)
8025 {
8026 if (!SSA_NAME_VAR (name))
8027 {
8028 fprintf (file, " ");
8029 print_generic_expr (file, TREE_TYPE (name), flags);
8030 fprintf (file, " ");
8031 print_generic_expr (file, name, flags);
8032 fprintf (file, ";\n");
8033
8034 any_var = true;
8035 }
8036 }
8037 }
8038
8039 if (fun && fun->decl == fndecl
8040 && fun->cfg
8041 && basic_block_info_for_fn (fun))
8042 {
8043 /* If the CFG has been built, emit a CFG-based dump. */
8044 if (!ignore_topmost_bind)
8045 fprintf (file, "{\n");
8046
8047 if (any_var && n_basic_blocks_for_fn (fun))
8048 fprintf (file, "\n");
8049
8050 FOR_EACH_BB_FN (bb, fun)
8051 dump_bb (file, bb, 2, flags);
8052
8053 fprintf (file, "}\n");
8054 }
8055 else if (fun->curr_properties & PROP_gimple_any)
8056 {
8057 /* The function is now in GIMPLE form but the CFG has not been
8058 built yet. Emit the single sequence of GIMPLE statements
8059 that make up its body. */
8060 gimple_seq body = gimple_body (fndecl);
8061
8062 if (gimple_seq_first_stmt (body)
8063 && gimple_seq_first_stmt (body) == gimple_seq_last_stmt (body)
8064 && gimple_code (gimple_seq_first_stmt (body)) == GIMPLE_BIND)
8065 print_gimple_seq (file, body, 0, flags);
8066 else
8067 {
8068 if (!ignore_topmost_bind)
8069 fprintf (file, "{\n");
8070
8071 if (any_var)
8072 fprintf (file, "\n");
8073
8074 print_gimple_seq (file, body, 2, flags);
8075 fprintf (file, "}\n");
8076 }
8077 }
8078 else
8079 {
8080 int indent;
8081
8082 /* Make a tree based dump. */
8083 chain = DECL_SAVED_TREE (fndecl);
8084 if (chain && TREE_CODE (chain) == BIND_EXPR)
8085 {
8086 if (ignore_topmost_bind)
8087 {
8088 chain = BIND_EXPR_BODY (chain);
8089 indent = 2;
8090 }
8091 else
8092 indent = 0;
8093 }
8094 else
8095 {
8096 if (!ignore_topmost_bind)
8097 {
8098 fprintf (file, "{\n");
8099 /* No topmost bind, pretend it's ignored for later. */
8100 ignore_topmost_bind = true;
8101 }
8102 indent = 2;
8103 }
8104
8105 if (any_var)
8106 fprintf (file, "\n");
8107
8108 print_generic_stmt_indented (file, chain, flags, indent);
8109 if (ignore_topmost_bind)
8110 fprintf (file, "}\n");
8111 }
8112
8113 if (flags & TDF_ENUMERATE_LOCALS)
8114 dump_enumerated_decls (file, flags);
8115 fprintf (file, "\n\n");
8116
8117 current_function_decl = old_current_fndecl;
8118 }
8119
8120 /* Dump FUNCTION_DECL FN to stderr using FLAGS (see TDF_* in dumpfile.h). */
8121
8122 DEBUG_FUNCTION void
8123 debug_function (tree fn, dump_flags_t flags)
8124 {
8125 dump_function_to_file (fn, stderr, flags);
8126 }
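
/* This is most useful from the debugger, e.g. (assuming a live cfun):

     (gdb) call debug_function (cfun->decl, TDF_DETAILS)  */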
8127
8128
8129 /* Print on FILE the indexes for the predecessors of basic_block BB. */
8130
8131 static void
8132 print_pred_bbs (FILE *file, basic_block bb)
8133 {
8134 edge e;
8135 edge_iterator ei;
8136
8137 FOR_EACH_EDGE (e, ei, bb->preds)
8138 fprintf (file, "bb_%d ", e->src->index);
8139 }
8140
8141
8142 /* Print on FILE the indexes for the successors of basic_block BB. */
8143
8144 static void
8145 print_succ_bbs (FILE *file, basic_block bb)
8146 {
8147 edge e;
8148 edge_iterator ei;
8149
8150 FOR_EACH_EDGE (e, ei, bb->succs)
8151 fprintf (file, "bb_%d ", e->dest->index);
8152 }
8153
8154 /* Print to FILE the basic block BB, according to the VERBOSITY level. */
8155
8156 void
8157 print_loops_bb (FILE *file, basic_block bb, int indent, int verbosity)
8158 {
8159 char *s_indent = (char *) alloca ((size_t) indent + 1);
8160 memset ((void *) s_indent, ' ', (size_t) indent);
8161 s_indent[indent] = '\0';
8162
8163 /* Print basic_block's header. */
8164 if (verbosity >= 2)
8165 {
8166 fprintf (file, "%s bb_%d (preds = {", s_indent, bb->index);
8167 print_pred_bbs (file, bb);
8168 fprintf (file, "}, succs = {");
8169 print_succ_bbs (file, bb);
8170 fprintf (file, "})\n");
8171 }
8172
8173 /* Print basic_block's body. */
8174 if (verbosity >= 3)
8175 {
8176 fprintf (file, "%s {\n", s_indent);
8177 dump_bb (file, bb, indent + 4, TDF_VOPS|TDF_MEMSYMS);
8178 fprintf (file, "%s }\n", s_indent);
8179 }
8180 }
8181
8182 static void print_loop_and_siblings (FILE *, struct loop *, int, int);
8183
8184 /* Pretty print LOOP on FILE, indented INDENT spaces. Depending on
8185 the VERBOSITY level, this outputs the contents of the loop, or
8186 just its structure. */
8187
8188 static void
8189 print_loop (FILE *file, struct loop *loop, int indent, int verbosity)
8190 {
8191 char *s_indent;
8192 basic_block bb;
8193
8194 if (loop == NULL)
8195 return;
8196
8197 s_indent = (char *) alloca ((size_t) indent + 1);
8198 memset ((void *) s_indent, ' ', (size_t) indent);
8199 s_indent[indent] = '\0';
8200
8201 /* Print loop's header. */
8202 fprintf (file, "%sloop_%d (", s_indent, loop->num);
8203 if (loop->header)
8204 fprintf (file, "header = %d", loop->header->index);
8205 else
8206 {
8207 fprintf (file, "deleted)\n");
8208 return;
8209 }
8210 if (loop->latch)
8211 fprintf (file, ", latch = %d", loop->latch->index);
8212 else
8213 fprintf (file, ", multiple latches");
8214 fprintf (file, ", niter = ");
8215 print_generic_expr (file, loop->nb_iterations);
8216
8217 if (loop->any_upper_bound)
8218 {
8219 fprintf (file, ", upper_bound = ");
8220 print_decu (loop->nb_iterations_upper_bound, file);
8221 }
8222 if (loop->any_likely_upper_bound)
8223 {
8224 fprintf (file, ", likely_upper_bound = ");
8225 print_decu (loop->nb_iterations_likely_upper_bound, file);
8226 }
8227
8228 if (loop->any_estimate)
8229 {
8230 fprintf (file, ", estimate = ");
8231 print_decu (loop->nb_iterations_estimate, file);
8232 }
8233 if (loop->unroll)
8234 fprintf (file, ", unroll = %d", loop->unroll);
8235 fprintf (file, ")\n");
8236
8237 /* Print loop's body. */
8238 if (verbosity >= 1)
8239 {
8240 fprintf (file, "%s{\n", s_indent);
8241 FOR_EACH_BB_FN (bb, cfun)
8242 if (bb->loop_father == loop)
8243 print_loops_bb (file, bb, indent, verbosity);
8244
8245 print_loop_and_siblings (file, loop->inner, indent + 2, verbosity);
8246 fprintf (file, "%s}\n", s_indent);
8247 }
8248 }
8249
8250 /* Print the LOOP and its sibling loops on FILE, indented INDENT
8251 spaces. Depending on the VERBOSITY level, this outputs the contents
8252 of the loop, or just its structure. */
8253
8254 static void
8255 print_loop_and_siblings (FILE *file, struct loop *loop, int indent,
8256 int verbosity)
8257 {
8258 if (loop == NULL)
8259 return;
8260
8261 print_loop (file, loop, indent, verbosity);
8262 print_loop_and_siblings (file, loop->next, indent, verbosity);
8263 }
8264
8265 /* Follow a CFG edge from the entry point of the program, and on entry
8266 of a loop, pretty print the loop structure on FILE. */
8267
8268 void
8269 print_loops (FILE *file, int verbosity)
8270 {
8271 basic_block bb;
8272
8273 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
8274 fprintf (file, "\nLoops in function: %s\n", current_function_name ());
8275 if (bb && bb->loop_father)
8276 print_loop_and_siblings (file, bb->loop_father, 0, verbosity);
8277 }
8278
8279 /* Dump a loop. */
8280
8281 DEBUG_FUNCTION void
8282 debug (struct loop &ref)
8283 {
8284 print_loop (stderr, &ref, 0, /*verbosity*/0);
8285 }
8286
8287 DEBUG_FUNCTION void
8288 debug (struct loop *ptr)
8289 {
8290 if (ptr)
8291 debug (*ptr);
8292 else
8293 fprintf (stderr, "<nil>\n");
8294 }
8295
8296 /* Dump a loop verbosely. */
8297
8298 DEBUG_FUNCTION void
8299 debug_verbose (struct loop &ref)
8300 {
8301 print_loop (stderr, &ref, 0, /*verbosity*/3);
8302 }
8303
8304 DEBUG_FUNCTION void
8305 debug_verbose (struct loop *ptr)
8306 {
8307 if (ptr)
8308 debug_verbose (*ptr);
8309 else
8310 fprintf (stderr, "<nil>\n");
8311 }
8312
8313
8314 /* Debugging loops structure at tree level, at some VERBOSITY level. */
8315
8316 DEBUG_FUNCTION void
8317 debug_loops (int verbosity)
8318 {
8319 print_loops (stderr, verbosity);
8320 }
8321
8322 /* Print on stderr the code of LOOP, at some VERBOSITY level. */
8323
8324 DEBUG_FUNCTION void
8325 debug_loop (struct loop *loop, int verbosity)
8326 {
8327 print_loop (stderr, loop, 0, verbosity);
8328 }
8329
8330 /* Print on stderr the code of loop number NUM, at some VERBOSITY
8331 level. */
8332
8333 DEBUG_FUNCTION void
8334 debug_loop_num (unsigned num, int verbosity)
8335 {
8336 debug_loop (get_loop (cfun, num), verbosity);
8337 }
8338
8339 /* Return true if BB ends with a call, possibly followed by some
8340 instructions that must stay with the call. Return false
8341 otherwise. */
8342
8343 static bool
8344 gimple_block_ends_with_call_p (basic_block bb)
8345 {
8346 gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
8347 return !gsi_end_p (gsi) && is_gimple_call (gsi_stmt (gsi));
8348 }
8349
8350
8351 /* Return true if BB ends with a conditional branch. Return false
8352 otherwise. */
8353
8354 static bool
8355 gimple_block_ends_with_condjump_p (const_basic_block bb)
8356 {
8357 gimple *stmt = last_stmt (CONST_CAST_BB (bb));
8358 return (stmt && gimple_code (stmt) == GIMPLE_COND);
8359 }
8360
8361
8362 /* Return true if statement T may terminate execution of BB in ways not
8363 explicitly represented in the CFG. */
8364
8365 bool
8366 stmt_can_terminate_bb_p (gimple *t)
8367 {
8368 tree fndecl = NULL_TREE;
8369 int call_flags = 0;
8370
8371 /* An EH exception that is not handled internally terminates execution
8372 of the whole function. */
8373 if (stmt_can_throw_external (cfun, t))
8374 return true;
8375
8376 /* NORETURN and LONGJMP calls already have an edge to exit.
8377 CONST and PURE calls do not need one.
8378 We don't currently check for CONST and PURE here, although
8379 it would be a good idea, because those attributes are
8380 figured out from the RTL in mark_constant_function, and
8381 the counter incrementation code from -fprofile-arcs
8382 leads to different results from -fbranch-probabilities. */
8383 if (is_gimple_call (t))
8384 {
8385 fndecl = gimple_call_fndecl (t);
8386 call_flags = gimple_call_flags (t);
8387 }
8388
8389 if (is_gimple_call (t)
8390 && fndecl
8391 && fndecl_built_in_p (fndecl)
8392 && (call_flags & ECF_NOTHROW)
8393 && !(call_flags & ECF_RETURNS_TWICE)
8394 /* fork() doesn't really return twice, but the effect of
8395 wrapping it in __gcov_fork() which calls __gcov_flush()
8396 and clears the counters before forking has the same
8397 effect as returning twice. Force a fake edge. */
8398 && !fndecl_built_in_p (fndecl, BUILT_IN_FORK))
8399 return false;
8400
8401 if (is_gimple_call (t))
8402 {
8403 edge_iterator ei;
8404 edge e;
8405 basic_block bb;
8406
8407 if (call_flags & (ECF_PURE | ECF_CONST)
8408 && !(call_flags & ECF_LOOPING_CONST_OR_PURE))
8409 return false;
8410
8411 /* A function call may longjmp, terminate the program or do other things.
8412 Special-case noreturn calls that have non-abnormal edges out, as in
8413 this case the fact is sufficiently represented by the lack of edges out of T. */
8414 if (!(call_flags & ECF_NORETURN))
8415 return true;
8416
8417 bb = gimple_bb (t);
8418 FOR_EACH_EDGE (e, ei, bb->succs)
8419 if ((e->flags & EDGE_FAKE) == 0)
8420 return true;
8421 }
8422
8423 if (gasm *asm_stmt = dyn_cast <gasm *> (t))
8424 if (gimple_asm_volatile_p (asm_stmt) || gimple_asm_input_p (asm_stmt))
8425 return true;
8426
8427 return false;
8428 }
8429
8430
8431 /* Add fake edges to the function exit for any non-constant and
8432 non-noreturn calls (or noreturn calls with EH/abnormal edges) and
8433 volatile inline assembly, in the bitmap of blocks specified by BLOCKS
8434 or in the whole CFG if BLOCKS is zero. Return the number of blocks
8435 that were split.
8436
8437 The goal is to expose cases in which entering a basic block does
8438 not imply that all subsequent instructions must be executed. */
8439
8440 static int
8441 gimple_flow_call_edges_add (sbitmap blocks)
8442 {
8443 int i;
8444 int blocks_split = 0;
8445 int last_bb = last_basic_block_for_fn (cfun);
8446 bool check_last_block = false;
8447
8448 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
8449 return 0;
8450
8451 if (! blocks)
8452 check_last_block = true;
8453 else
8454 check_last_block = bitmap_bit_p (blocks,
8455 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
8456
8457 /* In the last basic block, before epilogue generation, there will be
8458 a fallthru edge to EXIT. Special care is required if the last insn
8459 of the last basic block is a call because make_edge folds duplicate
8460 edges, which would result in the fallthru edge also being marked
8461 fake, which would result in the fallthru edge being removed by
8462 remove_fake_edges, which would result in an invalid CFG.
8463
8464 Moreover, we can't elide the outgoing fake edge, since the block
8465 profiler needs to take this into account in order to solve the minimal
8466 spanning tree in the case that the call doesn't return.
8467
8468 Handle this by adding a dummy instruction in a new last basic block. */
8469 if (check_last_block)
8470 {
8471 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
8472 gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
8473 gimple *t = NULL;
8474
8475 if (!gsi_end_p (gsi))
8476 t = gsi_stmt (gsi);
8477
8478 if (t && stmt_can_terminate_bb_p (t))
8479 {
8480 edge e;
8481
8482 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
8483 if (e)
8484 {
8485 gsi_insert_on_edge (e, gimple_build_nop ());
8486 gsi_commit_edge_inserts ();
8487 }
8488 }
8489 }
8490
8491 /* Now add fake edges to the function exit for any non-constant
8492 calls since there is no way that we can determine if they will
8493 return or not... */
8494 for (i = 0; i < last_bb; i++)
8495 {
8496 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
8497 gimple_stmt_iterator gsi;
8498 gimple *stmt, *last_stmt;
8499
8500 if (!bb)
8501 continue;
8502
8503 if (blocks && !bitmap_bit_p (blocks, i))
8504 continue;
8505
8506 gsi = gsi_last_nondebug_bb (bb);
8507 if (!gsi_end_p (gsi))
8508 {
8509 last_stmt = gsi_stmt (gsi);
8510 do
8511 {
8512 stmt = gsi_stmt (gsi);
8513 if (stmt_can_terminate_bb_p (stmt))
8514 {
8515 edge e;
8516
8517 /* The handling above of the final block before the
8518 epilogue should be enough to verify that there is
8519 no edge to the exit block in CFG already.
8520 Calling make_edge in such case would cause us to
8521 mark that edge as fake and remove it later. */
8522 if (flag_checking && stmt == last_stmt)
8523 {
8524 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
8525 gcc_assert (e == NULL);
8526 }
8527
8528 /* Note that the following may create a new basic block
8529 and renumber the existing basic blocks. */
8530 if (stmt != last_stmt)
8531 {
8532 e = split_block (bb, stmt);
8533 if (e)
8534 blocks_split++;
8535 }
8536 e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
8537 e->probability = profile_probability::guessed_never ();
8538 }
8539 gsi_prev (&gsi);
8540 }
8541 while (!gsi_end_p (gsi));
8542 }
8543 }
8544
8545 if (blocks_split)
8546 checking_verify_flow_info ();
8547
8548 return blocks_split;
8549 }
8550
8551 /* Removes edge E and all the blocks dominated by it, and updates dominance
8552 information. The IL in E->src needs to be updated separately.
8553 If dominance info is not available, only the edge E is removed. */
8554
8555 void
8556 remove_edge_and_dominated_blocks (edge e)
8557 {
8558 vec<basic_block> bbs_to_remove = vNULL;
8559 vec<basic_block> bbs_to_fix_dom = vNULL;
8560 edge f;
8561 edge_iterator ei;
8562 bool none_removed = false;
8563 unsigned i;
8564 basic_block bb, dbb;
8565 bitmap_iterator bi;
8566
8567 /* If we are removing a path inside a non-root loop, that may change
8568 loop ownership of blocks or remove loops. Mark loops for fixup. */
8569 if (current_loops
8570 && loop_outer (e->src->loop_father) != NULL
8571 && e->src->loop_father == e->dest->loop_father)
8572 loops_state_set (LOOPS_NEED_FIXUP);
8573
8574 if (!dom_info_available_p (CDI_DOMINATORS))
8575 {
8576 remove_edge (e);
8577 return;
8578 }
8579
8580 /* No updating is needed for edges to exit. */
8581 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
8582 {
8583 if (cfgcleanup_altered_bbs)
8584 bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
8585 remove_edge (e);
8586 return;
8587 }
8588
8589 /* First, we find the basic blocks to remove. If E->dest has a predecessor
8590 that is not dominated by E->dest, then this set is empty. Otherwise,
8591 all the basic blocks dominated by E->dest are removed.
8592
8593 Also, to DF_IDOM we store the immediate dominators of the blocks in
8594 the dominance frontier of E (i.e., of the successors of the
8595 removed blocks, if there are any, and of E->dest otherwise). */
8596 FOR_EACH_EDGE (f, ei, e->dest->preds)
8597 {
8598 if (f == e)
8599 continue;
8600
8601 if (!dominated_by_p (CDI_DOMINATORS, f->src, e->dest))
8602 {
8603 none_removed = true;
8604 break;
8605 }
8606 }
8607
8608 auto_bitmap df, df_idom;
8609 if (none_removed)
8610 bitmap_set_bit (df_idom,
8611 get_immediate_dominator (CDI_DOMINATORS, e->dest)->index);
8612 else
8613 {
8614 bbs_to_remove = get_all_dominated_blocks (CDI_DOMINATORS, e->dest);
8615 FOR_EACH_VEC_ELT (bbs_to_remove, i, bb)
8616 {
8617 FOR_EACH_EDGE (f, ei, bb->succs)
8618 {
8619 if (f->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
8620 bitmap_set_bit (df, f->dest->index);
8621 }
8622 }
8623 FOR_EACH_VEC_ELT (bbs_to_remove, i, bb)
8624 bitmap_clear_bit (df, bb->index);
8625
8626 EXECUTE_IF_SET_IN_BITMAP (df, 0, i, bi)
8627 {
8628 bb = BASIC_BLOCK_FOR_FN (cfun, i);
8629 bitmap_set_bit (df_idom,
8630 get_immediate_dominator (CDI_DOMINATORS, bb)->index);
8631 }
8632 }
8633
8634 if (cfgcleanup_altered_bbs)
8635 {
8636 /* Record the set of the altered basic blocks. */
8637 bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
8638 bitmap_ior_into (cfgcleanup_altered_bbs, df);
8639 }
8640
8641 /* Remove E and the cancelled blocks. */
8642 if (none_removed)
8643 remove_edge (e);
8644 else
8645 {
8646 /* Walk backwards so as to get a chance to substitute all
8647 released DEFs into debug stmts. See
8648 eliminate_unnecessary_stmts() in tree-ssa-dce.c for more
8649 details. */
8650 for (i = bbs_to_remove.length (); i-- > 0; )
8651 delete_basic_block (bbs_to_remove[i]);
8652 }
8653
8654 /* Update the dominance information. The immediate dominator may change only
8655 for blocks whose immediate dominator belongs to DF_IDOM:
8656
8657 Suppose that idom(X) = Y before removal of E and idom(X) != Y after the
8658 removal. Let Z be an arbitrary block such that idom(Z) = Y and
8659 Z dominates X after the removal. Before removal, there exists a path P
8660 from Y to X that avoids Z. Let F be the last edge on P that is
8661 removed, and let W = F->dest. Before removal, idom(W) = Y (since Y
8662 dominates W, and because of P, Z does not dominate W), and W belongs to
8663 the dominance frontier of E. Therefore, Y belongs to DF_IDOM. */
8664 EXECUTE_IF_SET_IN_BITMAP (df_idom, 0, i, bi)
8665 {
8666 bb = BASIC_BLOCK_FOR_FN (cfun, i);
8667 for (dbb = first_dom_son (CDI_DOMINATORS, bb);
8668 dbb;
8669 dbb = next_dom_son (CDI_DOMINATORS, dbb))
8670 bbs_to_fix_dom.safe_push (dbb);
8671 }
8672
8673 iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
8674
8675 bbs_to_remove.release ();
8676 bbs_to_fix_dom.release ();
8677 }
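
/* A small worked example of the dominance update above: in the diamond

     A -> B, A -> C, B -> D, C -> D

   removing the edge A -> B leaves B without predecessors, so B (and
   nothing else) is deleted.  D is in the dominance frontier of the
   removed block, so its old immediate dominator A lands in DF_IDOM,
   and iterating over A's dominator sons recomputes idom (D) = C.  */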
8678
8679 /* Purge dead EH edges from basic block BB. */
8680
8681 bool
8682 gimple_purge_dead_eh_edges (basic_block bb)
8683 {
8684 bool changed = false;
8685 edge e;
8686 edge_iterator ei;
8687 gimple *stmt = last_stmt (bb);
8688
8689 if (stmt && stmt_can_throw_internal (cfun, stmt))
8690 return false;
8691
8692 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
8693 {
8694 if (e->flags & EDGE_EH)
8695 {
8696 remove_edge_and_dominated_blocks (e);
8697 changed = true;
8698 }
8699 else
8700 ei_next (&ei);
8701 }
8702
8703 return changed;
8704 }
8705
8706 /* Purge dead EH edges from the basic blocks listed in BLOCKS. */
8707
8708 bool
8709 gimple_purge_all_dead_eh_edges (const_bitmap blocks)
8710 {
8711 bool changed = false;
8712 unsigned i;
8713 bitmap_iterator bi;
8714
8715 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
8716 {
8717 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
8718
8719 /* Earlier gimple_purge_dead_eh_edges could have removed
8720 this basic block already. */
8721 gcc_assert (bb || changed);
8722 if (bb != NULL)
8723 changed |= gimple_purge_dead_eh_edges (bb);
8724 }
8725
8726 return changed;
8727 }
8728
8729 /* Purge dead abnormal call edges from basic block BB. */
8730
8731 bool
8732 gimple_purge_dead_abnormal_call_edges (basic_block bb)
8733 {
8734 bool changed = false;
8735 edge e;
8736 edge_iterator ei;
8737 gimple *stmt = last_stmt (bb);
8738
8739 if (!cfun->has_nonlocal_label
8740 && !cfun->calls_setjmp)
8741 return false;
8742
8743 if (stmt && stmt_can_make_abnormal_goto (stmt))
8744 return false;
8745
8746 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
8747 {
8748 if (e->flags & EDGE_ABNORMAL)
8749 {
8750 if (e->flags & EDGE_FALLTHRU)
8751 e->flags &= ~EDGE_ABNORMAL;
8752 else
8753 remove_edge_and_dominated_blocks (e);
8754 changed = true;
8755 }
8756 else
8757 ei_next (&ei);
8758 }
8759
8760 return changed;
8761 }
8762
8763 /* Purge dead abnormal call edges from the basic blocks listed in BLOCKS. */
8764
8765 bool
8766 gimple_purge_all_dead_abnormal_call_edges (const_bitmap blocks)
8767 {
8768 bool changed = false;
8769 unsigned i;
8770 bitmap_iterator bi;
8771
8772 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
8773 {
8774 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
8775
8776 /* Earlier gimple_purge_dead_abnormal_call_edges could have removed
8777 this basic block already. */
8778 gcc_assert (bb || changed);
8779 if (bb != NULL)
8780 changed |= gimple_purge_dead_abnormal_call_edges (bb);
8781 }
8782
8783 return changed;
8784 }
8785
8786 /* This function is called whenever a new edge is created or
8787 redirected. */
8788
8789 static void
8790 gimple_execute_on_growing_pred (edge e)
8791 {
8792 basic_block bb = e->dest;
8793
8794 if (!gimple_seq_empty_p (phi_nodes (bb)))
8795 reserve_phi_args_for_new_edge (bb);
8796 }
8797
8798 /* This function is called immediately before edge E is removed from
8799 the edge vector E->dest->preds. */
8800
8801 static void
8802 gimple_execute_on_shrinking_pred (edge e)
8803 {
8804 if (!gimple_seq_empty_p (phi_nodes (e->dest)))
8805 remove_phi_args (e);
8806 }
8807
8808 /*---------------------------------------------------------------------------
8809 Helper functions for Loop versioning
8810 ---------------------------------------------------------------------------*/
8811
8812 /* Adjust phi nodes for 'first' basic block. 'second' basic block is a copy
8813 of 'first'. Both of them are dominated by 'new_head' basic block. When
8814 'new_head' was created by splitting 'second's incoming edge, it received
8815 phi arguments on that edge from split_edge(). Later, an additional edge
8816 'e' was created to connect 'new_head' and 'first'. Now this routine adds
8817 to edge 'e' the phi args that the edge from 'new_head' to 'second'
8818 received as part of the edge splitting. */
8819
8820 static void
8821 gimple_lv_adjust_loop_header_phi (basic_block first, basic_block second,
8822 basic_block new_head, edge e)
8823 {
8824 gphi *phi1, *phi2;
8825 gphi_iterator psi1, psi2;
8826 tree def;
8827 edge e2 = find_edge (new_head, second);
8828
8829 /* Because NEW_HEAD has been created by splitting SECOND's incoming
8830 edge, we should always have an edge from NEW_HEAD to SECOND. */
8831 gcc_assert (e2 != NULL);
8832
8833 /* Browse all 'second' basic block phi nodes and add phi args to
8834 edge 'e' for 'first' head. PHI args are always in correct order. */
8835
8836 for (psi2 = gsi_start_phis (second),
8837 psi1 = gsi_start_phis (first);
8838 !gsi_end_p (psi2) && !gsi_end_p (psi1);
8839 gsi_next (&psi2), gsi_next (&psi1))
8840 {
8841 phi1 = psi1.phi ();
8842 phi2 = psi2.phi ();
8843 def = PHI_ARG_DEF (phi2, e2->dest_idx);
8844 add_phi_arg (phi1, def, e, gimple_phi_arg_location_from_edge (phi2, e2));
8845 }
8846 }
8847
8848
8849 /* Add an if-else statement to COND_BB with condition COND_EXPR.
8850 SECOND_HEAD is the destination of the THEN part and FIRST_HEAD is
8851 the destination of the ELSE part. */
8852
8853 static void
8854 gimple_lv_add_condition_to_bb (basic_block first_head ATTRIBUTE_UNUSED,
8855 basic_block second_head ATTRIBUTE_UNUSED,
8856 basic_block cond_bb, void *cond_e)
8857 {
8858 gimple_stmt_iterator gsi;
8859 gimple *new_cond_expr;
8860 tree cond_expr = (tree) cond_e;
8861 edge e0;
8862
8863 /* Build new conditional expr */
8864 new_cond_expr = gimple_build_cond_from_tree (cond_expr,
8865 NULL_TREE, NULL_TREE);
8866
8867 /* Add new cond in cond_bb. */
8868 gsi = gsi_last_bb (cond_bb);
8869 gsi_insert_after (&gsi, new_cond_expr, GSI_NEW_STMT);
8870
8871 /* Adjust edges appropriately to connect new head with first head
8872 as well as second head. */
8873 e0 = single_succ_edge (cond_bb);
8874 e0->flags &= ~EDGE_FALLTHRU;
8875 e0->flags |= EDGE_FALSE_VALUE;
8876 }
8877
8878
8879 /* Do book-keeping of basic block BB for the profile consistency checker.
8880 Store the counts in RECORD. */
8881 static void
8882 gimple_account_profile_record (basic_block bb,
8883 struct profile_record *record)
8884 {
8885 gimple_stmt_iterator i;
8886 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
8887 {
8888 record->size
8889 += estimate_num_insns (gsi_stmt (i), &eni_size_weights);
8890 if (bb->count.initialized_p ())
8891 record->time
8892 += estimate_num_insns (gsi_stmt (i),
8893 &eni_time_weights) * bb->count.to_gcov_type ();
8894 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
8895 record->time
8896 += estimate_num_insns (gsi_stmt (i),
8897 &eni_time_weights) * bb->count.to_frequency (cfun);
8898 }
8899 }
8900
8901 struct cfg_hooks gimple_cfg_hooks = {
8902 "gimple",
8903 gimple_verify_flow_info,
8904 gimple_dump_bb, /* dump_bb */
8905 gimple_dump_bb_for_graph, /* dump_bb_for_graph */
8906 create_bb, /* create_basic_block */
8907 gimple_redirect_edge_and_branch, /* redirect_edge_and_branch */
8908 gimple_redirect_edge_and_branch_force, /* redirect_edge_and_branch_force */
8909 gimple_can_remove_branch_p, /* can_remove_branch_p */
8910 remove_bb, /* delete_basic_block */
8911 gimple_split_block, /* split_block */
8912 gimple_move_block_after, /* move_block_after */
8913 gimple_can_merge_blocks_p, /* can_merge_blocks_p */
8914 gimple_merge_blocks, /* merge_blocks */
8915 gimple_predict_edge, /* predict_edge */
8916 gimple_predicted_by_p, /* predicted_by_p */
8917 gimple_can_duplicate_bb_p, /* can_duplicate_block_p */
8918 gimple_duplicate_bb, /* duplicate_block */
8919 gimple_split_edge, /* split_edge */
8920 gimple_make_forwarder_block, /* make_forwarder_block */
8921 NULL, /* tidy_fallthru_edge */
8922 NULL, /* force_nonfallthru */
8923 gimple_block_ends_with_call_p,/* block_ends_with_call_p */
8924 gimple_block_ends_with_condjump_p, /* block_ends_with_condjump_p */
8925 gimple_flow_call_edges_add, /* flow_call_edges_add */
8926 gimple_execute_on_growing_pred, /* execute_on_growing_pred */
8927 gimple_execute_on_shrinking_pred, /* execute_on_shrinking_pred */
8928 gimple_duplicate_loop_to_header_edge, /* duplicate loop for trees */
8929 gimple_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
8930 gimple_lv_adjust_loop_header_phi, /* lv_adjust_loop_header_phi*/
8931 extract_true_false_edges_from_block, /* extract_cond_bb_edges */
8932 flush_pending_stmts, /* flush_pending_stmts */
8933 gimple_empty_block_p, /* block_empty_p */
8934 gimple_split_block_before_cond_jump, /* split_block_before_cond_jump */
8935 gimple_account_profile_record,
8936 };
8937
8938
8939 /* Split all critical edges. */
8940
8941 unsigned int
8942 split_critical_edges (void)
8943 {
8944 basic_block bb;
8945 edge e;
8946 edge_iterator ei;
8947
8948 /* split_edge can redirect edges out of SWITCH_EXPRs, which can get
8949 expensive. So we want to enable recording of edge to CASE_LABEL_EXPR
8950 mappings around the calls to split_edge. */
8951 start_recording_case_labels ();
8952 FOR_ALL_BB_FN (bb, cfun)
8953 {
8954 FOR_EACH_EDGE (e, ei, bb->succs)
8955 {
8956 if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL))
8957 split_edge (e);
8958 /* PRE inserts statements on edges and expects that
8959 since split_critical_edges was done beforehand, committing edge
8960 insertions will not split more edges. In addition to critical
8961 edges we must split edges whose source block ends with a control
8962 flow statement, such as RESX, and whose destination cannot
8963 directly receive the inserted code. Go ahead and split them too.
8964 This matches the logic in gimple_find_edge_insert_loc. */
8965 else if ((!single_pred_p (e->dest)
8966 || !gimple_seq_empty_p (phi_nodes (e->dest))
8967 || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
8968 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
8969 && !(e->flags & EDGE_ABNORMAL))
8970 {
8971 gimple_stmt_iterator gsi;
8972
8973 gsi = gsi_last_bb (e->src);
8974 if (!gsi_end_p (gsi)
8975 && stmt_ends_bb_p (gsi_stmt (gsi))
8976 && (gimple_code (gsi_stmt (gsi)) != GIMPLE_RETURN
8977 && !gimple_call_builtin_p (gsi_stmt (gsi),
8978 BUILT_IN_RETURN)))
8979 split_edge (e);
8980 }
8981 }
8982 }
8983 end_recording_case_labels ();
8984 return 0;
8985 }
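
/* For reference, an edge is critical when its source has multiple
   successors and its destination has multiple predecessors, e.g. the
   edge B -> D in

     A -> B, B -> C, B -> D, E -> D;

   statements cannot be inserted on such an edge without giving it its
   own block, which is exactly what split_edge provides.  */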
8986
8987 namespace {
8988
8989 const pass_data pass_data_split_crit_edges =
8990 {
8991 GIMPLE_PASS, /* type */
8992 "crited", /* name */
8993 OPTGROUP_NONE, /* optinfo_flags */
8994 TV_TREE_SPLIT_EDGES, /* tv_id */
8995 PROP_cfg, /* properties_required */
8996 PROP_no_crit_edges, /* properties_provided */
8997 0, /* properties_destroyed */
8998 0, /* todo_flags_start */
8999 0, /* todo_flags_finish */
9000 };
9001
9002 class pass_split_crit_edges : public gimple_opt_pass
9003 {
9004 public:
9005 pass_split_crit_edges (gcc::context *ctxt)
9006 : gimple_opt_pass (pass_data_split_crit_edges, ctxt)
9007 {}
9008
9009 /* opt_pass methods: */
9010 virtual unsigned int execute (function *) { return split_critical_edges (); }
9011
9012 opt_pass * clone () { return new pass_split_crit_edges (m_ctxt); }
9013 }; // class pass_split_crit_edges
9014
9015 } // anon namespace
9016
9017 gimple_opt_pass *
9018 make_pass_split_crit_edges (gcc::context *ctxt)
9019 {
9020 return new pass_split_crit_edges (ctxt);
9021 }
9022
9023
9024 /* Insert COND expression, which must be a GIMPLE_COND, after STMT
9025 in basic block BB with appropriate basic block split
9026 and creation of a new conditionally executed basic block.
9027 Update profile so the new bb is visited with probability PROB.
9028 Return created basic block. */
9029 basic_block
9030 insert_cond_bb (basic_block bb, gimple *stmt, gimple *cond,
9031 profile_probability prob)
9032 {
9033 edge fall = split_block (bb, stmt);
9034 gimple_stmt_iterator iter = gsi_last_bb (bb);
9035 basic_block new_bb;
9036
9037 /* Insert cond statement. */
9038 gcc_assert (gimple_code (cond) == GIMPLE_COND);
9039 if (gsi_end_p (iter))
9040 gsi_insert_before (&iter, cond, GSI_CONTINUE_LINKING);
9041 else
9042 gsi_insert_after (&iter, cond, GSI_CONTINUE_LINKING);
9043
9044 /* Create conditionally executed block. */
9045 new_bb = create_empty_bb (bb);
9046 edge e = make_edge (bb, new_bb, EDGE_TRUE_VALUE);
9047 e->probability = prob;
9048 new_bb->count = e->count ();
9049 make_single_succ_edge (new_bb, fall->dest, EDGE_FALLTHRU);
9050
9051 /* Fix edge for split bb. */
9052 fall->flags = EDGE_FALSE_VALUE;
9053 fall->probability -= e->probability;
9054
9055 /* Update dominance info. */
9056 if (dom_info_available_p (CDI_DOMINATORS))
9057 {
9058 set_immediate_dominator (CDI_DOMINATORS, new_bb, bb);
9059 set_immediate_dominator (CDI_DOMINATORS, fall->dest, bb);
9060 }
9061
9062 /* Update loop info. */
9063 if (current_loops)
9064 add_bb_to_loop (new_bb, bb->loop_father);
9065
9066 return new_bb;
9067 }
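
/* A minimal usage sketch (illustrative only, not called in this file;
   FLAG_VAR is assumed to be a gimple value such as an SSA name): split
   BB after STMT and create an empty block that is entered only when
   FLAG_VAR is nonzero, giving it an even probability.  Statements can
   then be inserted into the returned block.  */

static basic_block ATTRIBUTE_UNUSED
insert_guarded_block_after (basic_block bb, gimple *stmt, tree flag_var)
{
  gcond *cond = gimple_build_cond (NE_EXPR, flag_var,
                                   build_zero_cst (TREE_TYPE (flag_var)),
                                   NULL_TREE, NULL_TREE);
  return insert_cond_bb (bb, stmt, cond, profile_probability::even ());
}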
9068
9069 /* Build a ternary operation and gimplify it. Emit code before GSI.
9070 Return the gimple_val holding the result. */
9071
9072 tree
9073 gimplify_build3 (gimple_stmt_iterator *gsi, enum tree_code code,
9074 tree type, tree a, tree b, tree c)
9075 {
9076 tree ret;
9077 location_t loc = gimple_location (gsi_stmt (*gsi));
9078
9079 ret = fold_build3_loc (loc, code, type, a, b, c);
9080 return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
9081 GSI_SAME_STMT);
9082 }
9083
9084 /* Build a binary operation and gimplify it. Emit code before GSI.
9085 Return the gimple_val holding the result. */
9086
9087 tree
9088 gimplify_build2 (gimple_stmt_iterator *gsi, enum tree_code code,
9089 tree type, tree a, tree b)
9090 {
9091 tree ret;
9092
9093 ret = fold_build2_loc (gimple_location (gsi_stmt (*gsi)), code, type, a, b);
9094 return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
9095 GSI_SAME_STMT);
9096 }
9097
9098 /* Build a unary operation and gimplify it. Emit code before GSI.
9099 Return the gimple_val holding the result. */
9100
9101 tree
9102 gimplify_build1 (gimple_stmt_iterator *gsi, enum tree_code code, tree type,
9103 tree a)
9104 {
9105 tree ret;
9106
9107 ret = fold_build1_loc (gimple_location (gsi_stmt (*gsi)), code, type, a);
9108 return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
9109 GSI_SAME_STMT);
9110 }
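
/* For instance (an illustrative helper, not used in this file), the
   gimplified value of (A + B) * C can be emitted before GSI by
   chaining the helpers above:  */

static tree ATTRIBUTE_UNUSED
gimplify_build_sum_times (gimple_stmt_iterator *gsi, tree type,
                          tree a, tree b, tree c)
{
  tree sum = gimplify_build2 (gsi, PLUS_EXPR, type, a, b);
  return gimplify_build2 (gsi, MULT_EXPR, type, sum, c);
}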
9111
9112
9113
9114 /* Given a basic block B which ends with a conditional and has
9115 precisely two successors, determine which of the edges is taken if
9116 the conditional is true and which is taken if the conditional is
9117 false. Set TRUE_EDGE and FALSE_EDGE appropriately. */
9118
9119 void
9120 extract_true_false_edges_from_block (basic_block b,
9121 edge *true_edge,
9122 edge *false_edge)
9123 {
9124 edge e = EDGE_SUCC (b, 0);
9125
9126 if (e->flags & EDGE_TRUE_VALUE)
9127 {
9128 *true_edge = e;
9129 *false_edge = EDGE_SUCC (b, 1);
9130 }
9131 else
9132 {
9133 *false_edge = e;
9134 *true_edge = EDGE_SUCC (b, 1);
9135 }
9136 }
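
/* E.g. a caller that only needs the block reached when the condition
   holds can wrap it like this (an illustrative sketch):  */

static basic_block ATTRIBUTE_UNUSED
true_dest_of_cond_block (basic_block cond_bb)
{
  edge true_edge, false_edge;
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  return true_edge->dest;
}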
9137
9138
9139 /* From a controlling predicate in the immediate dominator DOM of
9140 PHIBLOCK determine the edges into PHIBLOCK that are chosen if the
9141 predicate evaluates to true and false and store them to
9142 *TRUE_CONTROLLED_EDGE and *FALSE_CONTROLLED_EDGE if
9143 they are non-NULL. Returns true if the edges can be determined,
9144 else return false. */
9145
9146 bool
9147 extract_true_false_controlled_edges (basic_block dom, basic_block phiblock,
9148 edge *true_controlled_edge,
9149 edge *false_controlled_edge)
9150 {
9151 basic_block bb = phiblock;
9152 edge true_edge, false_edge, tem;
9153 edge e0 = NULL, e1 = NULL;
9154
9155 /* We have to verify that one edge into the PHI node is dominated
9156 by the true edge of the predicate block and the other edge
9157 dominated by the false edge. This ensures that the PHI argument
9158 we are going to take is completely determined by the path we
9159 take from the predicate block.
9160 We can only use BB dominance checks below if the destination of
9161 the true/false edges are dominated by their edge, thus only
9162 have a single predecessor. */
9163 extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
9164 tem = EDGE_PRED (bb, 0);
9165 if (tem == true_edge
9166 || (single_pred_p (true_edge->dest)
9167 && (tem->src == true_edge->dest
9168 || dominated_by_p (CDI_DOMINATORS,
9169 tem->src, true_edge->dest))))
9170 e0 = tem;
9171 else if (tem == false_edge
9172 || (single_pred_p (false_edge->dest)
9173 && (tem->src == false_edge->dest
9174 || dominated_by_p (CDI_DOMINATORS,
9175 tem->src, false_edge->dest))))
9176 e1 = tem;
9177 else
9178 return false;
9179 tem = EDGE_PRED (bb, 1);
9180 if (tem == true_edge
9181 || (single_pred_p (true_edge->dest)
9182 && (tem->src == true_edge->dest
9183 || dominated_by_p (CDI_DOMINATORS,
9184 tem->src, true_edge->dest))))
9185 e0 = tem;
9186 else if (tem == false_edge
9187 || (single_pred_p (false_edge->dest)
9188 && (tem->src == false_edge->dest
9189 || dominated_by_p (CDI_DOMINATORS,
9190 tem->src, false_edge->dest))))
9191 e1 = tem;
9192 else
9193 return false;
9194 if (!e0 || !e1)
9195 return false;
9196
9197 if (true_controlled_edge)
9198 *true_controlled_edge = e0;
9199 if (false_controlled_edge)
9200 *false_controlled_edge = e1;
9201
9202 return true;
9203 }
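
/* An illustrative consumer (a sketch, not part of this file's API):
   fetch the PHI argument selected when the predicate in DOM evaluates
   to true, or NULL_TREE if the controlling edges cannot be
   determined.  */

static tree ATTRIBUTE_UNUSED
phi_arg_on_true_path (basic_block dom, gphi *phi)
{
  edge true_edge;
  if (!extract_true_false_controlled_edges (dom, gimple_bb (phi),
                                            &true_edge, NULL))
    return NULL_TREE;
  return PHI_ARG_DEF (phi, true_edge->dest_idx);
}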
9204
9205 /* Generate a range test LHS CODE RHS that determines whether INDEX is in the
9206 range [low, high], using the single unsigned comparison
(INDEX - LOW) <= (HIGH - LOW). Place associated stmts before the
last stmt of BB. */
9207
9208 void
9209 generate_range_test (basic_block bb, tree index, tree low, tree high,
9210 tree *lhs, tree *rhs)
9211 {
9212 tree type = TREE_TYPE (index);
9213 tree utype = range_check_type (type);
9214
9215 low = fold_convert (utype, low);
9216 high = fold_convert (utype, high);
9217
9218 gimple_seq seq = NULL;
9219 index = gimple_convert (&seq, utype, index);
9220 *lhs = gimple_build (&seq, MINUS_EXPR, utype, index, low);
9221 *rhs = const_binop (MINUS_EXPR, utype, high, low);
9222
9223 gimple_stmt_iterator gsi = gsi_last_bb (bb);
9224 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
9225 }
9226
9227 /* Return the basic block that belongs to label numbered INDEX
9228 of a switch statement. */
9229
9230 basic_block
9231 gimple_switch_label_bb (function *ifun, gswitch *gs, unsigned index)
9232 {
9233 return label_to_block (ifun, CASE_LABEL (gimple_switch_label (gs, index)));
9234 }
9235
9236 /* Return the default basic block of a switch statement. */
9237
9238 basic_block
9239 gimple_switch_default_bb (function *ifun, gswitch *gs)
9240 {
9241 return gimple_switch_label_bb (ifun, gs, 0);
9242 }
9243
9244 /* Return the edge that belongs to label numbered INDEX
9245 of a switch statement. */
9246
9247 edge
9248 gimple_switch_edge (function *ifun, gswitch *gs, unsigned index)
9249 {
9250 return find_edge (gimple_bb (gs), gimple_switch_label_bb (ifun, gs, index));
9251 }
9252
9253 /* Return the default edge of a switch statement. */
9254
9255 edge
9256 gimple_switch_default_edge (function *ifun, gswitch *gs)
9257 {
9258 return gimple_switch_edge (ifun, gs, 0);
9259 }
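
/* Together these allow walking every outgoing case edge of a switch,
   e.g. (an illustrative sketch; label index 0 is always the
   default):  */

static void ATTRIBUTE_UNUSED
walk_switch_case_edges (function *ifun, gswitch *gs)
{
  for (unsigned i = 0; i < gimple_switch_num_labels (gs); ++i)
    {
      edge e = gimple_switch_edge (ifun, gs, i);
      /* Multiple case labels may share the same edge; E is never NULL.  */
      gcc_assert (e != NULL);
    }
}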
9260
9261
9262 /* Emit return warnings. */

namespace {

const pass_data pass_data_warn_function_return =
{
  GIMPLE_PASS, /* type */
  "*warn_function_return", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_warn_function_return : public gimple_opt_pass
{
public:
  pass_warn_function_return (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_warn_function_return, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_warn_function_return

unsigned int
pass_warn_function_return::execute (function *fun)
{
  location_t location;
  gimple *last;
  edge e;
  edge_iterator ei;

  if (!targetm.warn_func_return (fun->decl))
    return 0;

  /* If we have a path to EXIT, then we do return.  */
  if (TREE_THIS_VOLATILE (fun->decl)
      && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (fun)->preds) > 0)
    {
      location = UNKNOWN_LOCATION;
      for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (fun)->preds);
	   (e = ei_safe_edge (ei)); )
	{
	  last = last_stmt (e->src);
	  if ((gimple_code (last) == GIMPLE_RETURN
	       || gimple_call_builtin_p (last, BUILT_IN_RETURN))
	      && location == UNKNOWN_LOCATION
	      && ((location = LOCATION_LOCUS (gimple_location (last)))
		  != UNKNOWN_LOCATION)
	      && !optimize)
	    break;
	  /* When optimizing, replace return stmts in noreturn functions
	     with __builtin_unreachable () call.  */
	  if (optimize && gimple_code (last) == GIMPLE_RETURN)
	    {
	      tree fndecl = builtin_decl_implicit (BUILT_IN_UNREACHABLE);
	      gimple *new_stmt = gimple_build_call (fndecl, 0);
	      gimple_set_location (new_stmt, gimple_location (last));
	      gimple_stmt_iterator gsi = gsi_for_stmt (last);
	      gsi_replace (&gsi, new_stmt, true);
	      remove_edge (e);
	    }
	  else
	    ei_next (&ei);
	}
      if (location == UNKNOWN_LOCATION)
	location = cfun->function_end_locus;
      warning_at (location, 0, "%<noreturn%> function does return");
    }

  /* If we see "return;" in some basic block, then we do reach the end
     without returning a value.  */
  else if (warn_return_type > 0
	   && !TREE_NO_WARNING (fun->decl)
	   && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fun->decl))))
    {
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (fun)->preds)
	{
	  gimple *last = last_stmt (e->src);
	  greturn *return_stmt = dyn_cast <greturn *> (last);
	  if (return_stmt
	      && gimple_return_retval (return_stmt) == NULL
	      && !gimple_no_warning_p (last))
	    {
	      location = gimple_location (last);
	      if (LOCATION_LOCUS (location) == UNKNOWN_LOCATION)
		location = fun->function_end_locus;
	      if (warning_at (location, OPT_Wreturn_type,
			      "control reaches end of non-void function"))
		TREE_NO_WARNING (fun->decl) = 1;
	      break;
	    }
	}
      /* The C++ FE turns fallthrough from the end of non-void function
	 into __builtin_unreachable () call with BUILTINS_LOCATION.
	 Recognize those too.  */
      basic_block bb;
      if (!TREE_NO_WARNING (fun->decl))
	FOR_EACH_BB_FN (bb, fun)
	  if (EDGE_COUNT (bb->succs) == 0)
	    {
	      gimple *last = last_stmt (bb);
	      const enum built_in_function ubsan_missing_ret
		= BUILT_IN_UBSAN_HANDLE_MISSING_RETURN;
	      if (last
		  && ((LOCATION_LOCUS (gimple_location (last))
		       == BUILTINS_LOCATION
		       && gimple_call_builtin_p (last, BUILT_IN_UNREACHABLE))
		      || gimple_call_builtin_p (last, ubsan_missing_ret)))
		{
		  gimple_stmt_iterator gsi = gsi_for_stmt (last);
		  gsi_prev_nondebug (&gsi);
		  gimple *prev = gsi_stmt (gsi);
		  if (prev == NULL)
		    location = UNKNOWN_LOCATION;
		  else
		    location = gimple_location (prev);
		  if (LOCATION_LOCUS (location) == UNKNOWN_LOCATION)
		    location = fun->function_end_locus;
		  if (warning_at (location, OPT_Wreturn_type,
				  "control reaches end of non-void function"))
		    TREE_NO_WARNING (fun->decl) = 1;
		  break;
		}
	    }
    }
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_warn_function_return (gcc::context *ctxt)
{
  return new pass_warn_function_return (ctxt);
}

/* Walk a gimplified function and warn for functions whose return value is
   ignored and attribute((warn_unused_result)) is set.  This is done before
   inlining, so we don't have to worry about that.  */
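
/* For example, given

     __attribute__ ((warn_unused_result)) int f (void);
     void g (void) { f (); }

   the call to f in g appears as a GIMPLE_CALL with no LHS, so the walk
   below reports it with -Wunused-result.  */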

static void
do_warn_unused_result (gimple_seq seq)
{
  tree fdecl, ftype;
  gimple_stmt_iterator i;

  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gimple *g = gsi_stmt (i);

      switch (gimple_code (g))
	{
	case GIMPLE_BIND:
	  do_warn_unused_result (gimple_bind_body (as_a <gbind *> (g)));
	  break;
	case GIMPLE_TRY:
	  do_warn_unused_result (gimple_try_eval (g));
	  do_warn_unused_result (gimple_try_cleanup (g));
	  break;
	case GIMPLE_CATCH:
	  do_warn_unused_result (gimple_catch_handler (as_a <gcatch *> (g)));
	  break;
	case GIMPLE_EH_FILTER:
	  do_warn_unused_result (gimple_eh_filter_failure (g));
	  break;

	case GIMPLE_CALL:
	  if (gimple_call_lhs (g))
	    break;
	  if (gimple_call_internal_p (g))
	    break;

	  /* This is a naked call, as opposed to a GIMPLE_CALL with an
	     LHS.  All calls whose value is ignored should be
	     represented like this.  Look for the attribute.  */
	  fdecl = gimple_call_fndecl (g);
	  ftype = gimple_call_fntype (g);

	  if (lookup_attribute ("warn_unused_result", TYPE_ATTRIBUTES (ftype)))
	    {
	      location_t loc = gimple_location (g);

	      if (fdecl)
		warning_at (loc, OPT_Wunused_result,
			    "ignoring return value of %qD, "
			    "declared with attribute warn_unused_result",
			    fdecl);
	      else
		warning_at (loc, OPT_Wunused_result,
			    "ignoring return value of function "
			    "declared with attribute warn_unused_result");
	    }
	  break;

	default:
	  /* Not a container, not a call, or a call whose value is used.  */
	  break;
	}
    }
}

namespace {

const pass_data pass_data_warn_unused_result =
{
  GIMPLE_PASS, /* type */
  "*warn_unused_result", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_warn_unused_result : public gimple_opt_pass
{
public:
  pass_warn_unused_result (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_warn_unused_result, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_warn_unused_result; }
  virtual unsigned int execute (function *)
  {
    do_warn_unused_result (gimple_body (current_function_decl));
    return 0;
  }

}; // class pass_warn_unused_result

} // anon namespace

gimple_opt_pass *
make_pass_warn_unused_result (gcc::context *ctxt)
{
  return new pass_warn_unused_result (ctxt);
}

/* IPA passes, compilation of earlier functions, or inlining might have
   changed some properties, such as marking functions nothrow, pure, const,
   or noreturn.  Remove redundant edges and basic blocks, and create new
   ones if necessary.

   This pass can't be executed as a standalone pass from the pass manager,
   because in between inlining and this fixup verify_flow_info would
   fail.  */
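
/* As a concrete (hypothetical) example:

     void fatal (void);   // later discovered to be noreturn
     int f (void) { fatal (); return 0; }

   Once IPA marks fatal as noreturn, the code after the call is
   unreachable; fixup_noreturn_call and the CFG cleanup requested below
   remove the stale edges, and blocks left without successors are capped
   with a __builtin_unreachable () call.  */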

unsigned int
execute_fixup_cfg (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  int todo = 0;
  cgraph_node *node = cgraph_node::get (current_function_decl);
  profile_count num = node->count;
  profile_count den = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
  bool scale = num.initialized_p () && !(num == den);

  if (scale)
    {
      profile_count::adjust_for_ipa_scaling (&num, &den);
      ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = node->count;
      EXIT_BLOCK_PTR_FOR_FN (cfun)->count
	= EXIT_BLOCK_PTR_FOR_FN (cfun)->count.apply_scale (num, den);
    }

  FOR_EACH_BB_FN (bb, cfun)
    {
      if (scale)
	bb->count = bb->count.apply_scale (num, den);
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
	{
	  gimple *stmt = gsi_stmt (gsi);
	  tree decl = is_gimple_call (stmt)
		      ? gimple_call_fndecl (stmt)
		      : NULL;
	  if (decl)
	    {
	      int flags = gimple_call_flags (stmt);
	      if (flags & (ECF_CONST | ECF_PURE | ECF_LOOPING_CONST_OR_PURE))
		{
		  if (gimple_purge_dead_abnormal_call_edges (bb))
		    todo |= TODO_cleanup_cfg;

		  if (gimple_in_ssa_p (cfun))
		    {
		      todo |= TODO_update_ssa | TODO_cleanup_cfg;
		      update_stmt (stmt);
		    }
		}

	      if (flags & ECF_NORETURN
		  && fixup_noreturn_call (stmt))
		todo |= TODO_cleanup_cfg;
	    }

	  /* Remove stores to variables we marked write-only.
	     Keep access when store has side effect, i.e. in case when source
	     is volatile.  */
	  if (gimple_store_p (stmt)
	      && !gimple_has_side_effects (stmt))
	    {
	      tree lhs = get_base_address (gimple_get_lhs (stmt));

	      if (VAR_P (lhs)
		  && (TREE_STATIC (lhs) || DECL_EXTERNAL (lhs))
		  && varpool_node::get (lhs)->writeonly)
		{
		  unlink_stmt_vdef (stmt);
		  gsi_remove (&gsi, true);
		  release_defs (stmt);
		  todo |= TODO_update_ssa | TODO_cleanup_cfg;
		  continue;
		}
	    }
	  /* For calls we can simply remove LHS when it is known
	     to be write-only.  */
	  if (is_gimple_call (stmt)
	      && gimple_get_lhs (stmt))
	    {
	      tree lhs = get_base_address (gimple_get_lhs (stmt));

	      if (VAR_P (lhs)
		  && (TREE_STATIC (lhs) || DECL_EXTERNAL (lhs))
		  && varpool_node::get (lhs)->writeonly)
		{
		  gimple_call_set_lhs (stmt, NULL);
		  update_stmt (stmt);
		  todo |= TODO_update_ssa | TODO_cleanup_cfg;
		}
	    }

	  if (maybe_clean_eh_stmt (stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    todo |= TODO_cleanup_cfg;
	  gsi_next (&gsi);
	}

      /* If we have a basic block with no successors that does not
	 end with a control statement or a noreturn call end it with
	 a call to __builtin_unreachable.  This situation can occur
	 when inlining a noreturn call that does in fact return.  */
      if (EDGE_COUNT (bb->succs) == 0)
	{
	  gimple *stmt = last_stmt (bb);
	  if (!stmt
	      || (!is_ctrl_stmt (stmt)
		  && (!is_gimple_call (stmt)
		      || !gimple_call_noreturn_p (stmt))))
	    {
	      if (stmt && is_gimple_call (stmt))
		gimple_call_set_ctrl_altering (stmt, false);
	      tree fndecl = builtin_decl_implicit (BUILT_IN_UNREACHABLE);
	      stmt = gimple_build_call (fndecl, 0);
	      gimple_stmt_iterator gsi = gsi_last_bb (bb);
	      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
	      if (!cfun->after_inlining)
		{
		  gcall *call_stmt = dyn_cast <gcall *> (stmt);
		  node->create_edge (cgraph_node::get_create (fndecl),
				     call_stmt, bb->count);
		}
	    }
	}
    }
  if (scale)
    compute_function_frequency ();

  if (current_loops
      && (todo & TODO_cleanup_cfg))
    loops_state_set (LOOPS_NEED_FIXUP);

  return todo;
}

namespace {

const pass_data pass_data_fixup_cfg =
{
  GIMPLE_PASS, /* type */
  "fixup_cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_fixup_cfg : public gimple_opt_pass
{
public:
  pass_fixup_cfg (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_fixup_cfg, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_fixup_cfg (m_ctxt); }
  virtual unsigned int execute (function *) { return execute_fixup_cfg (); }

}; // class pass_fixup_cfg

} // anon namespace

gimple_opt_pass *
make_pass_fixup_cfg (gcc::context *ctxt)
{
  return new pass_fixup_cfg (ctxt);
}

/* Garbage collection support for edge_def.  */

extern void gt_ggc_mx (tree&);
extern void gt_ggc_mx (gimple *&);
extern void gt_ggc_mx (rtx&);
extern void gt_ggc_mx (basic_block&);

static void
gt_ggc_mx (rtx_insn *& x)
{
  if (x)
    gt_ggc_mx_rtx_def ((void *) x);
}

void
gt_ggc_mx (edge_def *e)
{
  tree block = LOCATION_BLOCK (e->goto_locus);
  gt_ggc_mx (e->src);
  gt_ggc_mx (e->dest);
  if (current_ir_type () == IR_GIMPLE)
    gt_ggc_mx (e->insns.g);
  else
    gt_ggc_mx (e->insns.r);
  gt_ggc_mx (block);
}

/* PCH support for edge_def.  */

extern void gt_pch_nx (tree&);
extern void gt_pch_nx (gimple *&);
extern void gt_pch_nx (rtx&);
extern void gt_pch_nx (basic_block&);

static void
gt_pch_nx (rtx_insn *& x)
{
  if (x)
    gt_pch_nx_rtx_def ((void *) x);
}

void
gt_pch_nx (edge_def *e)
{
  tree block = LOCATION_BLOCK (e->goto_locus);
  gt_pch_nx (e->src);
  gt_pch_nx (e->dest);
  if (current_ir_type () == IR_GIMPLE)
    gt_pch_nx (e->insns.g);
  else
    gt_pch_nx (e->insns.r);
  gt_pch_nx (block);
}

void
gt_pch_nx (edge_def *e, gt_pointer_operator op, void *cookie)
{
  tree block = LOCATION_BLOCK (e->goto_locus);
  op (&(e->src), cookie);
  op (&(e->dest), cookie);
  if (current_ir_type () == IR_GIMPLE)
    op (&(e->insns.g), cookie);
  else
    op (&(e->insns.r), cookie);
  op (&(block), cookie);
}

#if CHECKING_P

namespace selftest {

/* Helper function for CFG selftests: create a dummy function decl
   and push it as cfun.  */

static tree
push_fndecl (const char *name)
{
  tree fn_type = build_function_type_array (integer_type_node, 0, NULL);
  /* FIXME: this uses input_location: */
  tree fndecl = build_fn_decl (name, fn_type);
  tree retval = build_decl (UNKNOWN_LOCATION, RESULT_DECL,
			    NULL_TREE, integer_type_node);
  DECL_RESULT (fndecl) = retval;
  push_struct_function (fndecl);
  function *fun = DECL_STRUCT_FUNCTION (fndecl);
  ASSERT_TRUE (fun != NULL);
  init_empty_tree_cfg_for_function (fun);
  ASSERT_EQ (2, n_basic_blocks_for_fn (fun));
  ASSERT_EQ (0, n_edges_for_fn (fun));
  return fndecl;
}

/* These tests directly create CFGs.
   Compare with the static fns within tree-cfg.c:
     - build_gimple_cfg
     - make_blocks: calls create_basic_block (seq, bb);
     - make_edges.  */

/* Verify a simple cfg of the form:
     ENTRY -> A -> B -> C -> EXIT.  */

static void
test_linear_chain ()
{
  gimple_register_cfg_hooks ();

  tree fndecl = push_fndecl ("cfg_test_linear_chain");
  function *fun = DECL_STRUCT_FUNCTION (fndecl);

  /* Create some empty blocks.  */
  basic_block bb_a = create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun));
  basic_block bb_b = create_empty_bb (bb_a);
  basic_block bb_c = create_empty_bb (bb_b);

  ASSERT_EQ (5, n_basic_blocks_for_fn (fun));
  ASSERT_EQ (0, n_edges_for_fn (fun));

  /* Create some edges: a simple linear chain of BBs.  */
  make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), bb_a, EDGE_FALLTHRU);
  make_edge (bb_a, bb_b, 0);
  make_edge (bb_b, bb_c, 0);
  make_edge (bb_c, EXIT_BLOCK_PTR_FOR_FN (fun), 0);

  /* Verify the edges.  */
  ASSERT_EQ (4, n_edges_for_fn (fun));
  ASSERT_EQ (NULL, ENTRY_BLOCK_PTR_FOR_FN (fun)->preds);
  ASSERT_EQ (1, ENTRY_BLOCK_PTR_FOR_FN (fun)->succs->length ());
  ASSERT_EQ (1, bb_a->preds->length ());
  ASSERT_EQ (1, bb_a->succs->length ());
  ASSERT_EQ (1, bb_b->preds->length ());
  ASSERT_EQ (1, bb_b->succs->length ());
  ASSERT_EQ (1, bb_c->preds->length ());
  ASSERT_EQ (1, bb_c->succs->length ());
  ASSERT_EQ (1, EXIT_BLOCK_PTR_FOR_FN (fun)->preds->length ());
  ASSERT_EQ (NULL, EXIT_BLOCK_PTR_FOR_FN (fun)->succs);

  /* Verify the dominance information.
     Each BB in our simple chain should be dominated by the one before
     it.  */
  calculate_dominance_info (CDI_DOMINATORS);
  ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_b));
  ASSERT_EQ (bb_b, get_immediate_dominator (CDI_DOMINATORS, bb_c));
  vec<basic_block> dom_by_b = get_dominated_by (CDI_DOMINATORS, bb_b);
  ASSERT_EQ (1, dom_by_b.length ());
  ASSERT_EQ (bb_c, dom_by_b[0]);
  free_dominance_info (CDI_DOMINATORS);
  dom_by_b.release ();

  /* Similarly for post-dominance: each BB in our chain is post-dominated
     by the one after it.  */
  calculate_dominance_info (CDI_POST_DOMINATORS);
  ASSERT_EQ (bb_b, get_immediate_dominator (CDI_POST_DOMINATORS, bb_a));
  ASSERT_EQ (bb_c, get_immediate_dominator (CDI_POST_DOMINATORS, bb_b));
  vec<basic_block> postdom_by_b = get_dominated_by (CDI_POST_DOMINATORS, bb_b);
  ASSERT_EQ (1, postdom_by_b.length ());
  ASSERT_EQ (bb_a, postdom_by_b[0]);
  free_dominance_info (CDI_POST_DOMINATORS);
  postdom_by_b.release ();

  pop_cfun ();
}

/* Verify a simple CFG of the form:
     ENTRY
       |
       A
      / \
     /t  \f
    B     C
     \   /
      \ /
       D
       |
     EXIT.  */

static void
test_diamond ()
{
  gimple_register_cfg_hooks ();

  tree fndecl = push_fndecl ("cfg_test_diamond");
  function *fun = DECL_STRUCT_FUNCTION (fndecl);

  /* Create some empty blocks.  */
  basic_block bb_a = create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun));
  basic_block bb_b = create_empty_bb (bb_a);
  basic_block bb_c = create_empty_bb (bb_a);
  basic_block bb_d = create_empty_bb (bb_b);

  ASSERT_EQ (6, n_basic_blocks_for_fn (fun));
  ASSERT_EQ (0, n_edges_for_fn (fun));

  /* Create the edges.  */
  make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), bb_a, EDGE_FALLTHRU);
  make_edge (bb_a, bb_b, EDGE_TRUE_VALUE);
  make_edge (bb_a, bb_c, EDGE_FALSE_VALUE);
  make_edge (bb_b, bb_d, 0);
  make_edge (bb_c, bb_d, 0);
  make_edge (bb_d, EXIT_BLOCK_PTR_FOR_FN (fun), 0);

  /* Verify the edges.  */
  ASSERT_EQ (6, n_edges_for_fn (fun));
  ASSERT_EQ (1, bb_a->preds->length ());
  ASSERT_EQ (2, bb_a->succs->length ());
  ASSERT_EQ (1, bb_b->preds->length ());
  ASSERT_EQ (1, bb_b->succs->length ());
  ASSERT_EQ (1, bb_c->preds->length ());
  ASSERT_EQ (1, bb_c->succs->length ());
  ASSERT_EQ (2, bb_d->preds->length ());
  ASSERT_EQ (1, bb_d->succs->length ());

  /* Verify the dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);
  ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_b));
  ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_c));
  ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_d));
  vec<basic_block> dom_by_a = get_dominated_by (CDI_DOMINATORS, bb_a);
  ASSERT_EQ (3, dom_by_a.length ()); /* B, C, D, in some order.  */
  dom_by_a.release ();
  vec<basic_block> dom_by_b = get_dominated_by (CDI_DOMINATORS, bb_b);
  ASSERT_EQ (0, dom_by_b.length ());
  dom_by_b.release ();
  free_dominance_info (CDI_DOMINATORS);

  /* Similarly for post-dominance.  */
  calculate_dominance_info (CDI_POST_DOMINATORS);
  ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_a));
  ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_b));
  ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_c));
  vec<basic_block> postdom_by_d = get_dominated_by (CDI_POST_DOMINATORS, bb_d);
  ASSERT_EQ (3, postdom_by_d.length ()); /* A, B, C in some order.  */
  postdom_by_d.release ();
  vec<basic_block> postdom_by_b = get_dominated_by (CDI_POST_DOMINATORS, bb_b);
  ASSERT_EQ (0, postdom_by_b.length ());
  postdom_by_b.release ();
  free_dominance_info (CDI_POST_DOMINATORS);

  pop_cfun ();
}

/* Verify that we can handle a CFG containing a "complete" aka
   fully-connected subgraph (where A B C D below all have edges
   pointing to each other node, also to themselves).
   e.g.:
     ENTRY  EXIT
       |    ^
       |   /
       |  /
       | /
       V/
       A<--->B
       ^^   ^^
       | \ / |
       |  X  |
       | / \ |
       VV   VV
       C<--->D
*/

static void
test_fully_connected ()
{
  gimple_register_cfg_hooks ();

  tree fndecl = push_fndecl ("cfg_fully_connected");
  function *fun = DECL_STRUCT_FUNCTION (fndecl);

  const int n = 4;

  /* Create some empty blocks.  */
  auto_vec <basic_block> subgraph_nodes;
  for (int i = 0; i < n; i++)
    subgraph_nodes.safe_push (create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun)));

  ASSERT_EQ (n + 2, n_basic_blocks_for_fn (fun));
  ASSERT_EQ (0, n_edges_for_fn (fun));

  /* Create the edges.  */
  make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), subgraph_nodes[0], EDGE_FALLTHRU);
  make_edge (subgraph_nodes[0], EXIT_BLOCK_PTR_FOR_FN (fun), 0);
  for (int i = 0; i < n; i++)
    for (int j = 0; j < n; j++)
      make_edge (subgraph_nodes[i], subgraph_nodes[j], 0);

  /* Verify the edges.  */
  ASSERT_EQ (2 + (n * n), n_edges_for_fn (fun));
  /* The first one is linked to ENTRY/EXIT as well as itself and
     everything else.  */
  ASSERT_EQ (n + 1, subgraph_nodes[0]->preds->length ());
  ASSERT_EQ (n + 1, subgraph_nodes[0]->succs->length ());
  /* The other ones in the subgraph are linked to everything in
     the subgraph (including themselves).  */
  for (int i = 1; i < n; i++)
    {
      ASSERT_EQ (n, subgraph_nodes[i]->preds->length ());
      ASSERT_EQ (n, subgraph_nodes[i]->succs->length ());
    }

  /* Verify the dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);
  /* The initial block in the subgraph should be dominated by ENTRY.  */
  ASSERT_EQ (ENTRY_BLOCK_PTR_FOR_FN (fun),
	     get_immediate_dominator (CDI_DOMINATORS,
				      subgraph_nodes[0]));
  /* Every other block in the subgraph should be dominated by the
     initial block.  */
  for (int i = 1; i < n; i++)
    ASSERT_EQ (subgraph_nodes[0],
	       get_immediate_dominator (CDI_DOMINATORS,
					subgraph_nodes[i]));
  free_dominance_info (CDI_DOMINATORS);

  /* Similarly for post-dominance.  */
  calculate_dominance_info (CDI_POST_DOMINATORS);
  /* The initial block in the subgraph should be postdominated by EXIT.  */
  ASSERT_EQ (EXIT_BLOCK_PTR_FOR_FN (fun),
	     get_immediate_dominator (CDI_POST_DOMINATORS,
				      subgraph_nodes[0]));
  /* Every other block in the subgraph should be postdominated by the
     initial block, since that leads to EXIT.  */
  for (int i = 1; i < n; i++)
    ASSERT_EQ (subgraph_nodes[0],
	       get_immediate_dominator (CDI_POST_DOMINATORS,
					subgraph_nodes[i]));
  free_dominance_info (CDI_POST_DOMINATORS);

  pop_cfun ();
}

/* Run all of the selftests within this file.  */

void
tree_cfg_c_tests ()
{
  test_linear_chain ();
  test_diamond ();
  test_fully_connected ();
}

} // namespace selftest

/* TODO: test the dominator/postdominator logic with various graphs/nodes:
     - loop
     - nested loops
     - switch statement (a block with many out-edges)
     - something that jumps to itself
     - etc  */

#endif /* CHECKING_P */