1 /* Control flow functions for trees.
2 Copyright (C) 2001-2021 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "cgraph.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "trans-mem.h"
37 #include "stor-layout.h"
38 #include "print-tree.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimplify-me.h"
44 #include "gimple-walk.h"
45 #include "tree-cfg.h"
46 #include "tree-ssa-loop-manip.h"
47 #include "tree-ssa-loop-niter.h"
48 #include "tree-into-ssa.h"
49 #include "tree-dfa.h"
50 #include "tree-ssa.h"
51 #include "except.h"
52 #include "cfgloop.h"
53 #include "tree-ssa-propagate.h"
54 #include "value-prof.h"
55 #include "tree-inline.h"
56 #include "tree-ssa-live.h"
57 #include "omp-general.h"
58 #include "omp-expand.h"
59 #include "tree-cfgcleanup.h"
60 #include "gimplify.h"
61 #include "attribs.h"
62 #include "selftest.h"
63 #include "opts.h"
64 #include "asan.h"
65 #include "profile.h"
66
67 /* This file contains functions for building the Control Flow Graph (CFG)
68 for a function tree. */
69
70 /* Local declarations. */
71
72 /* Initial capacity for the basic block array. */
73 static const int initial_cfg_capacity = 20;
74
75    /* This hash table allows us to efficiently look up all CASE_LABEL_EXPRs
76 which use a particular edge. The CASE_LABEL_EXPRs are chained together
77 via their CASE_CHAIN field, which we clear after we're done with the
78 hash table to prevent problems with duplication of GIMPLE_SWITCHes.
79
80 Access to this list of CASE_LABEL_EXPRs allows us to efficiently
81 update the case vector in response to edge redirections.
82
83 Right now this table is set up and torn down at key points in the
84 compilation process. It would be nice if we could make the table
85 more persistent. The key is getting notification of changes to
86 the CFG (particularly edge removal, creation and redirection). */
87
88 static hash_map<edge, tree> *edge_to_cases;
89
90 /* If we record edge_to_cases, this bitmap will hold indexes
91 of basic blocks that end in a GIMPLE_SWITCH which we touched
92 due to edge manipulations. */
93
94 static bitmap touched_switch_bbs;
95
96 /* OpenMP region idxs for blocks during cfg pass. */
97 static vec<int> bb_to_omp_idx;
98
99 /* CFG statistics. */
100 struct cfg_stats_d
101 {
102 long num_merged_labels;
103 };
104
105 static struct cfg_stats_d cfg_stats;
106
107 /* Data to pass to replace_block_vars_by_duplicates_1. */
108 struct replace_decls_d
109 {
110 hash_map<tree, tree> *vars_map;
111 tree to_context;
112 };
113
114 /* Hash table to store last discriminator assigned for each locus. */
115 struct locus_discrim_map
116 {
117 int location_line;
118 int discriminator;
119 };
120
121 /* Hashtable helpers. */
122
123 struct locus_discrim_hasher : free_ptr_hash <locus_discrim_map>
124 {
125 static inline hashval_t hash (const locus_discrim_map *);
126 static inline bool equal (const locus_discrim_map *,
127 const locus_discrim_map *);
128 };
129
130 /* Trivial hash function for a location_t. ITEM is a pointer to
131 a hash table entry that maps a location_t to a discriminator. */
132
133 inline hashval_t
134 locus_discrim_hasher::hash (const locus_discrim_map *item)
135 {
136 return item->location_line;
137 }
138
139 /* Equality function for the locus-to-discriminator map. A and B
140 point to the two hash table entries to compare. */
141
142 inline bool
143 locus_discrim_hasher::equal (const locus_discrim_map *a,
144 const locus_discrim_map *b)
145 {
146 return a->location_line == b->location_line;
147 }
148
149 static hash_table<locus_discrim_hasher> *discriminator_per_locus;
150
151 /* Basic blocks and flowgraphs. */
152 static void make_blocks (gimple_seq);
153
154 /* Edges. */
155 static void make_edges (void);
156 static void assign_discriminators (void);
157 static void make_cond_expr_edges (basic_block);
158 static void make_gimple_switch_edges (gswitch *, basic_block);
159 static bool make_goto_expr_edges (basic_block);
160 static void make_gimple_asm_edges (basic_block);
161 static edge gimple_redirect_edge_and_branch (edge, basic_block);
162 static edge gimple_try_redirect_by_replacing_jump (edge, basic_block);
163
164 /* Various helpers. */
165 static inline bool stmt_starts_bb_p (gimple *, gimple *);
166 static int gimple_verify_flow_info (void);
167 static void gimple_make_forwarder_block (edge);
168 static gimple *first_non_label_stmt (basic_block);
169 static bool verify_gimple_transaction (gtransaction *);
170 static bool call_can_make_abnormal_goto (gimple *);
171
172 /* Flowgraph optimization and cleanup. */
173 static void gimple_merge_blocks (basic_block, basic_block);
174 static bool gimple_can_merge_blocks_p (basic_block, basic_block);
175 static void remove_bb (basic_block);
176 static edge find_taken_edge_computed_goto (basic_block, tree);
177 static edge find_taken_edge_cond_expr (const gcond *, tree);
178
179 void
180 init_empty_tree_cfg_for_function (struct function *fn)
181 {
182 /* Initialize the basic block array. */
183 init_flow (fn);
184 profile_status_for_fn (fn) = PROFILE_ABSENT;
185 n_basic_blocks_for_fn (fn) = NUM_FIXED_BLOCKS;
186 last_basic_block_for_fn (fn) = NUM_FIXED_BLOCKS;
187 vec_safe_grow_cleared (basic_block_info_for_fn (fn),
188 initial_cfg_capacity, true);
189
190 /* Build a mapping of labels to their associated blocks. */
191 vec_safe_grow_cleared (label_to_block_map_for_fn (fn),
192 initial_cfg_capacity, true);
193
194 SET_BASIC_BLOCK_FOR_FN (fn, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (fn));
195 SET_BASIC_BLOCK_FOR_FN (fn, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (fn));
196
197 ENTRY_BLOCK_PTR_FOR_FN (fn)->next_bb
198 = EXIT_BLOCK_PTR_FOR_FN (fn);
199 EXIT_BLOCK_PTR_FOR_FN (fn)->prev_bb
200 = ENTRY_BLOCK_PTR_FOR_FN (fn);
201 }
202
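/* Like init_empty_tree_cfg_for_function, but for the current function.  */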
203 void
204 init_empty_tree_cfg (void)
205 {
206 init_empty_tree_cfg_for_function (cfun);
207 }
208
209 /*---------------------------------------------------------------------------
210 Create basic blocks
211 ---------------------------------------------------------------------------*/
212
213 /* Entry point to the CFG builder for trees. SEQ is the sequence of
214 statements to be added to the flowgraph. */
215
216 static void
217 build_gimple_cfg (gimple_seq seq)
218 {
219 /* Register specific gimple functions. */
220 gimple_register_cfg_hooks ();
221
222 memset ((void *) &cfg_stats, 0, sizeof (cfg_stats));
223
224 init_empty_tree_cfg ();
225
226 make_blocks (seq);
227
228 /* Make sure there is always at least one block, even if it's empty. */
229 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
230 create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
231
232 /* Adjust the size of the array. */
233 if (basic_block_info_for_fn (cfun)->length ()
234 < (size_t) n_basic_blocks_for_fn (cfun))
235 vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
236 n_basic_blocks_for_fn (cfun));
237
238 /* To speed up statement iterator walks, we first purge dead labels. */
239 cleanup_dead_labels ();
240
241 /* Group case nodes to reduce the number of edges.
242 We do this after cleaning up dead labels because otherwise we miss
243 a lot of obvious case merging opportunities. */
244 group_case_labels ();
245
246 /* Create the edges of the flowgraph. */
247 discriminator_per_locus = new hash_table<locus_discrim_hasher> (13);
248 make_edges ();
249 assign_discriminators ();
250 cleanup_dead_labels ();
251 delete discriminator_per_locus;
252 discriminator_per_locus = NULL;
253 }
254
255 /* Look for ANNOTATE calls with loop annotation kind in BB; if found, remove
256 them and propagate the information to LOOP. We assume that the annotations
257 come immediately before the condition in BB, if any. */
258
259 static void
260 replace_loop_annotate_in_block (basic_block bb, class loop *loop)
261 {
262 gimple_stmt_iterator gsi = gsi_last_bb (bb);
263 gimple *stmt = gsi_stmt (gsi);
264
265 if (!(stmt && gimple_code (stmt) == GIMPLE_COND))
266 return;
267
268 for (gsi_prev_nondebug (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
269 {
270 stmt = gsi_stmt (gsi);
271 if (gimple_code (stmt) != GIMPLE_CALL)
272 break;
273 if (!gimple_call_internal_p (stmt)
274 || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
275 break;
276
277 switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
278 {
279 case annot_expr_ivdep_kind:
280 loop->safelen = INT_MAX;
281 break;
282 case annot_expr_unroll_kind:
283 loop->unroll
284 = (unsigned short) tree_to_shwi (gimple_call_arg (stmt, 2));
285 cfun->has_unroll = true;
286 break;
287 case annot_expr_no_vector_kind:
288 loop->dont_vectorize = true;
289 break;
290 case annot_expr_vector_kind:
291 loop->force_vectorize = true;
292 cfun->has_force_vectorize_loops = true;
293 break;
294 case annot_expr_parallel_kind:
295 loop->can_be_parallel = true;
296 loop->safelen = INT_MAX;
297 break;
298 default:
299 gcc_unreachable ();
300 }
301
302 stmt = gimple_build_assign (gimple_call_lhs (stmt),
303 gimple_call_arg (stmt, 0));
304 gsi_replace (&gsi, stmt, true);
305 }
306 }
307
308 /* Look for ANNOTATE calls with loop annotation kind; if found, remove
309 them and propagate the information to the loop. We assume that the
310 annotations come immediately before the condition of the loop. */
311
312 static void
313 replace_loop_annotate (void)
314 {
315 class loop *loop;
316 basic_block bb;
317 gimple_stmt_iterator gsi;
318 gimple *stmt;
319
320 FOR_EACH_LOOP (loop, 0)
321 {
322 /* First look into the header. */
323 replace_loop_annotate_in_block (loop->header, loop);
324
325 /* Then look into the latch, if any. */
326 if (loop->latch)
327 replace_loop_annotate_in_block (loop->latch, loop);
328
329 /* Push the global flag_finite_loops state down to individual loops. */
330 loop->finite_p = flag_finite_loops;
331 }
332
333 /* Remove IFN_ANNOTATE. Safeguard for the case loop->latch == NULL. */
334 FOR_EACH_BB_FN (bb, cfun)
335 {
336 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
337 {
338 stmt = gsi_stmt (gsi);
339 if (gimple_code (stmt) != GIMPLE_CALL)
340 continue;
341 if (!gimple_call_internal_p (stmt)
342 || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
343 continue;
344
345 switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
346 {
347 case annot_expr_ivdep_kind:
348 case annot_expr_unroll_kind:
349 case annot_expr_no_vector_kind:
350 case annot_expr_vector_kind:
351 case annot_expr_parallel_kind:
352 break;
353 default:
354 gcc_unreachable ();
355 }
356
357 warning_at (gimple_location (stmt), 0, "ignoring loop annotation");
358 stmt = gimple_build_assign (gimple_call_lhs (stmt),
359 gimple_call_arg (stmt, 0));
360 gsi_replace (&gsi, stmt, true);
361 }
362 }
363 }
364
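/* Main worker for the CFG construction pass: build the flowgraph for the
   body of the current function, clean it up, and set up the loop tree so
   that loop annotations can be transferred onto the discovered loops.  */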
365 static unsigned int
366 execute_build_cfg (void)
367 {
368 gimple_seq body = gimple_body (current_function_decl);
369
370 build_gimple_cfg (body);
371 gimple_set_body (current_function_decl, NULL);
372 if (dump_file && (dump_flags & TDF_DETAILS))
373 {
374 fprintf (dump_file, "Scope blocks:\n");
375 dump_scope_blocks (dump_file, dump_flags);
376 }
377 cleanup_tree_cfg ();
378
379 bb_to_omp_idx.release ();
380
381 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
382 replace_loop_annotate ();
383 return 0;
384 }
385
386 namespace {
387
388 const pass_data pass_data_build_cfg =
389 {
390 GIMPLE_PASS, /* type */
391 "cfg", /* name */
392 OPTGROUP_NONE, /* optinfo_flags */
393 TV_TREE_CFG, /* tv_id */
394 PROP_gimple_leh, /* properties_required */
395 ( PROP_cfg | PROP_loops ), /* properties_provided */
396 0, /* properties_destroyed */
397 0, /* todo_flags_start */
398 0, /* todo_flags_finish */
399 };
400
401 class pass_build_cfg : public gimple_opt_pass
402 {
403 public:
404   pass_build_cfg (gcc::context *ctxt)
405 : gimple_opt_pass (pass_data_build_cfg, ctxt)
406 {}
407
408 /* opt_pass methods: */
409   virtual unsigned int execute (function *) { return execute_build_cfg (); }
410
411 }; // class pass_build_cfg
412
413 } // anon namespace
414
415 gimple_opt_pass *
416 make_pass_build_cfg (gcc::context *ctxt)
417 {
418 return new pass_build_cfg (ctxt);
419 }
420
421
422 /* Return true if T is a computed goto. */
423
424 bool
425 computed_goto_p (gimple *t)
426 {
427 return (gimple_code (t) == GIMPLE_GOTO
428 && TREE_CODE (gimple_goto_dest (t)) != LABEL_DECL);
429 }
430
431 /* Returns true if the sequence of statements STMTS only contains
432 a call to __builtin_unreachable (). */
433
434 bool
435 gimple_seq_unreachable_p (gimple_seq stmts)
436 {
437 if (stmts == NULL
438       /* Return false if -fsanitize=unreachable; we don't want to
439 optimize away those calls, but rather turn them into
440 __ubsan_handle_builtin_unreachable () or __builtin_trap ()
441 later. */
442 || sanitize_flags_p (SANITIZE_UNREACHABLE))
443 return false;
444
445 gimple_stmt_iterator gsi = gsi_last (stmts);
446
447 if (!gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_UNREACHABLE))
448 return false;
449
450 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
451 {
452 gimple *stmt = gsi_stmt (gsi);
453 if (gimple_code (stmt) != GIMPLE_LABEL
454 && !is_gimple_debug (stmt)
455 && !gimple_clobber_p (stmt))
456 return false;
457 }
458 return true;
459 }
460
461 /* Returns true for edge E where e->src ends with a GIMPLE_COND and
462 the other edge points to a bb with just __builtin_unreachable ().
463 I.e. return true for C->M edge in:
464 <bb C>:
465 ...
466 if (something)
467 goto <bb N>;
468 else
469 goto <bb M>;
470 <bb N>:
471 __builtin_unreachable ();
472 <bb M>: */
473
474 bool
475 assert_unreachable_fallthru_edge_p (edge e)
476 {
477 basic_block pred_bb = e->src;
478 gimple *last = last_stmt (pred_bb);
479 if (last && gimple_code (last) == GIMPLE_COND)
480 {
481 basic_block other_bb = EDGE_SUCC (pred_bb, 0)->dest;
482 if (other_bb == e->dest)
483 other_bb = EDGE_SUCC (pred_bb, 1)->dest;
484 if (EDGE_COUNT (other_bb->succs) == 0)
485 return gimple_seq_unreachable_p (bb_seq (other_bb));
486 }
487 return false;
488 }
489
490
491 /* Initialize GF_CALL_CTRL_ALTERING flag, which indicates the call
492 could alter control flow except via eh. We initialize the flag at
493 CFG build time and only ever clear it later. */
494
495 static void
496 gimple_call_initialize_ctrl_altering (gimple *stmt)
497 {
498 int flags = gimple_call_flags (stmt);
499
500 /* A call alters control flow if it can make an abnormal goto. */
501 if (call_can_make_abnormal_goto (stmt)
502 /* A call also alters control flow if it does not return. */
503 || flags & ECF_NORETURN
504 /* TM ending statements have backedges out of the transaction.
505 Return true so we split the basic block containing them.
506 Note that the TM_BUILTIN test is merely an optimization. */
507 || ((flags & ECF_TM_BUILTIN)
508 && is_tm_ending_fndecl (gimple_call_fndecl (stmt)))
509 /* BUILT_IN_RETURN call is same as return statement. */
510 || gimple_call_builtin_p (stmt, BUILT_IN_RETURN)
511 /* IFN_UNIQUE should be the last insn, to make checking for it
512 as cheap as possible. */
513 || (gimple_call_internal_p (stmt)
514 && gimple_call_internal_unique_p (stmt)))
515 gimple_call_set_ctrl_altering (stmt, true);
516 else
517 gimple_call_set_ctrl_altering (stmt, false);
518 }
519
520
521 /* Insert SEQ after BB and build a flowgraph. */
522
523 static basic_block
524 make_blocks_1 (gimple_seq seq, basic_block bb)
525 {
526 gimple_stmt_iterator i = gsi_start (seq);
527 gimple *stmt = NULL;
528 gimple *prev_stmt = NULL;
529 bool start_new_block = true;
530 bool first_stmt_of_seq = true;
531
532 while (!gsi_end_p (i))
533 {
534 /* PREV_STMT should only be set to a debug stmt if the debug
535 stmt is before nondebug stmts. Once stmt reaches a nondebug
536 nonlabel, prev_stmt will be set to it, so that
537 stmt_starts_bb_p will know to start a new block if a label is
538 found. However, if stmt was a label after debug stmts only,
539 keep the label in prev_stmt even if we find further debug
540 stmts, for there may be other labels after them, and they
541 should land in the same block. */
542 if (!prev_stmt || !stmt || !is_gimple_debug (stmt))
543 prev_stmt = stmt;
544 stmt = gsi_stmt (i);
545
546 if (stmt && is_gimple_call (stmt))
547 gimple_call_initialize_ctrl_altering (stmt);
548
549 /* If the statement starts a new basic block or if we have determined
550 in a previous pass that we need to create a new block for STMT, do
551 so now. */
552 if (start_new_block || stmt_starts_bb_p (stmt, prev_stmt))
553 {
554 if (!first_stmt_of_seq)
555 gsi_split_seq_before (&i, &seq);
556 bb = create_basic_block (seq, bb);
557 start_new_block = false;
558 prev_stmt = NULL;
559 }
560
561 /* Now add STMT to BB and create the subgraphs for special statement
562 codes. */
563 gimple_set_bb (stmt, bb);
564
565 /* If STMT is a basic block terminator, set START_NEW_BLOCK for the
566 next iteration. */
567 if (stmt_ends_bb_p (stmt))
568 {
569 	  /* If the stmt can make an abnormal goto, use a new temporary
570 for the assignment to the LHS. This makes sure the old value
571 of the LHS is available on the abnormal edge. Otherwise
572 we will end up with overlapping life-ranges for abnormal
573 SSA names. */
574 if (gimple_has_lhs (stmt)
575 && stmt_can_make_abnormal_goto (stmt)
576 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
577 {
578 tree lhs = gimple_get_lhs (stmt);
579 tree tmp = create_tmp_var (TREE_TYPE (lhs));
580 gimple *s = gimple_build_assign (lhs, tmp);
581 gimple_set_location (s, gimple_location (stmt));
582 gimple_set_block (s, gimple_block (stmt));
583 gimple_set_lhs (stmt, tmp);
584 gsi_insert_after (&i, s, GSI_SAME_STMT);
585 }
586 start_new_block = true;
587 }
588
589 gsi_next (&i);
590 first_stmt_of_seq = false;
591 }
592 return bb;
593 }
594
595 /* Build a flowgraph for the sequence of stmts SEQ. */
596
597 static void
598 make_blocks (gimple_seq seq)
599 {
600 /* Look for debug markers right before labels, and move the debug
601 stmts after the labels. Accepting labels among debug markers
602 adds no value, just complexity; if we wanted to annotate labels
603 with view numbers (so sequencing among markers would matter) or
604 somesuch, we're probably better off still moving the labels, but
605 adding other debug annotations in their original positions or
606 emitting nonbind or bind markers associated with the labels in
607 the original position of the labels.
608
609 Moving labels would probably be simpler, but we can't do that:
610 moving labels assigns label ids to them, and doing so because of
611 debug markers makes for -fcompare-debug and possibly even codegen
612 differences. So, we have to move the debug stmts instead. To
613 that end, we scan SEQ backwards, marking the position of the
614 latest (earliest we find) label, and moving debug stmts that are
615 not separated from it by nondebug nonlabel stmts after the
616 label. */
617 if (MAY_HAVE_DEBUG_MARKER_STMTS)
618 {
619 gimple_stmt_iterator label = gsi_none ();
620
621 for (gimple_stmt_iterator i = gsi_last (seq); !gsi_end_p (i); gsi_prev (&i))
622 {
623 gimple *stmt = gsi_stmt (i);
624
625 /* If this is the first label we encounter (latest in SEQ)
626 before nondebug stmts, record its position. */
627 if (is_a <glabel *> (stmt))
628 {
629 if (gsi_end_p (label))
630 label = i;
631 continue;
632 }
633
634 /* Without a recorded label position to move debug stmts to,
635 there's nothing to do. */
636 if (gsi_end_p (label))
637 continue;
638
639 /* Move the debug stmt at I after LABEL. */
640 if (is_gimple_debug (stmt))
641 {
642 gcc_assert (gimple_debug_nonbind_marker_p (stmt));
643 /* As STMT is removed, I advances to the stmt after
644 STMT, so the gsi_prev in the for "increment"
645 expression gets us to the stmt we're to visit after
646 STMT. LABEL, however, would advance to the moved
647 stmt if we passed it to gsi_move_after, so pass it a
648 copy instead, so as to keep LABEL pointing to the
649 LABEL. */
650 gimple_stmt_iterator copy = label;
651 	      gsi_move_after (&i, &copy);
652 continue;
653 }
654
655 /* There aren't any (more?) debug stmts before label, so
656 there isn't anything else to move after it. */
657 label = gsi_none ();
658 }
659 }
660
661 make_blocks_1 (seq, ENTRY_BLOCK_PTR_FOR_FN (cfun));
662 }
663
664 /* Create and return a new empty basic block after bb AFTER.  H is the
    gimple_seq to put into the new block and E must be NULL.  */
665
666 static basic_block
667 create_bb (void *h, void *e, basic_block after)
668 {
669 basic_block bb;
670
671 gcc_assert (!e);
672
673 /* Create and initialize a new basic block. Since alloc_block uses
674 GC allocation that clears memory to allocate a basic block, we do
675 not have to clear the newly allocated basic block here. */
676 bb = alloc_block ();
677
678 bb->index = last_basic_block_for_fn (cfun);
679 bb->flags = BB_NEW;
680 set_bb_seq (bb, h ? (gimple_seq) h : NULL);
681
682 /* Add the new block to the linked list of blocks. */
683 link_block (bb, after);
684
685 /* Grow the basic block array if needed. */
686 if ((size_t) last_basic_block_for_fn (cfun)
687 == basic_block_info_for_fn (cfun)->length ())
688 vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
689 last_basic_block_for_fn (cfun) + 1);
690
691 /* Add the newly created block to the array. */
692 SET_BASIC_BLOCK_FOR_FN (cfun, last_basic_block_for_fn (cfun), bb);
693
694 n_basic_blocks_for_fn (cfun)++;
695 last_basic_block_for_fn (cfun)++;
696
697 return bb;
698 }
699
700
701 /*---------------------------------------------------------------------------
702 Edge creation
703 ---------------------------------------------------------------------------*/
704
705 /* If basic block BB has an abnormal edge to a basic block
706    containing an IFN_ABNORMAL_DISPATCHER internal call, return
707    the dispatcher's basic block, otherwise return NULL.  */
708
709 basic_block
710 get_abnormal_succ_dispatcher (basic_block bb)
711 {
712 edge e;
713 edge_iterator ei;
714
715 FOR_EACH_EDGE (e, ei, bb->succs)
716 if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) == EDGE_ABNORMAL)
717 {
718 gimple_stmt_iterator gsi
719 = gsi_start_nondebug_after_labels_bb (e->dest);
720 gimple *g = gsi_stmt (gsi);
721 if (g && gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
722 return e->dest;
723 }
724 return NULL;
725 }
726
727 /* Helper function for make_edges. Create a basic block with
728    an ABNORMAL_DISPATCHER internal call in it if needed, and
729 create abnormal edges from BBS to it and from it to FOR_BB
730 if COMPUTED_GOTO is false, otherwise factor the computed gotos. */
731
732 static void
733 handle_abnormal_edges (basic_block *dispatcher_bbs, basic_block for_bb,
734 auto_vec<basic_block> *bbs, bool computed_goto)
735 {
736 basic_block *dispatcher = dispatcher_bbs + (computed_goto ? 1 : 0);
737 unsigned int idx = 0;
738 basic_block bb;
739 bool inner = false;
740
741 if (!bb_to_omp_idx.is_empty ())
742 {
743 dispatcher = dispatcher_bbs + 2 * bb_to_omp_idx[for_bb->index];
744 if (bb_to_omp_idx[for_bb->index] != 0)
745 inner = true;
746 }
747
748 /* If the dispatcher has been created already, then there are basic
749 blocks with abnormal edges to it, so just make a new edge to
750 for_bb. */
751 if (*dispatcher == NULL)
752 {
753 /* Check if there are any basic blocks that need to have
754 abnormal edges to this dispatcher. If there are none, return
755 early. */
756 if (bb_to_omp_idx.is_empty ())
757 {
758 if (bbs->is_empty ())
759 return;
760 }
761 else
762 {
763 FOR_EACH_VEC_ELT (*bbs, idx, bb)
764 if (bb_to_omp_idx[bb->index] == bb_to_omp_idx[for_bb->index])
765 break;
766 if (bb == NULL)
767 return;
768 }
769
770 /* Create the dispatcher bb. */
771 *dispatcher = create_basic_block (NULL, for_bb);
772 if (computed_goto)
773 {
774 /* Factor computed gotos into a common computed goto site. Also
775 record the location of that site so that we can un-factor the
776 gotos after we have converted back to normal form. */
777 gimple_stmt_iterator gsi = gsi_start_bb (*dispatcher);
778
779 /* Create the destination of the factored goto. Each original
780 computed goto will put its desired destination into this
781 variable and jump to the label we create immediately below. */
782 tree var = create_tmp_var (ptr_type_node, "gotovar");
783
784 /* Build a label for the new block which will contain the
785 factored computed goto. */
786 tree factored_label_decl
787 = create_artificial_label (UNKNOWN_LOCATION);
788 gimple *factored_computed_goto_label
789 = gimple_build_label (factored_label_decl);
790 gsi_insert_after (&gsi, factored_computed_goto_label, GSI_NEW_STMT);
791
792 /* Build our new computed goto. */
793 gimple *factored_computed_goto = gimple_build_goto (var);
794 gsi_insert_after (&gsi, factored_computed_goto, GSI_NEW_STMT);
795
796 FOR_EACH_VEC_ELT (*bbs, idx, bb)
797 {
798 if (!bb_to_omp_idx.is_empty ()
799 && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
800 continue;
801
802 gsi = gsi_last_bb (bb);
803 gimple *last = gsi_stmt (gsi);
804
805 gcc_assert (computed_goto_p (last));
806
807 /* Copy the original computed goto's destination into VAR. */
808 gimple *assignment
809 = gimple_build_assign (var, gimple_goto_dest (last));
810 gsi_insert_before (&gsi, assignment, GSI_SAME_STMT);
811
812 edge e = make_edge (bb, *dispatcher, EDGE_FALLTHRU);
813 e->goto_locus = gimple_location (last);
814 gsi_remove (&gsi, true);
815 }
816 }
817 else
818 {
819 tree arg = inner ? boolean_true_node : boolean_false_node;
820 gimple *g = gimple_build_call_internal (IFN_ABNORMAL_DISPATCHER,
821 1, arg);
822 gimple_stmt_iterator gsi = gsi_after_labels (*dispatcher);
823 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
824
825 /* Create predecessor edges of the dispatcher. */
826 FOR_EACH_VEC_ELT (*bbs, idx, bb)
827 {
828 if (!bb_to_omp_idx.is_empty ()
829 && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
830 continue;
831 make_edge (bb, *dispatcher, EDGE_ABNORMAL);
832 }
833 }
834 }
835
836 make_edge (*dispatcher, for_bb, EDGE_ABNORMAL);
837 }
838
839 /* Creates outgoing edges for BB.  Returns 1 when it ends with a
840    computed goto, returns 2 when it ends with a statement that
841    might return to this function via a nonlocal goto, otherwise
842 return 0. Updates *PCUR_REGION with the OMP region this BB is in. */
843
844 static int
845 make_edges_bb (basic_block bb, struct omp_region **pcur_region, int *pomp_index)
846 {
847 gimple *last = last_stmt (bb);
848 bool fallthru = false;
849 int ret = 0;
850
851 if (!last)
852 return ret;
853
854 switch (gimple_code (last))
855 {
856 case GIMPLE_GOTO:
857 if (make_goto_expr_edges (bb))
858 ret = 1;
859 fallthru = false;
860 break;
861 case GIMPLE_RETURN:
862 {
863 edge e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
864 e->goto_locus = gimple_location (last);
865 fallthru = false;
866 }
867 break;
868 case GIMPLE_COND:
869 make_cond_expr_edges (bb);
870 fallthru = false;
871 break;
872 case GIMPLE_SWITCH:
873 make_gimple_switch_edges (as_a <gswitch *> (last), bb);
874 fallthru = false;
875 break;
876 case GIMPLE_RESX:
877 make_eh_edges (last);
878 fallthru = false;
879 break;
880 case GIMPLE_EH_DISPATCH:
881 fallthru = make_eh_dispatch_edges (as_a <geh_dispatch *> (last));
882 break;
883
884 case GIMPLE_CALL:
885 /* If this function receives a nonlocal goto, then we need to
886 make edges from this call site to all the nonlocal goto
887 handlers. */
888 if (stmt_can_make_abnormal_goto (last))
889 ret = 2;
890
891 /* If this statement has reachable exception handlers, then
892 create abnormal edges to them. */
893 make_eh_edges (last);
894
895 /* BUILTIN_RETURN is really a return statement. */
896 if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
897 {
898 make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
899 fallthru = false;
900 }
901 /* Some calls are known not to return. */
902 else
903 fallthru = !gimple_call_noreturn_p (last);
904 break;
905
906 case GIMPLE_ASSIGN:
907 /* A GIMPLE_ASSIGN may throw internally and thus be considered
908 control-altering. */
909 if (is_ctrl_altering_stmt (last))
910 make_eh_edges (last);
911 fallthru = true;
912 break;
913
914 case GIMPLE_ASM:
915 make_gimple_asm_edges (bb);
916 fallthru = true;
917 break;
918
919 CASE_GIMPLE_OMP:
920 fallthru = omp_make_gimple_edges (bb, pcur_region, pomp_index);
921 break;
922
923 case GIMPLE_TRANSACTION:
924 {
925 gtransaction *txn = as_a <gtransaction *> (last);
926 tree label1 = gimple_transaction_label_norm (txn);
927 tree label2 = gimple_transaction_label_uninst (txn);
928
929 if (label1)
930 make_edge (bb, label_to_block (cfun, label1), EDGE_FALLTHRU);
931 if (label2)
932 make_edge (bb, label_to_block (cfun, label2),
933 EDGE_TM_UNINSTRUMENTED | (label1 ? 0 : EDGE_FALLTHRU));
934
935 tree label3 = gimple_transaction_label_over (txn);
936 if (gimple_transaction_subcode (txn)
937 & (GTMA_HAVE_ABORT | GTMA_IS_OUTER))
938 make_edge (bb, label_to_block (cfun, label3), EDGE_TM_ABORT);
939
940 fallthru = false;
941 }
942 break;
943
944 default:
945 gcc_assert (!stmt_ends_bb_p (last));
946 fallthru = true;
947 break;
948 }
949
950 if (fallthru)
951 make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
952
953 return ret;
954 }
955
956 /* Join all the blocks in the flowgraph. */
957
958 static void
959 make_edges (void)
960 {
961 basic_block bb;
962 struct omp_region *cur_region = NULL;
963 auto_vec<basic_block> ab_edge_goto;
964 auto_vec<basic_block> ab_edge_call;
965 int cur_omp_region_idx = 0;
966
967 /* Create an edge from entry to the first block with executable
968 statements in it. */
969 make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun),
970 BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS),
971 EDGE_FALLTHRU);
972
973 /* Traverse the basic block array placing edges. */
974 FOR_EACH_BB_FN (bb, cfun)
975 {
976 int mer;
977
978 if (!bb_to_omp_idx.is_empty ())
979 bb_to_omp_idx[bb->index] = cur_omp_region_idx;
980
981 mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx);
982 if (mer == 1)
983 ab_edge_goto.safe_push (bb);
984 else if (mer == 2)
985 ab_edge_call.safe_push (bb);
986
987 if (cur_region && bb_to_omp_idx.is_empty ())
988 bb_to_omp_idx.safe_grow_cleared (n_basic_blocks_for_fn (cfun), true);
989 }
990
991 /* Computed gotos are hell to deal with, especially if there are
992 lots of them with a large number of destinations. So we factor
993 them to a common computed goto location before we build the
994 edge list. After we convert back to normal form, we will un-factor
995 the computed gotos since factoring introduces an unwanted jump.
996 For non-local gotos and abnormal edges from calls to calls that return
997 twice or forced labels, factor the abnormal edges too, by having all
998 abnormal edges from the calls go to a common artificial basic block
999 with ABNORMAL_DISPATCHER internal call and abnormal edges from that
1000 basic block to all forced labels and calls returning twice.
1001 We do this per-OpenMP structured block, because those regions
1002 are guaranteed to be single entry single exit by the standard,
1003 so it is not allowed to enter or exit such regions abnormally this way,
1004 thus all computed gotos, non-local gotos and setjmp/longjmp calls
1005 must not transfer control across SESE region boundaries. */
1006 if (!ab_edge_goto.is_empty () || !ab_edge_call.is_empty ())
1007 {
1008 gimple_stmt_iterator gsi;
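      /* Element 0 holds the dispatcher block used for abnormal call edges,
	 element 1 the one used for computed gotos.  */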
1009 basic_block dispatcher_bb_array[2] = { NULL, NULL };
1010 basic_block *dispatcher_bbs = dispatcher_bb_array;
1011 int count = n_basic_blocks_for_fn (cfun);
1012
1013 if (!bb_to_omp_idx.is_empty ())
1014 dispatcher_bbs = XCNEWVEC (basic_block, 2 * count);
1015
1016 FOR_EACH_BB_FN (bb, cfun)
1017 {
1018 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1019 {
1020 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
1021 tree target;
1022
1023 if (!label_stmt)
1024 break;
1025
1026 target = gimple_label_label (label_stmt);
1027
1028 /* Make an edge to every label block that has been marked as a
1029 potential target for a computed goto or a non-local goto. */
1030 if (FORCED_LABEL (target))
1031 handle_abnormal_edges (dispatcher_bbs, bb, &ab_edge_goto,
1032 true);
1033 if (DECL_NONLOCAL (target))
1034 {
1035 handle_abnormal_edges (dispatcher_bbs, bb, &ab_edge_call,
1036 false);
1037 break;
1038 }
1039 }
1040
1041 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
1042 gsi_next_nondebug (&gsi);
1043 if (!gsi_end_p (gsi))
1044 {
1045 /* Make an edge to every setjmp-like call. */
1046 gimple *call_stmt = gsi_stmt (gsi);
1047 if (is_gimple_call (call_stmt)
1048 && ((gimple_call_flags (call_stmt) & ECF_RETURNS_TWICE)
1049 || gimple_call_builtin_p (call_stmt,
1050 BUILT_IN_SETJMP_RECEIVER)))
1051 handle_abnormal_edges (dispatcher_bbs, bb, &ab_edge_call,
1052 false);
1053 }
1054 }
1055
1056 if (!bb_to_omp_idx.is_empty ())
1057 XDELETE (dispatcher_bbs);
1058 }
1059
1060 omp_free_regions ();
1061 }
1062
1063 /* Add SEQ after GSI.  Start a new bb after GSI, and create further bbs as
1064 needed. Returns true if new bbs were created.
1065 Note: This is transitional code, and should not be used for new code. We
1066 should be able to get rid of this by rewriting all target va-arg
1067 gimplification hooks to use an interface gimple_build_cond_value as described
1068 in https://gcc.gnu.org/ml/gcc-patches/2015-02/msg01194.html. */
1069
1070 bool
1071 gimple_find_sub_bbs (gimple_seq seq, gimple_stmt_iterator *gsi)
1072 {
1073 gimple *stmt = gsi_stmt (*gsi);
1074 basic_block bb = gimple_bb (stmt);
1075 basic_block lastbb, afterbb;
1076 int old_num_bbs = n_basic_blocks_for_fn (cfun);
1077 edge e;
1078 lastbb = make_blocks_1 (seq, bb);
1079 if (old_num_bbs == n_basic_blocks_for_fn (cfun))
1080 return false;
1081 e = split_block (bb, stmt);
1082 /* Move e->dest to come after the new basic blocks. */
1083 afterbb = e->dest;
1084 unlink_block (afterbb);
1085 link_block (afterbb, lastbb);
1086 redirect_edge_succ (e, bb->next_bb);
1087 bb = bb->next_bb;
1088 while (bb != afterbb)
1089 {
1090 struct omp_region *cur_region = NULL;
1091 profile_count cnt = profile_count::zero ();
1092 bool all = true;
1093
1094 int cur_omp_region_idx = 0;
1095 int mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx);
1096 gcc_assert (!mer && !cur_region);
1097 add_bb_to_loop (bb, afterbb->loop_father);
1098
1099 edge e;
1100 edge_iterator ei;
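      /* Sum the counts of the incoming edges; the sum is used only when every
	 edge count was initialized or when profile data was read.  */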
1101 FOR_EACH_EDGE (e, ei, bb->preds)
1102 {
1103 if (e->count ().initialized_p ())
1104 cnt += e->count ();
1105 else
1106 all = false;
1107 }
1108 tree_guess_outgoing_edge_probabilities (bb);
1109 if (all || profile_status_for_fn (cfun) == PROFILE_READ)
1110 bb->count = cnt;
1111
1112 bb = bb->next_bb;
1113 }
1114 return true;
1115 }
1116
1117 /* Find the next available discriminator value for LINE.  The
1118 discriminator distinguishes among several basic blocks that
1119 share a common locus, allowing for more accurate sample-based
1120 profiling. */
1121
1122 static int
1123 next_discriminator_for_locus (int line)
1124 {
1125 struct locus_discrim_map item;
1126 struct locus_discrim_map **slot;
1127
1128 item.location_line = line;
1129 item.discriminator = 0;
1130 slot = discriminator_per_locus->find_slot_with_hash (&item, line, INSERT);
1131 gcc_assert (slot);
1132 if (*slot == HTAB_EMPTY_ENTRY)
1133 {
1134 *slot = XNEW (struct locus_discrim_map);
1135 gcc_assert (*slot);
1136 (*slot)->location_line = line;
1137 (*slot)->discriminator = 0;
1138 }
1139 (*slot)->discriminator++;
1140 return (*slot)->discriminator;
1141 }
1142
1143 /* Return TRUE if LOCUS1 and LOCUS2 refer to the same source line. */
1144
1145 static bool
1146 same_line_p (location_t locus1, expanded_location *from, location_t locus2)
1147 {
1148 expanded_location to;
1149
1150 if (locus1 == locus2)
1151 return true;
1152
1153 to = expand_location (locus2);
1154
1155 if (from->line != to.line)
1156 return false;
1157 if (from->file == to.file)
1158 return true;
1159 return (from->file != NULL
1160 && to.file != NULL
1161 && filename_cmp (from->file, to.file) == 0);
1162 }
1163
1164 /* Assign discriminators to each basic block. */
1165
1166 static void
1167 assign_discriminators (void)
1168 {
1169 basic_block bb;
1170
1171 FOR_EACH_BB_FN (bb, cfun)
1172 {
1173 edge e;
1174 edge_iterator ei;
1175 gimple *last = last_stmt (bb);
1176 location_t locus = last ? gimple_location (last) : UNKNOWN_LOCATION;
1177
1178 if (locus == UNKNOWN_LOCATION)
1179 continue;
1180
1181 expanded_location locus_e = expand_location (locus);
1182
1183 FOR_EACH_EDGE (e, ei, bb->succs)
1184 {
1185 gimple *first = first_non_label_stmt (e->dest);
1186 gimple *last = last_stmt (e->dest);
1187 if ((first && same_line_p (locus, &locus_e,
1188 gimple_location (first)))
1189 || (last && same_line_p (locus, &locus_e,
1190 gimple_location (last))))
1191 {
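	    /* Give one of the two blocks a fresh discriminator so that the
	       shared source line can be distinguished in sample profiles:
	       BB if the destination already has one, the destination
	       otherwise.  */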
1192 if (e->dest->discriminator != 0 && bb->discriminator == 0)
1193 bb->discriminator
1194 = next_discriminator_for_locus (locus_e.line);
1195 else
1196 e->dest->discriminator
1197 = next_discriminator_for_locus (locus_e.line);
1198 }
1199 }
1200 }
1201 }
1202
1203 /* Create the edges for a GIMPLE_COND starting at block BB. */
1204
1205 static void
1206 make_cond_expr_edges (basic_block bb)
1207 {
1208 gcond *entry = as_a <gcond *> (last_stmt (bb));
1209 gimple *then_stmt, *else_stmt;
1210 basic_block then_bb, else_bb;
1211 tree then_label, else_label;
1212 edge e;
1213
1214 gcc_assert (entry);
1215 gcc_assert (gimple_code (entry) == GIMPLE_COND);
1216
1217 /* Entry basic blocks for each component. */
1218 then_label = gimple_cond_true_label (entry);
1219 else_label = gimple_cond_false_label (entry);
1220 then_bb = label_to_block (cfun, then_label);
1221 else_bb = label_to_block (cfun, else_label);
1222 then_stmt = first_stmt (then_bb);
1223 else_stmt = first_stmt (else_bb);
1224
1225 e = make_edge (bb, then_bb, EDGE_TRUE_VALUE);
1226 e->goto_locus = gimple_location (then_stmt);
1227 e = make_edge (bb, else_bb, EDGE_FALSE_VALUE);
1228 if (e)
1229 e->goto_locus = gimple_location (else_stmt);
1230
1231 /* We do not need the labels anymore. */
1232 gimple_cond_set_true_label (entry, NULL_TREE);
1233 gimple_cond_set_false_label (entry, NULL_TREE);
1234 }
1235
1236
1237 /* Called for each element in the hash table (P) as we delete the
1238 edge to cases hash table.
1239
1240 Clear all the CASE_CHAINs to prevent problems with copying of
1241 SWITCH_EXPRs and structure sharing rules, then free the hash table
1242 element. */
1243
1244 bool
1245 edge_to_cases_cleanup (edge const &, tree const &value, void *)
1246 {
1247 tree t, next;
1248
1249 for (t = value; t; t = next)
1250 {
1251 next = CASE_CHAIN (t);
1252 CASE_CHAIN (t) = NULL;
1253 }
1254
1255 return true;
1256 }
1257
1258 /* Start recording information mapping edges to case labels. */
1259
1260 void
1261 start_recording_case_labels (void)
1262 {
1263 gcc_assert (edge_to_cases == NULL);
1264 edge_to_cases = new hash_map<edge, tree>;
1265 touched_switch_bbs = BITMAP_ALLOC (NULL);
1266 }
1267
1268 /* Return nonzero if we are recording information for case labels. */
1269
1270 static bool
1271 recording_case_labels_p (void)
1272 {
1273 return (edge_to_cases != NULL);
1274 }
1275
1276 /* Stop recording information mapping edges to case labels and
1277 remove any information we have recorded. */
1278 void
1279 end_recording_case_labels (void)
1280 {
1281 bitmap_iterator bi;
1282 unsigned i;
1283 edge_to_cases->traverse<void *, edge_to_cases_cleanup> (NULL);
1284 delete edge_to_cases;
1285 edge_to_cases = NULL;
1286 EXECUTE_IF_SET_IN_BITMAP (touched_switch_bbs, 0, i, bi)
1287 {
1288 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1289 if (bb)
1290 {
1291 gimple *stmt = last_stmt (bb);
1292 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
1293 group_case_labels_stmt (as_a <gswitch *> (stmt));
1294 }
1295 }
1296 BITMAP_FREE (touched_switch_bbs);
1297 }
1298
1299 /* If we are inside a {start,end}_recording_cases block, then return
1300 a chain of CASE_LABEL_EXPRs from T which reference E.
1301
1302 Otherwise return NULL. */
1303
1304 static tree
1305 get_cases_for_edge (edge e, gswitch *t)
1306 {
1307 tree *slot;
1308 size_t i, n;
1309
1310 /* If we are not recording cases, then we do not have CASE_LABEL_EXPR
1311 chains available. Return NULL so the caller can detect this case. */
1312 if (!recording_case_labels_p ())
1313 return NULL;
1314
1315 slot = edge_to_cases->get (e);
1316 if (slot)
1317 return *slot;
1318
1319 /* If we did not find E in the hash table, then this must be the first
1320 time we have been queried for information about E & T. Add all the
1321 elements from T to the hash table then perform the query again. */
1322
1323 n = gimple_switch_num_labels (t);
1324 for (i = 0; i < n; i++)
1325 {
1326 tree elt = gimple_switch_label (t, i);
1327 tree lab = CASE_LABEL (elt);
1328 basic_block label_bb = label_to_block (cfun, lab);
1329 edge this_edge = find_edge (e->src, label_bb);
1330
1331 /* Add it to the chain of CASE_LABEL_EXPRs referencing E, or create
1332 a new chain. */
1333 tree &s = edge_to_cases->get_or_insert (this_edge);
1334 CASE_CHAIN (elt) = s;
1335 s = elt;
1336 }
1337
1338 return *edge_to_cases->get (e);
1339 }
1340
1341 /* Create the edges for a GIMPLE_SWITCH starting at block BB. */
1342
1343 static void
1344 make_gimple_switch_edges (gswitch *entry, basic_block bb)
1345 {
1346 size_t i, n;
1347
1348 n = gimple_switch_num_labels (entry);
1349
1350 for (i = 0; i < n; ++i)
1351 {
1352 basic_block label_bb = gimple_switch_label_bb (cfun, entry, i);
1353 make_edge (bb, label_bb, 0);
1354 }
1355 }
1356
1357
1358 /* Return the basic block holding label DEST. */
1359
1360 basic_block
1361 label_to_block (struct function *ifun, tree dest)
1362 {
1363 int uid = LABEL_DECL_UID (dest);
1364
1365   /* We would die hard when faced with an undefined label.  Emit a label to
1366 the very first basic block. This will hopefully make even the dataflow
1367 and undefined variable warnings quite right. */
1368 if (seen_error () && uid < 0)
1369 {
1370 gimple_stmt_iterator gsi =
1371 gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS));
1372 gimple *stmt;
1373
1374 stmt = gimple_build_label (dest);
1375 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
1376 uid = LABEL_DECL_UID (dest);
1377 }
1378 if (vec_safe_length (ifun->cfg->x_label_to_block_map) <= (unsigned int) uid)
1379 return NULL;
1380 return (*ifun->cfg->x_label_to_block_map)[uid];
1381 }
1382
1383 /* Create edges for a goto statement at block BB. Returns true
1384 if abnormal edges should be created. */
1385
1386 static bool
1387 make_goto_expr_edges (basic_block bb)
1388 {
1389 gimple_stmt_iterator last = gsi_last_bb (bb);
1390 gimple *goto_t = gsi_stmt (last);
1391
1392 /* A simple GOTO creates normal edges. */
1393 if (simple_goto_p (goto_t))
1394 {
1395 tree dest = gimple_goto_dest (goto_t);
1396 basic_block label_bb = label_to_block (cfun, dest);
1397 edge e = make_edge (bb, label_bb, EDGE_FALLTHRU);
1398 e->goto_locus = gimple_location (goto_t);
1399 gsi_remove (&last, true);
1400 return false;
1401 }
1402
1403 /* A computed GOTO creates abnormal edges. */
1404 return true;
1405 }
1406
1407 /* Create edges for an asm statement with labels at block BB. */
1408
1409 static void
1410 make_gimple_asm_edges (basic_block bb)
1411 {
1412 gasm *stmt = as_a <gasm *> (last_stmt (bb));
1413 int i, n = gimple_asm_nlabels (stmt);
1414
1415 for (i = 0; i < n; ++i)
1416 {
1417 tree label = TREE_VALUE (gimple_asm_label_op (stmt, i));
1418 basic_block label_bb = label_to_block (cfun, label);
1419 make_edge (bb, label_bb, 0);
1420 }
1421 }
1422
1423 /*---------------------------------------------------------------------------
1424 Flowgraph analysis
1425 ---------------------------------------------------------------------------*/
1426
1427 /* Clean up useless labels in basic blocks.  This is something we wish
1428 to do early because it allows us to group case labels before creating
1429 the edges for the CFG, and it speeds up block statement iterators in
1430 all passes later on.
1431 We rerun this pass after CFG is created, to get rid of the labels that
1432    are no longer referenced.  After that we do not run it any more, since
1433 (almost) no new labels should be created. */
1434
1435 /* A map from basic block index to the leading label of that block. */
1436 struct label_record
1437 {
1438 /* The label. */
1439 tree label;
1440
1441 /* True if the label is referenced from somewhere. */
1442 bool used;
1443 };
1444
1445 /* Given LABEL return the first label in the same basic block. */
1446
1447 static tree
1448 main_block_label (tree label, label_record *label_for_bb)
1449 {
1450 basic_block bb = label_to_block (cfun, label);
1451 tree main_label = label_for_bb[bb->index].label;
1452
1453   /* label_to_block may have inserted an undefined label into the chain.  */
1454 if (!main_label)
1455 {
1456 label_for_bb[bb->index].label = label;
1457 main_label = label;
1458 }
1459
1460 label_for_bb[bb->index].used = true;
1461 return main_label;
1462 }
1463
1464 /* Clean up redundant labels within the exception tree. */
1465
1466 static void
1467 cleanup_dead_labels_eh (label_record *label_for_bb)
1468 {
1469 eh_landing_pad lp;
1470 eh_region r;
1471 tree lab;
1472 int i;
1473
1474 if (cfun->eh == NULL)
1475 return;
1476
1477 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
1478 if (lp && lp->post_landing_pad)
1479 {
1480 lab = main_block_label (lp->post_landing_pad, label_for_bb);
1481 if (lab != lp->post_landing_pad)
1482 {
1483 EH_LANDING_PAD_NR (lp->post_landing_pad) = 0;
1484 EH_LANDING_PAD_NR (lab) = lp->index;
1485 }
1486 }
1487
1488 FOR_ALL_EH_REGION (r)
1489 switch (r->type)
1490 {
1491 case ERT_CLEANUP:
1492 case ERT_MUST_NOT_THROW:
1493 break;
1494
1495 case ERT_TRY:
1496 {
1497 eh_catch c;
1498 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
1499 {
1500 lab = c->label;
1501 if (lab)
1502 c->label = main_block_label (lab, label_for_bb);
1503 }
1504 }
1505 break;
1506
1507 case ERT_ALLOWED_EXCEPTIONS:
1508 lab = r->u.allowed.label;
1509 if (lab)
1510 r->u.allowed.label = main_block_label (lab, label_for_bb);
1511 break;
1512 }
1513 }
1514
1515
1516 /* Clean up redundant labels.  This is a three-step process:
1517 1) Find the leading label for each block.
1518 2) Redirect all references to labels to the leading labels.
1519 3) Cleanup all useless labels. */
1520
1521 void
1522 cleanup_dead_labels (void)
1523 {
1524 basic_block bb;
1525 label_record *label_for_bb = XCNEWVEC (struct label_record,
1526 last_basic_block_for_fn (cfun));
1527
1528 /* Find a suitable label for each block. We use the first user-defined
1529 label if there is one, or otherwise just the first label we see. */
1530 FOR_EACH_BB_FN (bb, cfun)
1531 {
1532 gimple_stmt_iterator i;
1533
1534 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
1535 {
1536 tree label;
1537 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i));
1538
1539 if (!label_stmt)
1540 break;
1541
1542 label = gimple_label_label (label_stmt);
1543
1544 /* If we have not yet seen a label for the current block,
1545 remember this one and see if there are more labels. */
1546 if (!label_for_bb[bb->index].label)
1547 {
1548 label_for_bb[bb->index].label = label;
1549 continue;
1550 }
1551
1552 /* If we did see a label for the current block already, but it
1553 is an artificially created label, replace it if the current
1554 label is a user defined label. */
1555 if (!DECL_ARTIFICIAL (label)
1556 && DECL_ARTIFICIAL (label_for_bb[bb->index].label))
1557 {
1558 label_for_bb[bb->index].label = label;
1559 break;
1560 }
1561 }
1562 }
1563
1564 /* Now redirect all jumps/branches to the selected label.
1565 First do so for each block ending in a control statement. */
1566 FOR_EACH_BB_FN (bb, cfun)
1567 {
1568 gimple *stmt = last_stmt (bb);
1569 tree label, new_label;
1570
1571 if (!stmt)
1572 continue;
1573
1574 switch (gimple_code (stmt))
1575 {
1576 case GIMPLE_COND:
1577 {
1578 gcond *cond_stmt = as_a <gcond *> (stmt);
1579 label = gimple_cond_true_label (cond_stmt);
1580 if (label)
1581 {
1582 new_label = main_block_label (label, label_for_bb);
1583 if (new_label != label)
1584 gimple_cond_set_true_label (cond_stmt, new_label);
1585 }
1586
1587 label = gimple_cond_false_label (cond_stmt);
1588 if (label)
1589 {
1590 new_label = main_block_label (label, label_for_bb);
1591 if (new_label != label)
1592 gimple_cond_set_false_label (cond_stmt, new_label);
1593 }
1594 }
1595 break;
1596
1597 case GIMPLE_SWITCH:
1598 {
1599 gswitch *switch_stmt = as_a <gswitch *> (stmt);
1600 size_t i, n = gimple_switch_num_labels (switch_stmt);
1601
1602 /* Replace all destination labels. */
1603 for (i = 0; i < n; ++i)
1604 {
1605 tree case_label = gimple_switch_label (switch_stmt, i);
1606 label = CASE_LABEL (case_label);
1607 new_label = main_block_label (label, label_for_bb);
1608 if (new_label != label)
1609 CASE_LABEL (case_label) = new_label;
1610 }
1611 break;
1612 }
1613
1614 case GIMPLE_ASM:
1615 {
1616 gasm *asm_stmt = as_a <gasm *> (stmt);
1617 int i, n = gimple_asm_nlabels (asm_stmt);
1618
1619 for (i = 0; i < n; ++i)
1620 {
1621 tree cons = gimple_asm_label_op (asm_stmt, i);
1622 tree label = main_block_label (TREE_VALUE (cons), label_for_bb);
1623 TREE_VALUE (cons) = label;
1624 }
1625 break;
1626 }
1627
1628 /* We have to handle gotos until they're removed, and we don't
1629 remove them until after we've created the CFG edges. */
1630 case GIMPLE_GOTO:
1631 if (!computed_goto_p (stmt))
1632 {
1633 ggoto *goto_stmt = as_a <ggoto *> (stmt);
1634 label = gimple_goto_dest (goto_stmt);
1635 new_label = main_block_label (label, label_for_bb);
1636 if (new_label != label)
1637 gimple_goto_set_dest (goto_stmt, new_label);
1638 }
1639 break;
1640
1641 case GIMPLE_TRANSACTION:
1642 {
1643 gtransaction *txn = as_a <gtransaction *> (stmt);
1644
1645 label = gimple_transaction_label_norm (txn);
1646 if (label)
1647 {
1648 new_label = main_block_label (label, label_for_bb);
1649 if (new_label != label)
1650 gimple_transaction_set_label_norm (txn, new_label);
1651 }
1652
1653 label = gimple_transaction_label_uninst (txn);
1654 if (label)
1655 {
1656 new_label = main_block_label (label, label_for_bb);
1657 if (new_label != label)
1658 gimple_transaction_set_label_uninst (txn, new_label);
1659 }
1660
1661 label = gimple_transaction_label_over (txn);
1662 if (label)
1663 {
1664 new_label = main_block_label (label, label_for_bb);
1665 if (new_label != label)
1666 gimple_transaction_set_label_over (txn, new_label);
1667 }
1668 }
1669 break;
1670
1671 default:
1672 break;
1673 }
1674 }
1675
1676 /* Do the same for the exception region tree labels. */
1677 cleanup_dead_labels_eh (label_for_bb);
1678
1679 /* Finally, purge dead labels. All user-defined labels and labels that
1680 can be the target of non-local gotos and labels which have their
1681 address taken are preserved. */
1682 FOR_EACH_BB_FN (bb, cfun)
1683 {
1684 gimple_stmt_iterator i;
1685 tree label_for_this_bb = label_for_bb[bb->index].label;
1686
1687 if (!label_for_this_bb)
1688 continue;
1689
1690 /* If the main label of the block is unused, we may still remove it. */
1691 if (!label_for_bb[bb->index].used)
1692 label_for_this_bb = NULL;
1693
1694 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
1695 {
1696 tree label;
1697 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i));
1698
1699 if (!label_stmt)
1700 break;
1701
1702 label = gimple_label_label (label_stmt);
1703
1704 if (label == label_for_this_bb
1705 || !DECL_ARTIFICIAL (label)
1706 || DECL_NONLOCAL (label)
1707 || FORCED_LABEL (label))
1708 gsi_next (&i);
1709 else
1710 gsi_remove (&i, true);
1711 }
1712 }
1713
1714 free (label_for_bb);
1715 }
1716
1717 /* Scan the sorted vector of cases in STMT (a GIMPLE_SWITCH) and combine
1718 the ones jumping to the same label.
1719 Eg. three separate entries 1: 2: 3: become one entry 1..3: */
1720
1721 bool
1722 group_case_labels_stmt (gswitch *stmt)
1723 {
1724 int old_size = gimple_switch_num_labels (stmt);
1725 int i, next_index, new_size;
1726 basic_block default_bb = NULL;
1727 hash_set<tree> *removed_labels = NULL;
1728
1729 default_bb = gimple_switch_default_bb (cfun, stmt);
1730
1731 /* Look for possible opportunities to merge cases. */
1732 new_size = i = 1;
1733 while (i < old_size)
1734 {
1735 tree base_case, base_high;
1736 basic_block base_bb;
1737
1738 base_case = gimple_switch_label (stmt, i);
1739
1740 gcc_assert (base_case);
1741 base_bb = label_to_block (cfun, CASE_LABEL (base_case));
1742
1743 /* Discard cases that have the same destination as the default case or
1744 whose destination blocks have already been removed as unreachable. */
1745 if (base_bb == NULL
1746 || base_bb == default_bb
1747 || (removed_labels
1748 && removed_labels->contains (CASE_LABEL (base_case))))
1749 {
1750 i++;
1751 continue;
1752 }
1753
1754 base_high = CASE_HIGH (base_case)
1755 ? CASE_HIGH (base_case)
1756 : CASE_LOW (base_case);
1757 next_index = i + 1;
1758
1759 /* Try to merge case labels. Break out when we reach the end
1760 of the label vector or when we cannot merge the next case
1761 label with the current one. */
1762 while (next_index < old_size)
1763 {
1764 tree merge_case = gimple_switch_label (stmt, next_index);
1765 basic_block merge_bb = label_to_block (cfun, CASE_LABEL (merge_case));
1766 wide_int bhp1 = wi::to_wide (base_high) + 1;
1767
1768 /* Merge the cases if they jump to the same place,
1769 and their ranges are consecutive. */
1770 if (merge_bb == base_bb
1771 && (removed_labels == NULL
1772 || !removed_labels->contains (CASE_LABEL (merge_case)))
1773 && wi::to_wide (CASE_LOW (merge_case)) == bhp1)
1774 {
1775 base_high
1776 = (CASE_HIGH (merge_case)
1777 ? CASE_HIGH (merge_case) : CASE_LOW (merge_case));
1778 CASE_HIGH (base_case) = base_high;
1779 next_index++;
1780 }
1781 else
1782 break;
1783 }
1784
1785 /* Discard cases that have an unreachable destination block. */
1786 if (EDGE_COUNT (base_bb->succs) == 0
1787 && gimple_seq_unreachable_p (bb_seq (base_bb))
1788 /* Don't optimize this if __builtin_unreachable () is the
1789 implicitly added one by the C++ FE too early, before
1790 -Wreturn-type can be diagnosed. We'll optimize it later
1791 during switchconv pass or any other cfg cleanup. */
1792 && (gimple_in_ssa_p (cfun)
1793 || (LOCATION_LOCUS (gimple_location (last_stmt (base_bb)))
1794 != BUILTINS_LOCATION)))
1795 {
1796 edge base_edge = find_edge (gimple_bb (stmt), base_bb);
1797 if (base_edge != NULL)
1798 {
1799 for (gimple_stmt_iterator gsi = gsi_start_bb (base_bb);
1800 !gsi_end_p (gsi); gsi_next (&gsi))
1801 if (glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi)))
1802 {
1803 if (FORCED_LABEL (gimple_label_label (stmt))
1804 || DECL_NONLOCAL (gimple_label_label (stmt)))
1805 {
1806 /* Forced/non-local labels aren't going to be removed,
1807 but they will be moved to some neighbouring basic
1808 block. If some later case label refers to one of
1809 those labels, we should throw that case away rather
1810 than keeping it around and refering to some random
1811 other basic block without an edge to it. */
1812 if (removed_labels == NULL)
1813 removed_labels = new hash_set<tree>;
1814 removed_labels->add (gimple_label_label (stmt));
1815 }
1816 }
1817 else
1818 break;
1819 remove_edge_and_dominated_blocks (base_edge);
1820 }
1821 i = next_index;
1822 continue;
1823 }
1824
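/* If earlier case labels were merged or discarded, move the retained
label down into its new slot.  */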
1825 if (new_size < i)
1826 gimple_switch_set_label (stmt, new_size,
1827 gimple_switch_label (stmt, i));
1828 i = next_index;
1829 new_size++;
1830 }
1831
1832 gcc_assert (new_size <= old_size);
1833
1834 if (new_size < old_size)
1835 gimple_switch_set_num_labels (stmt, new_size);
1836
1837 delete removed_labels;
1838 return new_size < old_size;
1839 }
1840
1841 /* Look for blocks ending in a multiway branch (a GIMPLE_SWITCH),
1842 and scan the sorted vector of cases. Combine the ones jumping to the
1843 same label. */
1844
1845 bool
1846 group_case_labels (void)
1847 {
1848 basic_block bb;
1849 bool changed = false;
1850
1851 FOR_EACH_BB_FN (bb, cfun)
1852 {
1853 gimple *stmt = last_stmt (bb);
1854 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
1855 changed |= group_case_labels_stmt (as_a <gswitch *> (stmt));
1856 }
1857
1858 return changed;
1859 }
1860
1861 /* Checks whether we can merge block B into block A. */
1862
1863 static bool
1864 gimple_can_merge_blocks_p (basic_block a, basic_block b)
1865 {
1866 gimple *stmt;
1867
1868 if (!single_succ_p (a))
1869 return false;
1870
1871 if (single_succ_edge (a)->flags & EDGE_COMPLEX)
1872 return false;
1873
1874 if (single_succ (a) != b)
1875 return false;
1876
1877 if (!single_pred_p (b))
1878 return false;
1879
1880 if (a == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1881 || b == EXIT_BLOCK_PTR_FOR_FN (cfun))
1882 return false;
1883
1884 /* If A ends with a statement causing exceptions or something similar, we
1885 cannot merge the blocks. */
1886 stmt = last_stmt (a);
1887 if (stmt && stmt_ends_bb_p (stmt))
1888 return false;
1889
1890 /* Do not allow a block with only a non-local label to be merged. */
1891 if (stmt)
1892 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
1893 if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
1894 return false;
1895
1896 /* Examine the labels at the beginning of B. */
1897 for (gimple_stmt_iterator gsi = gsi_start_bb (b); !gsi_end_p (gsi);
1898 gsi_next (&gsi))
1899 {
1900 tree lab;
1901 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
1902 if (!label_stmt)
1903 break;
1904 lab = gimple_label_label (label_stmt);
1905
1906 /* Do not remove user forced labels or for -O0 any user labels. */
1907 if (!DECL_ARTIFICIAL (lab) && (!optimize || FORCED_LABEL (lab)))
1908 return false;
1909 }
1910
1911 /* Protect simple loop latches. We only want to avoid merging
1912 the latch with the loop header or with a block in another
1913 loop in this case. */
1914 if (current_loops
1915 && b->loop_father->latch == b
1916 && loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES)
1917 && (b->loop_father->header == a
1918 || b->loop_father != a->loop_father))
1919 return false;
1920
1921 /* It must be possible to eliminate all phi nodes in B. If ssa form
1922 is not up-to-date and a name-mapping is registered, we cannot eliminate
1923 any phis. Symbols marked for renaming are never a problem though. */
1924 for (gphi_iterator gsi = gsi_start_phis (b); !gsi_end_p (gsi);
1925 gsi_next (&gsi))
1926 {
1927 gphi *phi = gsi.phi ();
1928 /* Technically only new names matter. */
1929 if (name_registered_for_update_p (PHI_RESULT (phi)))
1930 return false;
1931 }
1932
1933 /* When not optimizing, don't merge if we'd lose goto_locus. */
1934 if (!optimize
1935 && single_succ_edge (a)->goto_locus != UNKNOWN_LOCATION)
1936 {
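/* Merging is still OK if either the last real statement of A or the
first real statement of B already carries GOTO_LOCUS; otherwise
the location would be lost.  */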
1937 location_t goto_locus = single_succ_edge (a)->goto_locus;
1938 gimple_stmt_iterator prev, next;
1939 prev = gsi_last_nondebug_bb (a);
1940 next = gsi_after_labels (b);
1941 if (!gsi_end_p (next) && is_gimple_debug (gsi_stmt (next)))
1942 gsi_next_nondebug (&next);
1943 if ((gsi_end_p (prev)
1944 || gimple_location (gsi_stmt (prev)) != goto_locus)
1945 && (gsi_end_p (next)
1946 || gimple_location (gsi_stmt (next)) != goto_locus))
1947 return false;
1948 }
1949
1950 return true;
1951 }
1952
1953 /* Replaces all uses of NAME by VAL. */
1954
1955 void
1956 replace_uses_by (tree name, tree val)
1957 {
1958 imm_use_iterator imm_iter;
1959 use_operand_p use;
1960 gimple *stmt;
1961 edge e;
1962
1963 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, name)
1964 {
1965 /* Mark the block if we change the last stmt in it. */
1966 if (cfgcleanup_altered_bbs
1967 && stmt_ends_bb_p (stmt))
1968 bitmap_set_bit (cfgcleanup_altered_bbs, gimple_bb (stmt)->index);
1969
1970 FOR_EACH_IMM_USE_ON_STMT (use, imm_iter)
1971 {
1972 replace_exp (use, val);
1973
1974 if (gimple_code (stmt) == GIMPLE_PHI)
1975 {
1976 e = gimple_phi_arg_edge (as_a <gphi *> (stmt),
1977 PHI_ARG_INDEX_FROM_USE (use));
1978 if (e->flags & EDGE_ABNORMAL
1979 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
1980 {
1981 /* This can only occur for virtual operands, since
1982 for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
1983 would prevent replacement. */
1984 gcc_checking_assert (virtual_operand_p (name));
1985 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1;
1986 }
1987 }
1988 }
1989
1990 if (gimple_code (stmt) != GIMPLE_PHI)
1991 {
1992 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1993 gimple *orig_stmt = stmt;
1994 size_t i;
1995
1996 /* FIXME. It shouldn't be required to keep TREE_CONSTANT
1997 on ADDR_EXPRs up-to-date on GIMPLE. Propagation will
1998 only change sth from non-invariant to invariant, and only
1999 when propagating constants. */
2000 if (is_gimple_min_invariant (val))
2001 for (i = 0; i < gimple_num_ops (stmt); i++)
2002 {
2003 tree op = gimple_op (stmt, i);
2004 /* Operands may be empty here. For example, the labels
2005 of a GIMPLE_COND are nulled out following the creation
2006 of the corresponding CFG edges. */
2007 if (op && TREE_CODE (op) == ADDR_EXPR)
2008 recompute_tree_invariant_for_addr_expr (op);
2009 }
2010
2011 if (fold_stmt (&gsi))
2012 stmt = gsi_stmt (gsi);
2013
2014 if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
2015 gimple_purge_dead_eh_edges (gimple_bb (stmt));
2016
2017 update_stmt (stmt);
2018 }
2019 }
2020
2021 gcc_checking_assert (has_zero_uses (name));
2022
2023 /* Also update the trees stored in loop structures. */
2024 if (current_loops)
2025 {
2026 class loop *loop;
2027
2028 FOR_EACH_LOOP (loop, 0)
2029 {
2030 substitute_in_loop_info (loop, name, val);
2031 }
2032 }
2033 }
2034
2035 /* Merge block B into block A. */
2036
2037 static void
2038 gimple_merge_blocks (basic_block a, basic_block b)
2039 {
2040 gimple_stmt_iterator last, gsi;
2041 gphi_iterator psi;
2042
2043 if (dump_file)
2044 fprintf (dump_file, "Merging blocks %d and %d\n", a->index, b->index);
2045
2046 /* Remove all single-valued PHI nodes from block B of the form
2047 V_i = PHI <V_j> by propagating V_j to all the uses of V_i. */
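/* GSI marks the end of A; copies that replace non-propagatable PHIs
are inserted there.  */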
2048 gsi = gsi_last_bb (a);
2049 for (psi = gsi_start_phis (b); !gsi_end_p (psi); )
2050 {
2051 gimple *phi = gsi_stmt (psi);
2052 tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);
2053 gimple *copy;
2054 bool may_replace_uses = (virtual_operand_p (def)
2055 || may_propagate_copy (def, use));
2056
2057 /* In case we maintain loop closed ssa form, do not propagate arguments
2058 of loop exit phi nodes. */
2059 if (current_loops
2060 && loops_state_satisfies_p (LOOP_CLOSED_SSA)
2061 && !virtual_operand_p (def)
2062 && TREE_CODE (use) == SSA_NAME
2063 && a->loop_father != b->loop_father)
2064 may_replace_uses = false;
2065
2066 if (!may_replace_uses)
2067 {
2068 gcc_assert (!virtual_operand_p (def));
2069
2070 /* Note that just emitting the copies is fine -- there is no problem
2071 with ordering of phi nodes. This is because A is the single
2072 predecessor of B, therefore results of the phi nodes cannot
2073 appear as arguments of the phi nodes. */
2074 copy = gimple_build_assign (def, use);
2075 gsi_insert_after (&gsi, copy, GSI_NEW_STMT);
2076 remove_phi_node (&psi, false);
2077 }
2078 else
2079 {
2080 /* If we deal with a PHI for virtual operands, we can simply
2081 propagate these without fussing with folding or updating
2082 the stmt. */
2083 if (virtual_operand_p (def))
2084 {
2085 imm_use_iterator iter;
2086 use_operand_p use_p;
2087 gimple *stmt;
2088
2089 FOR_EACH_IMM_USE_STMT (stmt, iter, def)
2090 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2091 SET_USE (use_p, use);
2092
2093 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def))
2094 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use) = 1;
2095 }
2096 else
2097 replace_uses_by (def, use);
2098
2099 remove_phi_node (&psi, true);
2100 }
2101 }
2102
2103 /* Ensure that B follows A. */
2104 move_block_after (b, a);
2105
2106 gcc_assert (single_succ_edge (a)->flags & EDGE_FALLTHRU);
2107 gcc_assert (!last_stmt (a) || !stmt_ends_bb_p (last_stmt (a)));
2108
2109 /* Remove labels from B and set gimple_bb to A for other statements. */
2110 for (gsi = gsi_start_bb (b); !gsi_end_p (gsi);)
2111 {
2112 gimple *stmt = gsi_stmt (gsi);
2113 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2114 {
2115 tree label = gimple_label_label (label_stmt);
2116 int lp_nr;
2117
2118 gsi_remove (&gsi, false);
2119
2120 /* Now that we can thread computed gotos, we might have
2121 a situation where we have a forced label in block B.
2122 However, the label at the start of block B might still be
2123 used in other ways (think about the runtime checking for
2124 Fortran assigned gotos). So we cannot just delete the
2125 label. Instead we move the label to the start of block A. */
2126 if (FORCED_LABEL (label))
2127 {
2128 gimple_stmt_iterator dest_gsi = gsi_start_bb (a);
2129 tree first_label = NULL_TREE;
2130 if (!gsi_end_p (dest_gsi))
2131 if (glabel *first_label_stmt
2132 = dyn_cast <glabel *> (gsi_stmt (dest_gsi)))
2133 first_label = gimple_label_label (first_label_stmt);
2134 if (first_label
2135 && (DECL_NONLOCAL (first_label)
2136 || EH_LANDING_PAD_NR (first_label) != 0))
2137 gsi_insert_after (&dest_gsi, stmt, GSI_NEW_STMT);
2138 else
2139 gsi_insert_before (&dest_gsi, stmt, GSI_NEW_STMT);
2140 }
2141 /* Other user labels are kept around in the form of a debug stmt. */
2142 else if (!DECL_ARTIFICIAL (label) && MAY_HAVE_DEBUG_BIND_STMTS)
2143 {
2144 gimple *dbg = gimple_build_debug_bind (label,
2145 integer_zero_node,
2146 stmt);
2147 gimple_debug_bind_reset_value (dbg);
2148 gsi_insert_before (&gsi, dbg, GSI_SAME_STMT);
2149 }
2150
2151 lp_nr = EH_LANDING_PAD_NR (label);
2152 if (lp_nr)
2153 {
2154 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
2155 lp->post_landing_pad = NULL;
2156 }
2157 }
2158 else
2159 {
2160 gimple_set_bb (stmt, a);
2161 gsi_next (&gsi);
2162 }
2163 }
2164
2165 /* When merging two BBs, if their counts are different, the larger count
2166 is selected as the new bb count. This is to handle inconsistent
2167 profiles. */
2168 if (a->loop_father == b->loop_father)
2169 {
2170 a->count = a->count.merge (b->count);
2171 }
2172
2173 /* Merge the sequences. */
2174 last = gsi_last_bb (a);
2175 gsi_insert_seq_after (&last, bb_seq (b), GSI_NEW_STMT);
2176 set_bb_seq (b, NULL);
2177
2178 if (cfgcleanup_altered_bbs)
2179 bitmap_set_bit (cfgcleanup_altered_bbs, a->index);
2180 }
2181
2182
2183 /* Return the one of two successors of BB that is not reachable by a
2184 complex edge, if there is one. Else, return BB. We use
2185 this in optimizations that use post-dominators for their heuristics,
2186 to catch the cases in C++ where function calls are involved. */
2187
2188 basic_block
2189 single_noncomplex_succ (basic_block bb)
2190 {
2191 edge e0, e1;
2192 if (EDGE_COUNT (bb->succs) != 2)
2193 return bb;
2194
2195 e0 = EDGE_SUCC (bb, 0);
2196 e1 = EDGE_SUCC (bb, 1);
2197 if (e0->flags & EDGE_COMPLEX)
2198 return e1->dest;
2199 if (e1->flags & EDGE_COMPLEX)
2200 return e0->dest;
2201
2202 return bb;
2203 }
2204
2205 /* CALL is a GIMPLE_CALL. Set current_function_calls_* flags. */
2206
2207 void
2208 notice_special_calls (gcall *call)
2209 {
2210 int flags = gimple_call_flags (call);
2211
2212 if (flags & ECF_MAY_BE_ALLOCA)
2213 cfun->calls_alloca = true;
2214 if (flags & ECF_RETURNS_TWICE)
2215 cfun->calls_setjmp = true;
2216 }
2217
2218
2219 /* Clear flags set by notice_special_calls. Used by dead code removal
2220 to update the flags. */
2221
2222 void
2223 clear_special_calls (void)
2224 {
2225 cfun->calls_alloca = false;
2226 cfun->calls_setjmp = false;
2227 }
2228
2229 /* Remove PHI nodes associated with basic block BB and all edges out of BB. */
2230
2231 static void
2232 remove_phi_nodes_and_edges_for_unreachable_block (basic_block bb)
2233 {
2234 /* Since this block is no longer reachable, we can just delete all
2235 of its PHI nodes. */
2236 remove_phi_nodes (bb);
2237
2238 /* Remove edges to BB's successors. */
2239 while (EDGE_COUNT (bb->succs) > 0)
2240 remove_edge (EDGE_SUCC (bb, 0));
2241 }
2242
2243
2244 /* Remove statements of basic block BB. */
2245
2246 static void
2247 remove_bb (basic_block bb)
2248 {
2249 gimple_stmt_iterator i;
2250
2251 if (dump_file)
2252 {
2253 fprintf (dump_file, "Removing basic block %d\n", bb->index);
2254 if (dump_flags & TDF_DETAILS)
2255 {
2256 dump_bb (dump_file, bb, 0, TDF_BLOCKS);
2257 fprintf (dump_file, "\n");
2258 }
2259 }
2260
2261 if (current_loops)
2262 {
2263 class loop *loop = bb->loop_father;
2264
2265 /* If a loop gets removed, clean up the information associated
2266 with it. */
2267 if (loop->latch == bb
2268 || loop->header == bb)
2269 free_numbers_of_iterations_estimates (loop);
2270 }
2271
2272 /* Remove all the instructions in the block. */
2273 if (bb_seq (bb) != NULL)
2274 {
2275 /* Walk backwards so as to get a chance to substitute all
2276 released DEFs into debug stmts. See
2277 eliminate_unnecessary_stmts() in tree-ssa-dce.c for more
2278 details. */
2279 for (i = gsi_last_bb (bb); !gsi_end_p (i);)
2280 {
2281 gimple *stmt = gsi_stmt (i);
2282 glabel *label_stmt = dyn_cast <glabel *> (stmt);
2283 if (label_stmt
2284 && (FORCED_LABEL (gimple_label_label (label_stmt))
2285 || DECL_NONLOCAL (gimple_label_label (label_stmt))))
2286 {
2287 basic_block new_bb;
2288 gimple_stmt_iterator new_gsi;
2289
2290 /* A non-reachable non-local label may still be referenced.
2291 But it no longer needs to carry the extra semantics of
2292 non-locality. */
2293 if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
2294 {
2295 DECL_NONLOCAL (gimple_label_label (label_stmt)) = 0;
2296 FORCED_LABEL (gimple_label_label (label_stmt)) = 1;
2297 }
2298
2299 new_bb = bb->prev_bb;
2300 /* Don't move any labels into ENTRY block. */
2301 if (new_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2302 {
2303 new_bb = single_succ (new_bb);
2304 gcc_assert (new_bb != bb);
2305 }
2306 if ((unsigned) bb->index < bb_to_omp_idx.length ()
2307 && ((unsigned) new_bb->index >= bb_to_omp_idx.length ()
2308 || (bb_to_omp_idx[bb->index]
2309 != bb_to_omp_idx[new_bb->index])))
2310 {
2311 /* During cfg pass make sure to put orphaned labels
2312 into the right OMP region. */
2313 unsigned int i;
2314 int idx;
2315 new_bb = NULL;
2316 FOR_EACH_VEC_ELT (bb_to_omp_idx, i, idx)
2317 if (i >= NUM_FIXED_BLOCKS
2318 && idx == bb_to_omp_idx[bb->index]
2319 && i != (unsigned) bb->index)
2320 {
2321 new_bb = BASIC_BLOCK_FOR_FN (cfun, i);
2322 break;
2323 }
2324 if (new_bb == NULL)
2325 {
2326 new_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2327 gcc_assert (new_bb != bb);
2328 }
2329 }
2330 new_gsi = gsi_after_labels (new_bb);
2331 gsi_remove (&i, false);
2332 gsi_insert_before (&new_gsi, stmt, GSI_NEW_STMT);
2333 }
2334 else
2335 {
2336 /* Release SSA definitions. */
2337 release_defs (stmt);
2338 gsi_remove (&i, true);
2339 }
2340
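/* The removal or move above leaves I pointing after the old statement;
restart from the new last statement if we ran off the end, otherwise
step back to continue the backward walk.  */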
2341 if (gsi_end_p (i))
2342 i = gsi_last_bb (bb);
2343 else
2344 gsi_prev (&i);
2345 }
2346 }
2347
2348 if ((unsigned) bb->index < bb_to_omp_idx.length ())
2349 bb_to_omp_idx[bb->index] = -1;
2350 remove_phi_nodes_and_edges_for_unreachable_block (bb);
2351 bb->il.gimple.seq = NULL;
2352 bb->il.gimple.phi_nodes = NULL;
2353 }
2354
2355
2356 /* Given a basic block BB and a value VAL for use in the final statement
2357 of the block (if a GIMPLE_COND, GIMPLE_SWITCH, or computed goto), return
2358 the edge that will be taken out of the block.
2359 If VAL is NULL_TREE, then the current value of the final statement's
2360 predicate or index is used.
2361 If the value does not match a unique edge, NULL is returned. */
2362
2363 edge
2364 find_taken_edge (basic_block bb, tree val)
2365 {
2366 gimple *stmt;
2367
2368 stmt = last_stmt (bb);
2369
2370 /* Handle ENTRY and EXIT. */
2371 if (!stmt)
2372 return NULL;
2373
2374 if (gimple_code (stmt) == GIMPLE_COND)
2375 return find_taken_edge_cond_expr (as_a <gcond *> (stmt), val);
2376
2377 if (gimple_code (stmt) == GIMPLE_SWITCH)
2378 return find_taken_edge_switch_expr (as_a <gswitch *> (stmt), val);
2379
2380 if (computed_goto_p (stmt))
2381 {
2382 /* Only optimize if the argument is a label; if the argument is
2383 not a label then we cannot construct a proper CFG.
2384
2385 It may be the case that we only need to allow the LABEL_REF to
2386 appear inside an ADDR_EXPR, but we also allow the LABEL_REF to
2387 appear inside a LABEL_EXPR just to be safe. */
2388 if (val
2389 && (TREE_CODE (val) == ADDR_EXPR || TREE_CODE (val) == LABEL_EXPR)
2390 && TREE_CODE (TREE_OPERAND (val, 0)) == LABEL_DECL)
2391 return find_taken_edge_computed_goto (bb, TREE_OPERAND (val, 0));
2392 }
2393
2394 /* Otherwise we only know the taken successor edge if it's unique. */
2395 return single_succ_p (bb) ? single_succ_edge (bb) : NULL;
2396 }
2397
2398 /* Given a constant value VAL and the basic block BB that ends in a computed
2399 GOTO_EXPR statement, determine which of the outgoing edges will be taken
2400 out of the block. Return NULL if the taken edge cannot be determined. */
2401
2402 static edge
2403 find_taken_edge_computed_goto (basic_block bb, tree val)
2404 {
2405 basic_block dest;
2406 edge e = NULL;
2407
2408 dest = label_to_block (cfun, val);
2409 if (dest)
2410 e = find_edge (bb, dest);
2411
2412 /* It's possible for find_edge to return NULL here on invalid code
2413 that abuses the labels-as-values extension (e.g. code that attempts to
2414 jump *between* functions via stored labels-as-values; PR 84136).
2415 If so, then we simply return that NULL for the edge.
2416 We don't currently have a way of detecting such invalid code, so we
2417 can't assert that it was the case when a NULL edge occurs here. */
2418
2419 return e;
2420 }
2421
2422 /* Given COND_STMT and a constant value VAL for use as the predicate,
2423 determine which of the two edges will be taken out of
2424 the statement's block. Return NULL if either edge may be taken.
2425 If VAL is NULL_TREE, then the current value of COND_STMT's predicate
2426 is used. */
2427
2428 static edge
2429 find_taken_edge_cond_expr (const gcond *cond_stmt, tree val)
2430 {
2431 edge true_edge, false_edge;
2432
2433 if (val == NULL_TREE)
2434 {
2435 /* Use the current value of the predicate. */
2436 if (gimple_cond_true_p (cond_stmt))
2437 val = integer_one_node;
2438 else if (gimple_cond_false_p (cond_stmt))
2439 val = integer_zero_node;
2440 else
2441 return NULL;
2442 }
2443 else if (TREE_CODE (val) != INTEGER_CST)
2444 return NULL;
2445
2446 extract_true_false_edges_from_block (gimple_bb (cond_stmt),
2447 &true_edge, &false_edge);
2448
2449 return (integer_zerop (val) ? false_edge : true_edge);
2450 }
2451
2452 /* Given SWITCH_STMT and an INTEGER_CST VAL for use as the index, determine
2453 which edge will be taken out of the statement's block. Return NULL if any
2454 edge may be taken.
2455 If VAL is NULL_TREE, then the current value of SWITCH_STMT's index
2456 is used. */
2457
2458 edge
2459 find_taken_edge_switch_expr (const gswitch *switch_stmt, tree val)
2460 {
2461 basic_block dest_bb;
2462 edge e;
2463 tree taken_case;
2464
2465 if (gimple_switch_num_labels (switch_stmt) == 1)
2466 taken_case = gimple_switch_default_label (switch_stmt);
2467 else
2468 {
2469 if (val == NULL_TREE)
2470 val = gimple_switch_index (switch_stmt);
2471 if (TREE_CODE (val) != INTEGER_CST)
2472 return NULL;
2473 else
2474 taken_case = find_case_label_for_value (switch_stmt, val);
2475 }
2476 dest_bb = label_to_block (cfun, CASE_LABEL (taken_case));
2477
2478 e = find_edge (gimple_bb (switch_stmt), dest_bb);
2479 gcc_assert (e);
2480 return e;
2481 }
2482
2483
2484 /* Return the CASE_LABEL_EXPR that SWITCH_STMT will take for VAL.
2485 We can make optimal use here of the fact that the case labels are
2486 sorted: We can do a binary search for a case matching VAL. */
2487
2488 tree
2489 find_case_label_for_value (const gswitch *switch_stmt, tree val)
2490 {
2491 size_t low, high, n = gimple_switch_num_labels (switch_stmt);
2492 tree default_case = gimple_switch_default_label (switch_stmt);
2493
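/* Invariant: LOW is either 0 (the default label) or a case whose
CASE_LOW is <= VAL; HIGH is either N or a case whose CASE_LOW
is > VAL.  */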
2494 for (low = 0, high = n; high - low > 1; )
2495 {
2496 size_t i = (high + low) / 2;
2497 tree t = gimple_switch_label (switch_stmt, i);
2498 int cmp;
2499
2500 /* Cache the result of comparing CASE_LOW and val. */
2501 cmp = tree_int_cst_compare (CASE_LOW (t), val);
2502
2503 if (cmp > 0)
2504 high = i;
2505 else
2506 low = i;
2507
2508 if (CASE_HIGH (t) == NULL)
2509 {
2510 /* A single-valued case label. */
2511 if (cmp == 0)
2512 return t;
2513 }
2514 else
2515 {
2516 /* A case range. We can only handle integer ranges. */
2517 if (cmp <= 0 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
2518 return t;
2519 }
2520 }
2521
2522 return default_case;
2523 }
2524
2525
2526 /* Dump a basic block on stderr. */
2527
2528 void
2529 gimple_debug_bb (basic_block bb)
2530 {
2531 dump_bb (stderr, bb, 0, TDF_VOPS|TDF_MEMSYMS|TDF_BLOCKS);
2532 }
2533
2534
2535 /* Dump basic block with index N on stderr. */
2536
2537 basic_block
2538 gimple_debug_bb_n (int n)
2539 {
2540 gimple_debug_bb (BASIC_BLOCK_FOR_FN (cfun, n));
2541 return BASIC_BLOCK_FOR_FN (cfun, n);
2542 }
2543
2544
2545 /* Dump the CFG on stderr.
2546
2547 FLAGS are the same as those used by the tree dumping functions
2548 (see TDF_* in dumpfile.h). */
2549
2550 void
2551 gimple_debug_cfg (dump_flags_t flags)
2552 {
2553 gimple_dump_cfg (stderr, flags);
2554 }
2555
2556
2557 /* Dump the program showing basic block boundaries on the given FILE.
2558
2559 FLAGS are the same as those used by the tree dumping functions (see TDF_* in
2560 tree.h). */
2561
2562 void
2563 gimple_dump_cfg (FILE *file, dump_flags_t flags)
2564 {
2565 if (flags & TDF_DETAILS)
2566 {
2567 dump_function_header (file, current_function_decl, flags);
2568 fprintf (file, ";; \n%d basic blocks, %d edges, last basic block %d.\n\n",
2569 n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
2570 last_basic_block_for_fn (cfun));
2571
2572 brief_dump_cfg (file, flags);
2573 fprintf (file, "\n");
2574 }
2575
2576 if (flags & TDF_STATS)
2577 dump_cfg_stats (file);
2578
2579 dump_function_to_file (current_function_decl, file, flags | TDF_BLOCKS);
2580 }
2581
2582
2583 /* Dump CFG statistics on FILE. */
2584
2585 void
2586 dump_cfg_stats (FILE *file)
2587 {
2588 static long max_num_merged_labels = 0;
2589 unsigned long size, total = 0;
2590 long num_edges;
2591 basic_block bb;
2592 const char * const fmt_str = "%-30s%-13s%12s\n";
2593 const char * const fmt_str_1 = "%-30s%13d" PRsa (11) "\n";
2594 const char * const fmt_str_2 = "%-30s%13ld" PRsa (11) "\n";
2595 const char * const fmt_str_3 = "%-43s" PRsa (11) "\n";
2596 const char *funcname = current_function_name ();
2597
2598 fprintf (file, "\nCFG Statistics for %s\n\n", funcname);
2599
2600 fprintf (file, "---------------------------------------------------------\n");
2601 fprintf (file, fmt_str, "", " Number of ", "Memory");
2602 fprintf (file, fmt_str, "", " instances ", "used ");
2603 fprintf (file, "---------------------------------------------------------\n");
2604
2605 size = n_basic_blocks_for_fn (cfun) * sizeof (struct basic_block_def);
2606 total += size;
2607 fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks_for_fn (cfun),
2608 SIZE_AMOUNT (size));
2609
2610 num_edges = 0;
2611 FOR_EACH_BB_FN (bb, cfun)
2612 num_edges += EDGE_COUNT (bb->succs);
2613 size = num_edges * sizeof (class edge_def);
2614 total += size;
2615 fprintf (file, fmt_str_2, "Edges", num_edges, SIZE_AMOUNT (size));
2616
2617 fprintf (file, "---------------------------------------------------------\n");
2618 fprintf (file, fmt_str_3, "Total memory used by CFG data",
2619 SIZE_AMOUNT (total));
2620 fprintf (file, "---------------------------------------------------------\n");
2621 fprintf (file, "\n");
2622
2623 if (cfg_stats.num_merged_labels > max_num_merged_labels)
2624 max_num_merged_labels = cfg_stats.num_merged_labels;
2625
2626 fprintf (file, "Coalesced label blocks: %ld (Max so far: %ld)\n",
2627 cfg_stats.num_merged_labels, max_num_merged_labels);
2628
2629 fprintf (file, "\n");
2630 }
2631
2632
2633 /* Dump CFG statistics on stderr. Keep extern so that it's always
2634 linked in the final executable. */
2635
2636 DEBUG_FUNCTION void
2637 debug_cfg_stats (void)
2638 {
2639 dump_cfg_stats (stderr);
2640 }
2641
2642 /*---------------------------------------------------------------------------
2643 Miscellaneous helpers
2644 ---------------------------------------------------------------------------*/
2645
2646 /* Return true if T, a GIMPLE_CALL, can make an abnormal transfer of control
2647 flow. Transfers of control flow associated with EH are excluded. */
2648
2649 static bool
2650 call_can_make_abnormal_goto (gimple *t)
2651 {
2652 /* If the function has no non-local labels, then a call cannot make an
2653 abnormal transfer of control. */
2654 if (!cfun->has_nonlocal_label
2655 && !cfun->calls_setjmp)
2656 return false;
2657
2658 /* Likewise if the call has no side effects. */
2659 if (!gimple_has_side_effects (t))
2660 return false;
2661
2662 /* Likewise if the called function is leaf. */
2663 if (gimple_call_flags (t) & ECF_LEAF)
2664 return false;
2665
2666 return true;
2667 }
2668
2669
2670 /* Return true if T can make an abnormal transfer of control flow.
2671 Transfers of control flow associated with EH are excluded. */
2672
2673 bool
2674 stmt_can_make_abnormal_goto (gimple *t)
2675 {
2676 if (computed_goto_p (t))
2677 return true;
2678 if (is_gimple_call (t))
2679 return call_can_make_abnormal_goto (t);
2680 return false;
2681 }
2682
2683
2684 /* Return true if T represents a stmt that always transfers control. */
2685
2686 bool
2687 is_ctrl_stmt (gimple *t)
2688 {
2689 switch (gimple_code (t))
2690 {
2691 case GIMPLE_COND:
2692 case GIMPLE_SWITCH:
2693 case GIMPLE_GOTO:
2694 case GIMPLE_RETURN:
2695 case GIMPLE_RESX:
2696 return true;
2697 default:
2698 return false;
2699 }
2700 }
2701
2702
2703 /* Return true if T is a statement that may alter the flow of control
2704 (e.g., a call to a non-returning function). */
2705
2706 bool
2707 is_ctrl_altering_stmt (gimple *t)
2708 {
2709 gcc_assert (t);
2710
2711 switch (gimple_code (t))
2712 {
2713 case GIMPLE_CALL:
2714 /* Per stmt call flag indicates whether the call could alter
2715 control flow. */
2716 if (gimple_call_ctrl_altering_p (t))
2717 return true;
2718 break;
2719
2720 case GIMPLE_EH_DISPATCH:
2721 /* EH_DISPATCH branches to the individual catch handlers at
2722 this level of a try or allowed-exceptions region. It can
2723 fallthru to the next statement as well. */
2724 return true;
2725
2726 case GIMPLE_ASM:
2727 if (gimple_asm_nlabels (as_a <gasm *> (t)) > 0)
2728 return true;
2729 break;
2730
2731 CASE_GIMPLE_OMP:
2732 /* OpenMP directives alter control flow. */
2733 return true;
2734
2735 case GIMPLE_TRANSACTION:
2736 /* A transaction start alters control flow. */
2737 return true;
2738
2739 default:
2740 break;
2741 }
2742
2743 /* If a statement can throw, it alters control flow. */
2744 return stmt_can_throw_internal (cfun, t);
2745 }
2746
2747
2748 /* Return true if T is a simple local goto. */
2749
2750 bool
2751 simple_goto_p (gimple *t)
2752 {
2753 return (gimple_code (t) == GIMPLE_GOTO
2754 && TREE_CODE (gimple_goto_dest (t)) == LABEL_DECL);
2755 }
2756
2757
2758 /* Return true if STMT should start a new basic block. PREV_STMT is
2759 the statement preceding STMT. It is used when STMT is a label or a
2760 case label. Labels should only start a new basic block if their
2761 previous statement wasn't a label. Otherwise, sequences of labels
2762 would generate unnecessary basic blocks that only contain a single
2763 label. */
2764
2765 static inline bool
2766 stmt_starts_bb_p (gimple *stmt, gimple *prev_stmt)
2767 {
2768 if (stmt == NULL)
2769 return false;
2770
2771 /* PREV_STMT is only set to a debug stmt if the debug stmt is before
2772 any nondebug stmts in the block. We don't want to start another
2773 block in this case: the debug stmt will already have started the
2774 one STMT would start if we weren't outputting debug stmts. */
2775 if (prev_stmt && is_gimple_debug (prev_stmt))
2776 return false;
2777
2778 /* Labels start a new basic block only if the preceding statement
2779 wasn't a label of the same type. This prevents the creation of
2780 consecutive blocks that have nothing but a single label. */
2781 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2782 {
2783 /* Nonlocal and computed GOTO targets always start a new block. */
2784 if (DECL_NONLOCAL (gimple_label_label (label_stmt))
2785 || FORCED_LABEL (gimple_label_label (label_stmt)))
2786 return true;
2787
2788 if (glabel *plabel = safe_dyn_cast <glabel *> (prev_stmt))
2789 {
2790 if (DECL_NONLOCAL (gimple_label_label (plabel))
2791 || !DECL_ARTIFICIAL (gimple_label_label (plabel)))
2792 return true;
2793
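/* The label can be kept in the same block as PREV_STMT's label;
record it for the CFG statistics dump.  */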
2794 cfg_stats.num_merged_labels++;
2795 return false;
2796 }
2797 else
2798 return true;
2799 }
2800 else if (gimple_code (stmt) == GIMPLE_CALL)
2801 {
2802 if (gimple_call_flags (stmt) & ECF_RETURNS_TWICE)
2803 /* setjmp acts similarly to a nonlocal GOTO target and thus should
2804 start a new block. */
2805 return true;
2806 if (gimple_call_internal_p (stmt, IFN_PHI)
2807 && prev_stmt
2808 && gimple_code (prev_stmt) != GIMPLE_LABEL
2809 && (gimple_code (prev_stmt) != GIMPLE_CALL
2810 || ! gimple_call_internal_p (prev_stmt, IFN_PHI)))
2811 /* PHI nodes start a new block unless preceded by a label
2812 or another PHI. */
2813 return true;
2814 }
2815
2816 return false;
2817 }
2818
2819
2820 /* Return true if T should end a basic block. */
2821
2822 bool
2823 stmt_ends_bb_p (gimple *t)
2824 {
2825 return is_ctrl_stmt (t) || is_ctrl_altering_stmt (t);
2826 }
2827
2828 /* Remove block annotations and other data structures. */
2829
2830 void
2831 delete_tree_cfg_annotations (struct function *fn)
2832 {
2833 vec_free (label_to_block_map_for_fn (fn));
2834 }
2835
2836 /* Return the virtual phi in BB. */
2837
2838 gphi *
2839 get_virtual_phi (basic_block bb)
2840 {
2841 for (gphi_iterator gsi = gsi_start_phis (bb);
2842 !gsi_end_p (gsi);
2843 gsi_next (&gsi))
2844 {
2845 gphi *phi = gsi.phi ();
2846
2847 if (virtual_operand_p (PHI_RESULT (phi)))
2848 return phi;
2849 }
2850
2851 return NULL;
2852 }
2853
2854 /* Return the first statement in basic block BB. */
2855
2856 gimple *
2857 first_stmt (basic_block bb)
2858 {
2859 gimple_stmt_iterator i = gsi_start_bb (bb);
2860 gimple *stmt = NULL;
2861
2862 while (!gsi_end_p (i) && is_gimple_debug ((stmt = gsi_stmt (i))))
2863 {
2864 gsi_next (&i);
2865 stmt = NULL;
2866 }
2867 return stmt;
2868 }
2869
2870 /* Return the first non-label statement in basic block BB. */
2871
2872 static gimple *
2873 first_non_label_stmt (basic_block bb)
2874 {
2875 gimple_stmt_iterator i = gsi_start_bb (bb);
2876 while (!gsi_end_p (i) && gimple_code (gsi_stmt (i)) == GIMPLE_LABEL)
2877 gsi_next (&i);
2878 return !gsi_end_p (i) ? gsi_stmt (i) : NULL;
2879 }
2880
2881 /* Return the last statement in basic block BB. */
2882
2883 gimple *
2884 last_stmt (basic_block bb)
2885 {
2886 gimple_stmt_iterator i = gsi_last_bb (bb);
2887 gimple *stmt = NULL;
2888
2889 while (!gsi_end_p (i) && is_gimple_debug ((stmt = gsi_stmt (i))))
2890 {
2891 gsi_prev (&i);
2892 stmt = NULL;
2893 }
2894 return stmt;
2895 }
2896
2897 /* Return the last statement of an otherwise empty block. Return NULL
2898 if the block is totally empty, or if it contains more than one
2899 statement. */
2900
2901 gimple *
2902 last_and_only_stmt (basic_block bb)
2903 {
2904 gimple_stmt_iterator i = gsi_last_nondebug_bb (bb);
2905 gimple *last, *prev;
2906
2907 if (gsi_end_p (i))
2908 return NULL;
2909
2910 last = gsi_stmt (i);
2911 gsi_prev_nondebug (&i);
2912 if (gsi_end_p (i))
2913 return last;
2914
2915 /* Empty statements should no longer appear in the instruction stream.
2916 Everything that might have appeared before should be deleted by
2917 remove_useless_stmts, and the optimizers should just gsi_remove
2918 instead of smashing with build_empty_stmt.
2919
2920 Thus the only thing that should appear here in a block containing
2921 one executable statement is a label. */
2922 prev = gsi_stmt (i);
2923 if (gimple_code (prev) == GIMPLE_LABEL)
2924 return last;
2925 else
2926 return NULL;
2927 }
2928
2929 /* Returns the basic block after which the new basic block created
2930 by splitting edge EDGE_IN should be placed. Tries to keep the new block
2931 near its "logical" location. This is of most help to humans looking
2932 at debugging dumps. */
2933
2934 basic_block
2935 split_edge_bb_loc (edge edge_in)
2936 {
2937 basic_block dest = edge_in->dest;
2938 basic_block dest_prev = dest->prev_bb;
2939
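/* If the block just before DEST already has a non-complex edge to DEST,
put the new block after the source of the edge being split; otherwise
place it right before DEST.  */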
2940 if (dest_prev)
2941 {
2942 edge e = find_edge (dest_prev, dest);
2943 if (e && !(e->flags & EDGE_COMPLEX))
2944 return edge_in->src;
2945 }
2946 return dest_prev;
2947 }
2948
2949 /* Split a (typically critical) edge EDGE_IN. Return the new block.
2950 Abort on abnormal edges. */
2951
2952 static basic_block
2953 gimple_split_edge (edge edge_in)
2954 {
2955 basic_block new_bb, after_bb, dest;
2956 edge new_edge, e;
2957
2958 /* Abnormal edges cannot be split. */
2959 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
2960
2961 dest = edge_in->dest;
2962
2963 after_bb = split_edge_bb_loc (edge_in);
2964
2965 new_bb = create_empty_bb (after_bb);
2966 new_bb->count = edge_in->count ();
2967
2968 /* We want to avoid re-allocating PHIs when we first
2969 add the fallthru edge from new_bb to dest but we also
2970 want to avoid changing PHI argument order when
2971 first redirecting edge_in away from dest. The former
2972 avoids changing PHI argument order by adding them
2973 last and then the redirection swapping it back into
2974 place by means of unordered remove.
2975 So hack around things by temporarily removing all PHIs
2976 from the destination during the edge redirection and then
2977 making sure the edges stay in order. */
2978 gimple_seq saved_phis = phi_nodes (dest);
2979 unsigned old_dest_idx = edge_in->dest_idx;
2980 set_phi_nodes (dest, NULL);
2981 new_edge = make_single_succ_edge (new_bb, dest, EDGE_FALLTHRU);
2982 e = redirect_edge_and_branch (edge_in, new_bb);
2983 gcc_assert (e == edge_in && new_edge->dest_idx == old_dest_idx);
2984 /* set_phi_nodes sets the BB of the PHI nodes, so do it manually here. */
2985 dest->il.gimple.phi_nodes = saved_phis;
2986
2987 return new_bb;
2988 }
2989
2990
2991 /* Verify properties of the address expression T whose base should be
2992 TREE_ADDRESSABLE if VERIFY_ADDRESSABLE is true. */
2993
2994 static bool
2995 verify_address (tree t, bool verify_addressable)
2996 {
2997 bool old_constant;
2998 bool old_side_effects;
2999 bool new_constant;
3000 bool new_side_effects;
3001
3002 old_constant = TREE_CONSTANT (t);
3003 old_side_effects = TREE_SIDE_EFFECTS (t);
3004
3005 recompute_tree_invariant_for_addr_expr (t);
3006 new_side_effects = TREE_SIDE_EFFECTS (t);
3007 new_constant = TREE_CONSTANT (t);
3008
3009 if (old_constant != new_constant)
3010 {
3011 error ("constant not recomputed when %<ADDR_EXPR%> changed");
3012 return true;
3013 }
3014 if (old_side_effects != new_side_effects)
3015 {
3016 error ("side effects not recomputed when %<ADDR_EXPR%> changed");
3017 return true;
3018 }
3019
3020 tree base = TREE_OPERAND (t, 0);
3021 while (handled_component_p (base))
3022 base = TREE_OPERAND (base, 0);
3023
3024 if (!(VAR_P (base)
3025 || TREE_CODE (base) == PARM_DECL
3026 || TREE_CODE (base) == RESULT_DECL))
3027 return false;
3028
3029 if (verify_addressable && !TREE_ADDRESSABLE (base))
3030 {
3031 error ("address taken but %<TREE_ADDRESSABLE%> bit not set");
3032 return true;
3033 }
3034
3035 return false;
3036 }
3037
3038
3039 /* Verify if EXPR is either a GIMPLE ID or a GIMPLE indirect reference.
3040 Returns true if there is an error, otherwise false. */
3041
3042 static bool
3043 verify_types_in_gimple_min_lval (tree expr)
3044 {
3045 tree op;
3046
3047 if (is_gimple_id (expr))
3048 return false;
3049
3050 if (TREE_CODE (expr) != TARGET_MEM_REF
3051 && TREE_CODE (expr) != MEM_REF)
3052 {
3053 error ("invalid expression for min lvalue");
3054 return true;
3055 }
3056
3057 /* TARGET_MEM_REFs are strange beasts. */
3058 if (TREE_CODE (expr) == TARGET_MEM_REF)
3059 return false;
3060
3061 op = TREE_OPERAND (expr, 0);
3062 if (!is_gimple_val (op))
3063 {
3064 error ("invalid operand in indirect reference");
3065 debug_generic_stmt (op);
3066 return true;
3067 }
3068 /* Memory references now generally can involve a value conversion. */
3069
3070 return false;
3071 }
3072
3073 /* Verify if EXPR is a valid GIMPLE reference expression. If
3074 REQUIRE_LVALUE is true verifies it is an lvalue. Returns true
3075 if there is an error, otherwise false. */
3076
3077 static bool
3078 verify_types_in_gimple_reference (tree expr, bool require_lvalue)
3079 {
3080 const char *code_name = get_tree_code_name (TREE_CODE (expr));
3081
3082 if (TREE_CODE (expr) == REALPART_EXPR
3083 || TREE_CODE (expr) == IMAGPART_EXPR
3084 || TREE_CODE (expr) == BIT_FIELD_REF)
3085 {
3086 tree op = TREE_OPERAND (expr, 0);
3087 if (!is_gimple_reg_type (TREE_TYPE (expr)))
3088 {
3089 error ("non-scalar %qs", code_name);
3090 return true;
3091 }
3092
3093 if (TREE_CODE (expr) == BIT_FIELD_REF)
3094 {
3095 tree t1 = TREE_OPERAND (expr, 1);
3096 tree t2 = TREE_OPERAND (expr, 2);
3097 poly_uint64 size, bitpos;
3098 if (!poly_int_tree_p (t1, &size)
3099 || !poly_int_tree_p (t2, &bitpos)
3100 || !types_compatible_p (bitsizetype, TREE_TYPE (t1))
3101 || !types_compatible_p (bitsizetype, TREE_TYPE (t2)))
3102 {
3103 error ("invalid position or size operand to %qs", code_name);
3104 return true;
3105 }
3106 if (INTEGRAL_TYPE_P (TREE_TYPE (expr))
3107 && maybe_ne (TYPE_PRECISION (TREE_TYPE (expr)), size))
3108 {
3109 error ("integral result type precision does not match "
3110 "field size of %qs", code_name);
3111 return true;
3112 }
3113 else if (!INTEGRAL_TYPE_P (TREE_TYPE (expr))
3114 && TYPE_MODE (TREE_TYPE (expr)) != BLKmode
3115 && maybe_ne (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr))),
3116 size))
3117 {
3118 error ("mode size of non-integral result does not "
3119 "match field size of %qs",
3120 code_name);
3121 return true;
3122 }
3123 if (INTEGRAL_TYPE_P (TREE_TYPE (op))
3124 && !type_has_mode_precision_p (TREE_TYPE (op)))
3125 {
3126 error ("%qs of non-mode-precision operand", code_name);
3127 return true;
3128 }
3129 if (!AGGREGATE_TYPE_P (TREE_TYPE (op))
3130 && maybe_gt (size + bitpos,
3131 tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (op)))))
3132 {
3133 error ("position plus size exceeds size of referenced object in "
3134 "%qs", code_name);
3135 return true;
3136 }
3137 }
3138
3139 if ((TREE_CODE (expr) == REALPART_EXPR
3140 || TREE_CODE (expr) == IMAGPART_EXPR)
3141 && !useless_type_conversion_p (TREE_TYPE (expr),
3142 TREE_TYPE (TREE_TYPE (op))))
3143 {
3144 error ("type mismatch in %qs reference", code_name);
3145 debug_generic_stmt (TREE_TYPE (expr));
3146 debug_generic_stmt (TREE_TYPE (TREE_TYPE (op)));
3147 return true;
3148 }
3149 expr = op;
3150 }
3151
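/* Peel off the handled components one by one, verifying each level
until the base object is reached.  */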
3152 while (handled_component_p (expr))
3153 {
3154 code_name = get_tree_code_name (TREE_CODE (expr));
3155
3156 if (TREE_CODE (expr) == REALPART_EXPR
3157 || TREE_CODE (expr) == IMAGPART_EXPR
3158 || TREE_CODE (expr) == BIT_FIELD_REF)
3159 {
3160 error ("non-top-level %qs", code_name);
3161 return true;
3162 }
3163
3164 tree op = TREE_OPERAND (expr, 0);
3165
3166 if (TREE_CODE (expr) == ARRAY_REF
3167 || TREE_CODE (expr) == ARRAY_RANGE_REF)
3168 {
3169 if (!is_gimple_val (TREE_OPERAND (expr, 1))
3170 || (TREE_OPERAND (expr, 2)
3171 && !is_gimple_val (TREE_OPERAND (expr, 2)))
3172 || (TREE_OPERAND (expr, 3)
3173 && !is_gimple_val (TREE_OPERAND (expr, 3))))
3174 {
3175 error ("invalid operands to %qs", code_name);
3176 debug_generic_stmt (expr);
3177 return true;
3178 }
3179 }
3180
3181 /* Verify if the reference array element types are compatible. */
3182 if (TREE_CODE (expr) == ARRAY_REF
3183 && !useless_type_conversion_p (TREE_TYPE (expr),
3184 TREE_TYPE (TREE_TYPE (op))))
3185 {
3186 error ("type mismatch in %qs", code_name);
3187 debug_generic_stmt (TREE_TYPE (expr));
3188 debug_generic_stmt (TREE_TYPE (TREE_TYPE (op)));
3189 return true;
3190 }
3191 if (TREE_CODE (expr) == ARRAY_RANGE_REF
3192 && !useless_type_conversion_p (TREE_TYPE (TREE_TYPE (expr)),
3193 TREE_TYPE (TREE_TYPE (op))))
3194 {
3195 error ("type mismatch in %qs", code_name);
3196 debug_generic_stmt (TREE_TYPE (TREE_TYPE (expr)));
3197 debug_generic_stmt (TREE_TYPE (TREE_TYPE (op)));
3198 return true;
3199 }
3200
3201 if (TREE_CODE (expr) == COMPONENT_REF)
3202 {
3203 if (TREE_OPERAND (expr, 2)
3204 && !is_gimple_val (TREE_OPERAND (expr, 2)))
3205 {
3206 error ("invalid %qs offset operator", code_name);
3207 return true;
3208 }
3209 if (!useless_type_conversion_p (TREE_TYPE (expr),
3210 TREE_TYPE (TREE_OPERAND (expr, 1))))
3211 {
3212 error ("type mismatch in %qs", code_name);
3213 debug_generic_stmt (TREE_TYPE (expr));
3214 debug_generic_stmt (TREE_TYPE (TREE_OPERAND (expr, 1)));
3215 return true;
3216 }
3217 }
3218
3219 if (TREE_CODE (expr) == VIEW_CONVERT_EXPR)
3220 {
3221 /* For VIEW_CONVERT_EXPRs which are allowed here too, we only check
3222 that their operand is not an SSA name or an invariant when
3223 requiring an lvalue (this usually means there is a SRA or IPA-SRA
3224 bug). Otherwise there is nothing to verify, gross mismatches at
3225 most invoke undefined behavior. */
3226 if (require_lvalue
3227 && (TREE_CODE (op) == SSA_NAME
3228 || is_gimple_min_invariant (op)))
3229 {
3230 error ("conversion of %qs on the left hand side of %qs",
3231 get_tree_code_name (TREE_CODE (op)), code_name);
3232 debug_generic_stmt (expr);
3233 return true;
3234 }
3235 else if (TREE_CODE (op) == SSA_NAME
3236 && TYPE_SIZE (TREE_TYPE (expr)) != TYPE_SIZE (TREE_TYPE (op)))
3237 {
3238 error ("conversion of register to a different size in %qs",
3239 code_name);
3240 debug_generic_stmt (expr);
3241 return true;
3242 }
3243 else if (!handled_component_p (op))
3244 return false;
3245 }
3246
3247 expr = op;
3248 }
3249
3250 code_name = get_tree_code_name (TREE_CODE (expr));
3251
3252 if (TREE_CODE (expr) == MEM_REF)
3253 {
3254 if (!is_gimple_mem_ref_addr (TREE_OPERAND (expr, 0))
3255 || (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
3256 && verify_address (TREE_OPERAND (expr, 0), false)))
3257 {
3258 error ("invalid address operand in %qs", code_name);
3259 debug_generic_stmt (expr);
3260 return true;
3261 }
3262 if (!poly_int_tree_p (TREE_OPERAND (expr, 1))
3263 || !POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1))))
3264 {
3265 error ("invalid offset operand in %qs", code_name);
3266 debug_generic_stmt (expr);
3267 return true;
3268 }
3269 if (MR_DEPENDENCE_CLIQUE (expr) != 0
3270 && MR_DEPENDENCE_CLIQUE (expr) > cfun->last_clique)
3271 {
3272 error ("invalid clique in %qs", code_name);
3273 debug_generic_stmt (expr);
3274 return true;
3275 }
3276 }
3277 else if (TREE_CODE (expr) == TARGET_MEM_REF)
3278 {
3279 if (!TMR_BASE (expr)
3280 || !is_gimple_mem_ref_addr (TMR_BASE (expr))
3281 || (TREE_CODE (TMR_BASE (expr)) == ADDR_EXPR
3282 && verify_address (TMR_BASE (expr), false)))
3283 {
3284 error ("invalid address operand in %qs", code_name);
3285 return true;
3286 }
3287 if (!TMR_OFFSET (expr)
3288 || !poly_int_tree_p (TMR_OFFSET (expr))
3289 || !POINTER_TYPE_P (TREE_TYPE (TMR_OFFSET (expr))))
3290 {
3291 error ("invalid offset operand in %qs", code_name);
3292 debug_generic_stmt (expr);
3293 return true;
3294 }
3295 if (MR_DEPENDENCE_CLIQUE (expr) != 0
3296 && MR_DEPENDENCE_CLIQUE (expr) > cfun->last_clique)
3297 {
3298 error ("invalid clique in %qs", code_name);
3299 debug_generic_stmt (expr);
3300 return true;
3301 }
3302 }
3303 else if (TREE_CODE (expr) == INDIRECT_REF)
3304 {
3305 error ("%qs in gimple IL", code_name);
3306 debug_generic_stmt (expr);
3307 return true;
3308 }
3309
3310 return ((require_lvalue || !is_gimple_min_invariant (expr))
3311 && verify_types_in_gimple_min_lval (expr));
3312 }
3313
3314 /* Returns true if there is one pointer type in TYPE_POINTER_TO (SRC_OBJ)
3315 list of pointer-to types that is trivially convertible to DEST. */
3316
3317 static bool
3318 one_pointer_to_useless_type_conversion_p (tree dest, tree src_obj)
3319 {
3320 tree src;
3321
3322 if (!TYPE_POINTER_TO (src_obj))
3323 return true;
3324
3325 for (src = TYPE_POINTER_TO (src_obj); src; src = TYPE_NEXT_PTR_TO (src))
3326 if (useless_type_conversion_p (dest, src))
3327 return true;
3328
3329 return false;
3330 }
3331
3332 /* Return true if TYPE1 is a fixed-point type and if conversions to and
3333 from TYPE2 can be handled by FIXED_CONVERT_EXPR. */
3334
3335 static bool
3336 valid_fixed_convert_types_p (tree type1, tree type2)
3337 {
3338 return (FIXED_POINT_TYPE_P (type1)
3339 && (INTEGRAL_TYPE_P (type2)
3340 || SCALAR_FLOAT_TYPE_P (type2)
3341 || FIXED_POINT_TYPE_P (type2)));
3342 }
3343
3344 /* Verify the contents of a GIMPLE_CALL STMT. Returns true when there
3345 is a problem, otherwise false. */
3346
3347 static bool
3348 verify_gimple_call (gcall *stmt)
3349 {
3350 tree fn = gimple_call_fn (stmt);
3351 tree fntype, fndecl;
3352 unsigned i;
3353
3354 if (gimple_call_internal_p (stmt))
3355 {
3356 if (fn)
3357 {
3358 error ("gimple call has two targets");
3359 debug_generic_stmt (fn);
3360 return true;
3361 }
3362 }
3363 else
3364 {
3365 if (!fn)
3366 {
3367 error ("gimple call has no target");
3368 return true;
3369 }
3370 }
3371
3372 if (fn && !is_gimple_call_addr (fn))
3373 {
3374 error ("invalid function in gimple call");
3375 debug_generic_stmt (fn);
3376 return true;
3377 }
3378
3379 if (fn
3380 && (!POINTER_TYPE_P (TREE_TYPE (fn))
3381 || (TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != FUNCTION_TYPE
3382 && TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != METHOD_TYPE)))
3383 {
3384 error ("non-function in gimple call");
3385 return true;
3386 }
3387
3388 fndecl = gimple_call_fndecl (stmt);
3389 if (fndecl
3390 && TREE_CODE (fndecl) == FUNCTION_DECL
3391 && DECL_LOOPING_CONST_OR_PURE_P (fndecl)
3392 && !DECL_PURE_P (fndecl)
3393 && !TREE_READONLY (fndecl))
3394 {
3395 error ("invalid pure const state for function");
3396 return true;
3397 }
3398
3399 tree lhs = gimple_call_lhs (stmt);
3400 if (lhs
3401 && (!is_gimple_lvalue (lhs)
3402 || verify_types_in_gimple_reference (lhs, true)))
3403 {
3404 error ("invalid LHS in gimple call");
3405 return true;
3406 }
3407
3408 if (gimple_call_ctrl_altering_p (stmt)
3409 && gimple_call_noreturn_p (stmt)
3410 && should_remove_lhs_p (lhs))
3411 {
3412 error ("LHS in %<noreturn%> call");
3413 return true;
3414 }
3415
3416 fntype = gimple_call_fntype (stmt);
3417 if (fntype
3418 && lhs
3419 && !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (fntype))
3420 /* ??? At least C++ misses conversions at assignments from
3421 void * call results.
3422 For now simply allow arbitrary pointer type conversions. */
3423 && !(POINTER_TYPE_P (TREE_TYPE (lhs))
3424 && POINTER_TYPE_P (TREE_TYPE (fntype))))
3425 {
3426 error ("invalid conversion in gimple call");
3427 debug_generic_stmt (TREE_TYPE (lhs));
3428 debug_generic_stmt (TREE_TYPE (fntype));
3429 return true;
3430 }
3431
3432 if (gimple_call_chain (stmt)
3433 && !is_gimple_val (gimple_call_chain (stmt)))
3434 {
3435 error ("invalid static chain in gimple call");
3436 debug_generic_stmt (gimple_call_chain (stmt));
3437 return true;
3438 }
3439
3440 /* If there is a static chain argument, the call should either be
3441 indirect, or the decl should have DECL_STATIC_CHAIN set. */
3442 if (gimple_call_chain (stmt)
3443 && fndecl
3444 && !DECL_STATIC_CHAIN (fndecl))
3445 {
3446 error ("static chain with function that doesn%'t use one");
3447 return true;
3448 }
3449
3450 if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
3451 {
3452 switch (DECL_FUNCTION_CODE (fndecl))
3453 {
3454 case BUILT_IN_UNREACHABLE:
3455 case BUILT_IN_TRAP:
3456 if (gimple_call_num_args (stmt) > 0)
3457 {
3458 /* Built-in unreachable with parameters might not be caught by
3459 undefined behavior sanitizer. Front-ends do check that users do not
3460 call them that way, but we also produce calls to
3461 __builtin_unreachable internally, for example when IPA figures
3462 out a call cannot happen in a legal program. In such cases,
3463 we must make sure arguments are stripped off. */
3464 error ("%<__builtin_unreachable%> or %<__builtin_trap%> call "
3465 "with arguments");
3466 return true;
3467 }
3468 break;
3469 default:
3470 break;
3471 }
3472 }
3473
3474 /* ??? The C frontend passes unpromoted arguments in case it
3475 didn't see a function declaration before the call. So for now
3476 leave the call arguments mostly unverified. Once we gimplify
3477 unit-at-a-time we have a chance to fix this. */
3478
3479 for (i = 0; i < gimple_call_num_args (stmt); ++i)
3480 {
3481 tree arg = gimple_call_arg (stmt, i);
3482 if ((is_gimple_reg_type (TREE_TYPE (arg))
3483 && !is_gimple_val (arg))
3484 || (!is_gimple_reg_type (TREE_TYPE (arg))
3485 && !is_gimple_lvalue (arg)))
3486 {
3487 error ("invalid argument to gimple call");
3488 debug_generic_expr (arg);
3489 return true;
3490 }
3491 }
3492
3493 return false;
3494 }
3495
3496 /* Verifies the gimple comparison with the result type TYPE and
3497 the operands OP0 and OP1; the comparison code is CODE. */
3498
3499 static bool
3500 verify_gimple_comparison (tree type, tree op0, tree op1, enum tree_code code)
3501 {
3502 tree op0_type = TREE_TYPE (op0);
3503 tree op1_type = TREE_TYPE (op1);
3504
3505 if (!is_gimple_val (op0) || !is_gimple_val (op1))
3506 {
3507 error ("invalid operands in gimple comparison");
3508 return true;
3509 }
3510
3511 /* For comparisons we do not have the operation's type as the
3512 effective type the comparison is carried out in. Instead
3513 we require that either the first operand is trivially
3514 convertible into the second, or the other way around. */
3515 if (!useless_type_conversion_p (op0_type, op1_type)
3516 && !useless_type_conversion_p (op1_type, op0_type))
3517 {
3518 error ("mismatching comparison operand types");
3519 debug_generic_expr (op0_type);
3520 debug_generic_expr (op1_type);
3521 return true;
3522 }
3523
3524 /* The resulting type of a comparison may be an effective boolean type. */
3525 if (INTEGRAL_TYPE_P (type)
3526 && (TREE_CODE (type) == BOOLEAN_TYPE
3527 || TYPE_PRECISION (type) == 1))
3528 {
3529 if ((TREE_CODE (op0_type) == VECTOR_TYPE
3530 || TREE_CODE (op1_type) == VECTOR_TYPE)
3531 && code != EQ_EXPR && code != NE_EXPR
3532 && !VECTOR_BOOLEAN_TYPE_P (op0_type)
3533 && !VECTOR_INTEGER_TYPE_P (op0_type))
3534 {
3535 error ("unsupported operation or type for vector comparison"
3536 " returning a boolean");
3537 debug_generic_expr (op0_type);
3538 debug_generic_expr (op1_type);
3539 return true;
3540 }
3541 }
3542 /* Or a boolean vector type with the same element count
3543 as the comparison operand types. */
3544 else if (TREE_CODE (type) == VECTOR_TYPE
3545 && TREE_CODE (TREE_TYPE (type)) == BOOLEAN_TYPE)
3546 {
3547 if (TREE_CODE (op0_type) != VECTOR_TYPE
3548 || TREE_CODE (op1_type) != VECTOR_TYPE)
3549 {
3550 error ("non-vector operands in vector comparison");
3551 debug_generic_expr (op0_type);
3552 debug_generic_expr (op1_type);
3553 return true;
3554 }
3555
3556 if (maybe_ne (TYPE_VECTOR_SUBPARTS (type),
3557 TYPE_VECTOR_SUBPARTS (op0_type)))
3558 {
3559 error ("invalid vector comparison resulting type");
3560 debug_generic_expr (type);
3561 return true;
3562 }
3563 }
3564 else
3565 {
3566 error ("bogus comparison result type");
3567 debug_generic_expr (type);
3568 return true;
3569 }
3570
3571 return false;
3572 }
3573
3574 /* Verify a gimple assignment statement STMT with an unary rhs.
3575 Returns true if anything is wrong. */
3576
3577 static bool
3578 verify_gimple_assign_unary (gassign *stmt)
3579 {
3580 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
3581 tree lhs = gimple_assign_lhs (stmt);
3582 tree lhs_type = TREE_TYPE (lhs);
3583 tree rhs1 = gimple_assign_rhs1 (stmt);
3584 tree rhs1_type = TREE_TYPE (rhs1);
3585
3586 if (!is_gimple_reg (lhs))
3587 {
3588 error ("non-register as LHS of unary operation");
3589 return true;
3590 }
3591
3592 if (!is_gimple_val (rhs1))
3593 {
3594 error ("invalid operand in unary operation");
3595 return true;
3596 }
3597
3598 const char* const code_name = get_tree_code_name (rhs_code);
3599
3600 /* First handle conversions. */
3601 switch (rhs_code)
3602 {
3603 CASE_CONVERT:
3604 {
3605 /* Allow conversions between vectors with the same number of elements,
3606 provided that the conversion is OK for the element types too. */
3607 if (VECTOR_TYPE_P (lhs_type)
3608 && VECTOR_TYPE_P (rhs1_type)
3609 && known_eq (TYPE_VECTOR_SUBPARTS (lhs_type),
3610 TYPE_VECTOR_SUBPARTS (rhs1_type)))
3611 {
3612 lhs_type = TREE_TYPE (lhs_type);
3613 rhs1_type = TREE_TYPE (rhs1_type);
3614 }
3615 else if (VECTOR_TYPE_P (lhs_type) || VECTOR_TYPE_P (rhs1_type))
3616 {
3617 error ("invalid vector types in nop conversion");
3618 debug_generic_expr (lhs_type);
3619 debug_generic_expr (rhs1_type);
3620 return true;
3621 }
3622
3623 /* Allow conversions from pointer type to integral type only if
3624 there is no sign or zero extension involved.
3625 For targets where the precision of ptrofftype doesn't match that
3626 of pointers we allow conversions to types where
3627 POINTERS_EXTEND_UNSIGNED specifies how that works. */
3628 if ((POINTER_TYPE_P (lhs_type)
3629 && INTEGRAL_TYPE_P (rhs1_type))
3630 || (POINTER_TYPE_P (rhs1_type)
3631 && INTEGRAL_TYPE_P (lhs_type)
3632 && (TYPE_PRECISION (rhs1_type) >= TYPE_PRECISION (lhs_type)
3633 #if defined(POINTERS_EXTEND_UNSIGNED)
3634 || (TYPE_MODE (rhs1_type) == ptr_mode
3635 && (TYPE_PRECISION (lhs_type)
3636 == BITS_PER_WORD /* word_mode */
3637 || (TYPE_PRECISION (lhs_type)
3638 == GET_MODE_PRECISION (Pmode))))
3639 #endif
3640 )))
3641 return false;
3642
3643 /* Allow conversion from integral to offset type and vice versa. */
3644 if ((TREE_CODE (lhs_type) == OFFSET_TYPE
3645 && INTEGRAL_TYPE_P (rhs1_type))
3646 || (INTEGRAL_TYPE_P (lhs_type)
3647 && TREE_CODE (rhs1_type) == OFFSET_TYPE))
3648 return false;
3649
3650 /* Otherwise assert we are converting between types of the
3651 same kind. */
3652 if (INTEGRAL_TYPE_P (lhs_type) != INTEGRAL_TYPE_P (rhs1_type))
3653 {
3654 error ("invalid types in nop conversion");
3655 debug_generic_expr (lhs_type);
3656 debug_generic_expr (rhs1_type);
3657 return true;
3658 }
3659
3660 return false;
3661 }
3662
3663 case ADDR_SPACE_CONVERT_EXPR:
3664 {
3665 if (!POINTER_TYPE_P (rhs1_type) || !POINTER_TYPE_P (lhs_type)
3666 || (TYPE_ADDR_SPACE (TREE_TYPE (rhs1_type))
3667 == TYPE_ADDR_SPACE (TREE_TYPE (lhs_type))))
3668 {
3669 error ("invalid types in address space conversion");
3670 debug_generic_expr (lhs_type);
3671 debug_generic_expr (rhs1_type);
3672 return true;
3673 }
3674
3675 return false;
3676 }
3677
3678 case FIXED_CONVERT_EXPR:
3679 {
3680 if (!valid_fixed_convert_types_p (lhs_type, rhs1_type)
3681 && !valid_fixed_convert_types_p (rhs1_type, lhs_type))
3682 {
3683 error ("invalid types in fixed-point conversion");
3684 debug_generic_expr (lhs_type);
3685 debug_generic_expr (rhs1_type);
3686 return true;
3687 }
3688
3689 return false;
3690 }
3691
3692 case FLOAT_EXPR:
3693 {
3694 if ((!INTEGRAL_TYPE_P (rhs1_type) || !SCALAR_FLOAT_TYPE_P (lhs_type))
3695 && (!VECTOR_INTEGER_TYPE_P (rhs1_type)
3696 || !VECTOR_FLOAT_TYPE_P (lhs_type)))
3697 {
3698 error ("invalid types in conversion to floating-point");
3699 debug_generic_expr (lhs_type);
3700 debug_generic_expr (rhs1_type);
3701 return true;
3702 }
3703
3704 return false;
3705 }
3706
3707 case FIX_TRUNC_EXPR:
3708 {
3709 if ((!INTEGRAL_TYPE_P (lhs_type) || !SCALAR_FLOAT_TYPE_P (rhs1_type))
3710 && (!VECTOR_INTEGER_TYPE_P (lhs_type)
3711 || !VECTOR_FLOAT_TYPE_P (rhs1_type)))
3712 {
3713 error ("invalid types in conversion to integer");
3714 debug_generic_expr (lhs_type);
3715 debug_generic_expr (rhs1_type);
3716 return true;
3717 }
3718
3719 return false;
3720 }
3721
3722 case VEC_UNPACK_HI_EXPR:
3723 case VEC_UNPACK_LO_EXPR:
3724 case VEC_UNPACK_FLOAT_HI_EXPR:
3725 case VEC_UNPACK_FLOAT_LO_EXPR:
3726 case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
3727 case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
3728 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
3729 || TREE_CODE (lhs_type) != VECTOR_TYPE
3730 || (!INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
3731 && !SCALAR_FLOAT_TYPE_P (TREE_TYPE (lhs_type)))
3732 || (!INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
3733 && !SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type)))
3734 || ((rhs_code == VEC_UNPACK_HI_EXPR
3735 || rhs_code == VEC_UNPACK_LO_EXPR)
3736 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
3737 != INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))))
3738 || ((rhs_code == VEC_UNPACK_FLOAT_HI_EXPR
3739 || rhs_code == VEC_UNPACK_FLOAT_LO_EXPR)
3740 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
3741 || SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type))))
3742 || ((rhs_code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
3743 || rhs_code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
3744 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
3745 || SCALAR_FLOAT_TYPE_P (TREE_TYPE (lhs_type))))
3746 || (maybe_ne (GET_MODE_SIZE (element_mode (lhs_type)),
3747 2 * GET_MODE_SIZE (element_mode (rhs1_type)))
3748 && (!VECTOR_BOOLEAN_TYPE_P (lhs_type)
3749 || !VECTOR_BOOLEAN_TYPE_P (rhs1_type)))
3750 || maybe_ne (2 * TYPE_VECTOR_SUBPARTS (lhs_type),
3751 TYPE_VECTOR_SUBPARTS (rhs1_type)))
3752 {
3753 error ("type mismatch in %qs expression", code_name);
3754 debug_generic_expr (lhs_type);
3755 debug_generic_expr (rhs1_type);
3756 return true;
3757 }
3758
3759 return false;
3760
3761 case NEGATE_EXPR:
3762 case ABS_EXPR:
3763 case BIT_NOT_EXPR:
3764 case PAREN_EXPR:
3765 case CONJ_EXPR:
3766 break;
3767
3768 case ABSU_EXPR:
3769 if (!ANY_INTEGRAL_TYPE_P (lhs_type)
3770 || !TYPE_UNSIGNED (lhs_type)
3771 || !ANY_INTEGRAL_TYPE_P (rhs1_type)
3772 || TYPE_UNSIGNED (rhs1_type)
3773 || element_precision (lhs_type) != element_precision (rhs1_type))
3774 {
3775 error ("invalid types for %qs", code_name);
3776 debug_generic_expr (lhs_type);
3777 debug_generic_expr (rhs1_type);
3778 return true;
3779 }
3780 return false;
3781
3782 case VEC_DUPLICATE_EXPR:
3783 if (TREE_CODE (lhs_type) != VECTOR_TYPE
3784 || !useless_type_conversion_p (TREE_TYPE (lhs_type), rhs1_type))
3785 {
3786 error ("%qs should be from a scalar to a like vector", code_name);
3787 debug_generic_expr (lhs_type);
3788 debug_generic_expr (rhs1_type);
3789 return true;
3790 }
3791 return false;
3792
3793 default:
3794 gcc_unreachable ();
3795 }
3796
3797 /* For the remaining codes assert there is no conversion involved. */
3798 if (!useless_type_conversion_p (lhs_type, rhs1_type))
3799 {
3800 error ("non-trivial conversion in unary operation");
3801 debug_generic_expr (lhs_type);
3802 debug_generic_expr (rhs1_type);
3803 return true;
3804 }
3805
3806 return false;
3807 }
3808
3809 /* Verify a gimple assignment statement STMT with a binary rhs.
3810 Returns true if anything is wrong. */
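/* Illustrative only: typical binary GIMPLE assignments verified here are

     c_1 = a_2 + b_3;		<-- PLUS_EXPR
     p_4 = q_5 + 8;		<-- POINTER_PLUS_EXPR, offset in ptrofftype
     s_6 = v_7 << 2;		<-- LSHIFT_EXPR

   with both operands being GIMPLE values and the types obeying the
   per-code rules in the switch below.  */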
3811
3812 static bool
3813 verify_gimple_assign_binary (gassign *stmt)
3814 {
3815 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
3816 tree lhs = gimple_assign_lhs (stmt);
3817 tree lhs_type = TREE_TYPE (lhs);
3818 tree rhs1 = gimple_assign_rhs1 (stmt);
3819 tree rhs1_type = TREE_TYPE (rhs1);
3820 tree rhs2 = gimple_assign_rhs2 (stmt);
3821 tree rhs2_type = TREE_TYPE (rhs2);
3822
3823 if (!is_gimple_reg (lhs))
3824 {
3825 error ("non-register as LHS of binary operation");
3826 return true;
3827 }
3828
3829 if (!is_gimple_val (rhs1)
3830 || !is_gimple_val (rhs2))
3831 {
3832 error ("invalid operands in binary operation");
3833 return true;
3834 }
3835
3836 const char* const code_name = get_tree_code_name (rhs_code);
3837
3838 /* First handle operations that involve different types. */
3839 switch (rhs_code)
3840 {
3841 case COMPLEX_EXPR:
3842 {
3843 if (TREE_CODE (lhs_type) != COMPLEX_TYPE
3844 || !(INTEGRAL_TYPE_P (rhs1_type)
3845 || SCALAR_FLOAT_TYPE_P (rhs1_type))
3846 || !(INTEGRAL_TYPE_P (rhs2_type)
3847 || SCALAR_FLOAT_TYPE_P (rhs2_type)))
3848 {
3849 error ("type mismatch in %qs", code_name);
3850 debug_generic_expr (lhs_type);
3851 debug_generic_expr (rhs1_type);
3852 debug_generic_expr (rhs2_type);
3853 return true;
3854 }
3855
3856 return false;
3857 }
3858
3859 case LSHIFT_EXPR:
3860 case RSHIFT_EXPR:
3861 case LROTATE_EXPR:
3862 case RROTATE_EXPR:
3863 {
3864 /* Shifts and rotates are ok on integral types, fixed point
3865 types and integer vector types. */
3866 if ((!INTEGRAL_TYPE_P (rhs1_type)
3867 && !FIXED_POINT_TYPE_P (rhs1_type)
3868 && !(TREE_CODE (rhs1_type) == VECTOR_TYPE
3869 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))))
3870 || (!INTEGRAL_TYPE_P (rhs2_type)
3871 /* Vector shifts of vectors are also ok. */
3872 && !(TREE_CODE (rhs1_type) == VECTOR_TYPE
3873 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
3874 && TREE_CODE (rhs2_type) == VECTOR_TYPE
3875 && INTEGRAL_TYPE_P (TREE_TYPE (rhs2_type))))
3876 || !useless_type_conversion_p (lhs_type, rhs1_type))
3877 {
3878 error ("type mismatch in %qs", code_name);
3879 debug_generic_expr (lhs_type);
3880 debug_generic_expr (rhs1_type);
3881 debug_generic_expr (rhs2_type);
3882 return true;
3883 }
3884
3885 return false;
3886 }
3887
3888 case WIDEN_LSHIFT_EXPR:
3889 {
3890 if (!INTEGRAL_TYPE_P (lhs_type)
3891 || !INTEGRAL_TYPE_P (rhs1_type)
3892 || TREE_CODE (rhs2) != INTEGER_CST
3893 || (2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type)))
3894 {
3895 error ("type mismatch in %qs", code_name);
3896 debug_generic_expr (lhs_type);
3897 debug_generic_expr (rhs1_type);
3898 debug_generic_expr (rhs2_type);
3899 return true;
3900 }
3901
3902 return false;
3903 }
3904
3905 case VEC_WIDEN_LSHIFT_HI_EXPR:
3906 case VEC_WIDEN_LSHIFT_LO_EXPR:
3907 {
3908 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
3909 || TREE_CODE (lhs_type) != VECTOR_TYPE
3910 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
3911 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
3912 || TREE_CODE (rhs2) != INTEGER_CST
3913 || (2 * TYPE_PRECISION (TREE_TYPE (rhs1_type))
3914 > TYPE_PRECISION (TREE_TYPE (lhs_type))))
3915 {
3916 error ("type mismatch in %qs", code_name);
3917 debug_generic_expr (lhs_type);
3918 debug_generic_expr (rhs1_type);
3919 debug_generic_expr (rhs2_type);
3920 return true;
3921 }
3922
3923 return false;
3924 }
3925
3926 case WIDEN_PLUS_EXPR:
3927 case WIDEN_MINUS_EXPR:
3928 case PLUS_EXPR:
3929 case MINUS_EXPR:
3930 {
3931 tree lhs_etype = lhs_type;
3932 tree rhs1_etype = rhs1_type;
3933 tree rhs2_etype = rhs2_type;
3934 if (TREE_CODE (lhs_type) == VECTOR_TYPE)
3935 {
3936 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
3937 || TREE_CODE (rhs2_type) != VECTOR_TYPE)
3938 {
3939 error ("invalid non-vector operands to %qs", code_name);
3940 return true;
3941 }
3942 lhs_etype = TREE_TYPE (lhs_type);
3943 rhs1_etype = TREE_TYPE (rhs1_type);
3944 rhs2_etype = TREE_TYPE (rhs2_type);
3945 }
3946 if (POINTER_TYPE_P (lhs_etype)
3947 || POINTER_TYPE_P (rhs1_etype)
3948 || POINTER_TYPE_P (rhs2_etype))
3949 {
3950 error ("invalid (pointer) operands %qs", code_name);
3951 return true;
3952 }
3953
3954 /* Continue with generic binary expression handling. */
3955 break;
3956 }
3957
3958 case POINTER_PLUS_EXPR:
3959 {
3960 if (!POINTER_TYPE_P (rhs1_type)
3961 || !useless_type_conversion_p (lhs_type, rhs1_type)
3962 || !ptrofftype_p (rhs2_type))
3963 {
3964 error ("type mismatch in %qs", code_name);
3965 debug_generic_stmt (lhs_type);
3966 debug_generic_stmt (rhs1_type);
3967 debug_generic_stmt (rhs2_type);
3968 return true;
3969 }
3970
3971 return false;
3972 }
3973
3974 case POINTER_DIFF_EXPR:
3975 {
3976 if (!POINTER_TYPE_P (rhs1_type)
3977 || !POINTER_TYPE_P (rhs2_type)
3978 /* Because we special-case pointers to void we allow difference
3979 of arbitrary pointers with the same mode. */
3980 || TYPE_MODE (rhs1_type) != TYPE_MODE (rhs2_type)
3981 || !INTEGRAL_TYPE_P (lhs_type)
3982 || TYPE_UNSIGNED (lhs_type)
3983 || TYPE_PRECISION (lhs_type) != TYPE_PRECISION (rhs1_type))
3984 {
3985 error ("type mismatch in %qs", code_name);
3986 debug_generic_stmt (lhs_type);
3987 debug_generic_stmt (rhs1_type);
3988 debug_generic_stmt (rhs2_type);
3989 return true;
3990 }
3991
3992 return false;
3993 }
3994
3995 case TRUTH_ANDIF_EXPR:
3996 case TRUTH_ORIF_EXPR:
3997 case TRUTH_AND_EXPR:
3998 case TRUTH_OR_EXPR:
3999 case TRUTH_XOR_EXPR:
4000
4001 gcc_unreachable ();
4002
4003 case LT_EXPR:
4004 case LE_EXPR:
4005 case GT_EXPR:
4006 case GE_EXPR:
4007 case EQ_EXPR:
4008 case NE_EXPR:
4009 case UNORDERED_EXPR:
4010 case ORDERED_EXPR:
4011 case UNLT_EXPR:
4012 case UNLE_EXPR:
4013 case UNGT_EXPR:
4014 case UNGE_EXPR:
4015 case UNEQ_EXPR:
4016 case LTGT_EXPR:
4017 /* Comparisons are also binary, but the result type is not
4018 connected to the operand types. */
4019 return verify_gimple_comparison (lhs_type, rhs1, rhs2, rhs_code);
4020
4021 case WIDEN_MULT_EXPR:
4022 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
4023 return true;
4024 return ((2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type))
4025 || (TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (rhs2_type)));
4026
4027 case WIDEN_SUM_EXPR:
4028 {
4029 if (((TREE_CODE (rhs1_type) != VECTOR_TYPE
4030 || TREE_CODE (lhs_type) != VECTOR_TYPE)
4031 && ((!INTEGRAL_TYPE_P (rhs1_type)
4032 && !SCALAR_FLOAT_TYPE_P (rhs1_type))
4033 || (!INTEGRAL_TYPE_P (lhs_type)
4034 && !SCALAR_FLOAT_TYPE_P (lhs_type))))
4035 || !useless_type_conversion_p (lhs_type, rhs2_type)
4036 || maybe_lt (GET_MODE_SIZE (element_mode (rhs2_type)),
4037 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
4038 {
4039 error ("type mismatch in %qs", code_name);
4040 debug_generic_expr (lhs_type);
4041 debug_generic_expr (rhs1_type);
4042 debug_generic_expr (rhs2_type);
4043 return true;
4044 }
4045 return false;
4046 }
4047
4048 case VEC_WIDEN_MINUS_HI_EXPR:
4049 case VEC_WIDEN_MINUS_LO_EXPR:
4050 case VEC_WIDEN_PLUS_HI_EXPR:
4051 case VEC_WIDEN_PLUS_LO_EXPR:
4052 case VEC_WIDEN_MULT_HI_EXPR:
4053 case VEC_WIDEN_MULT_LO_EXPR:
4054 case VEC_WIDEN_MULT_EVEN_EXPR:
4055 case VEC_WIDEN_MULT_ODD_EXPR:
4056 {
4057 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4058 || TREE_CODE (lhs_type) != VECTOR_TYPE
4059 || !types_compatible_p (rhs1_type, rhs2_type)
4060 || maybe_ne (GET_MODE_SIZE (element_mode (lhs_type)),
4061 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
4062 {
4063 error ("type mismatch in %qs", code_name);
4064 debug_generic_expr (lhs_type);
4065 debug_generic_expr (rhs1_type);
4066 debug_generic_expr (rhs2_type);
4067 return true;
4068 }
4069 return false;
4070 }
4071
4072 case VEC_PACK_TRUNC_EXPR:
4073 /* ??? We currently use VEC_PACK_TRUNC_EXPR to simply concat
4074 vector boolean types. */
4075 if (VECTOR_BOOLEAN_TYPE_P (lhs_type)
4076 && VECTOR_BOOLEAN_TYPE_P (rhs1_type)
4077 && types_compatible_p (rhs1_type, rhs2_type)
4078 && known_eq (TYPE_VECTOR_SUBPARTS (lhs_type),
4079 2 * TYPE_VECTOR_SUBPARTS (rhs1_type)))
4080 return false;
4081
4082 /* Fallthru. */
4083 case VEC_PACK_SAT_EXPR:
4084 case VEC_PACK_FIX_TRUNC_EXPR:
4085 {
4086 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4087 || TREE_CODE (lhs_type) != VECTOR_TYPE
4088 || !((rhs_code == VEC_PACK_FIX_TRUNC_EXPR
4089 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type))
4090 && INTEGRAL_TYPE_P (TREE_TYPE (lhs_type)))
4091 || (INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
4092 == INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))))
4093 || !types_compatible_p (rhs1_type, rhs2_type)
4094 || maybe_ne (GET_MODE_SIZE (element_mode (rhs1_type)),
4095 2 * GET_MODE_SIZE (element_mode (lhs_type)))
4096 || maybe_ne (2 * TYPE_VECTOR_SUBPARTS (rhs1_type),
4097 TYPE_VECTOR_SUBPARTS (lhs_type)))
4098 {
4099 error ("type mismatch in %qs", code_name);
4100 debug_generic_expr (lhs_type);
4101 debug_generic_expr (rhs1_type);
4102 debug_generic_expr (rhs2_type);
4103 return true;
4104 }
4105
4106 return false;
4107 }
4108
4109 case VEC_PACK_FLOAT_EXPR:
4110 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4111 || TREE_CODE (lhs_type) != VECTOR_TYPE
4112 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
4113 || !SCALAR_FLOAT_TYPE_P (TREE_TYPE (lhs_type))
4114 || !types_compatible_p (rhs1_type, rhs2_type)
4115 || maybe_ne (GET_MODE_SIZE (element_mode (rhs1_type)),
4116 2 * GET_MODE_SIZE (element_mode (lhs_type)))
4117 || maybe_ne (2 * TYPE_VECTOR_SUBPARTS (rhs1_type),
4118 TYPE_VECTOR_SUBPARTS (lhs_type)))
4119 {
4120 error ("type mismatch in %qs", code_name);
4121 debug_generic_expr (lhs_type);
4122 debug_generic_expr (rhs1_type);
4123 debug_generic_expr (rhs2_type);
4124 return true;
4125 }
4126
4127 return false;
4128
4129 case MULT_EXPR:
4130 case MULT_HIGHPART_EXPR:
4131 case TRUNC_DIV_EXPR:
4132 case CEIL_DIV_EXPR:
4133 case FLOOR_DIV_EXPR:
4134 case ROUND_DIV_EXPR:
4135 case TRUNC_MOD_EXPR:
4136 case CEIL_MOD_EXPR:
4137 case FLOOR_MOD_EXPR:
4138 case ROUND_MOD_EXPR:
4139 case RDIV_EXPR:
4140 case EXACT_DIV_EXPR:
4141 case MIN_EXPR:
4142 case MAX_EXPR:
4143 case BIT_IOR_EXPR:
4144 case BIT_XOR_EXPR:
4145 case BIT_AND_EXPR:
4146 /* Continue with generic binary expression handling. */
4147 break;
4148
4149 case VEC_SERIES_EXPR:
4150 if (!useless_type_conversion_p (rhs1_type, rhs2_type))
4151 {
4152 error ("type mismatch in %qs", code_name);
4153 debug_generic_expr (rhs1_type);
4154 debug_generic_expr (rhs2_type);
4155 return true;
4156 }
4157 if (TREE_CODE (lhs_type) != VECTOR_TYPE
4158 || !useless_type_conversion_p (TREE_TYPE (lhs_type), rhs1_type))
4159 {
4160 error ("vector type expected in %qs", code_name);
4161 debug_generic_expr (lhs_type);
4162 return true;
4163 }
4164 return false;
4165
4166 default:
4167 gcc_unreachable ();
4168 }
4169
4170 if (!useless_type_conversion_p (lhs_type, rhs1_type)
4171 || !useless_type_conversion_p (lhs_type, rhs2_type))
4172 {
4173 error ("type mismatch in binary expression");
4174 debug_generic_stmt (lhs_type);
4175 debug_generic_stmt (rhs1_type);
4176 debug_generic_stmt (rhs2_type);
4177 return true;
4178 }
4179
4180 return false;
4181 }
4182
4183 /* Verify a gimple assignment statement STMT with a ternary rhs.
4184 Returns true if anything is wrong. */
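/* Illustrative only: ternary right-hand sides verified here include

     r_1 = c_2 ? a_3 : b_4;			<-- COND_EXPR / VEC_COND_EXPR
     d_5 = DOT_PROD_EXPR <x_6, y_7, acc_8>;	<-- DOT_PROD_EXPR

   where the operands are GIMPLE values (or a comparison for COND_EXPR)
   with types constrained per code.  */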
4185
4186 static bool
4187 verify_gimple_assign_ternary (gassign *stmt)
4188 {
4189 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
4190 tree lhs = gimple_assign_lhs (stmt);
4191 tree lhs_type = TREE_TYPE (lhs);
4192 tree rhs1 = gimple_assign_rhs1 (stmt);
4193 tree rhs1_type = TREE_TYPE (rhs1);
4194 tree rhs2 = gimple_assign_rhs2 (stmt);
4195 tree rhs2_type = TREE_TYPE (rhs2);
4196 tree rhs3 = gimple_assign_rhs3 (stmt);
4197 tree rhs3_type = TREE_TYPE (rhs3);
4198
4199 if (!is_gimple_reg (lhs))
4200 {
4201 error ("non-register as LHS of ternary operation");
4202 return true;
4203 }
4204
4205 if ((rhs_code == COND_EXPR
4206 ? !is_gimple_condexpr (rhs1) : !is_gimple_val (rhs1))
4207 || !is_gimple_val (rhs2)
4208 || !is_gimple_val (rhs3))
4209 {
4210 error ("invalid operands in ternary operation");
4211 return true;
4212 }
4213
4214 const char* const code_name = get_tree_code_name (rhs_code);
4215
4216 /* First handle operations that involve different types. */
4217 switch (rhs_code)
4218 {
4219 case WIDEN_MULT_PLUS_EXPR:
4220 case WIDEN_MULT_MINUS_EXPR:
4221 if ((!INTEGRAL_TYPE_P (rhs1_type)
4222 && !FIXED_POINT_TYPE_P (rhs1_type))
4223 || !useless_type_conversion_p (rhs1_type, rhs2_type)
4224 || !useless_type_conversion_p (lhs_type, rhs3_type)
4225 || 2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type)
4226 || TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (rhs2_type))
4227 {
4228 error ("type mismatch in %qs", code_name);
4229 debug_generic_expr (lhs_type);
4230 debug_generic_expr (rhs1_type);
4231 debug_generic_expr (rhs2_type);
4232 debug_generic_expr (rhs3_type);
4233 return true;
4234 }
4235 break;
4236
4237 case VEC_COND_EXPR:
4238 if (!VECTOR_BOOLEAN_TYPE_P (rhs1_type)
4239 || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs1_type),
4240 TYPE_VECTOR_SUBPARTS (lhs_type)))
4241 {
4242 error ("the first argument of a %qs must be of a "
4243 "boolean vector type of the same number of elements "
4244 "as the result", code_name);
4245 debug_generic_expr (lhs_type);
4246 debug_generic_expr (rhs1_type);
4247 return true;
4248 }
4249 if (!is_gimple_val (rhs1))
4250 return true;
4251 /* Fallthrough. */
4252 case COND_EXPR:
4253 if (!is_gimple_val (rhs1)
4254 && verify_gimple_comparison (TREE_TYPE (rhs1),
4255 TREE_OPERAND (rhs1, 0),
4256 TREE_OPERAND (rhs1, 1),
4257 TREE_CODE (rhs1)))
4258 return true;
4259 if (!useless_type_conversion_p (lhs_type, rhs2_type)
4260 || !useless_type_conversion_p (lhs_type, rhs3_type))
4261 {
4262 error ("type mismatch in %qs", code_name);
4263 debug_generic_expr (lhs_type);
4264 debug_generic_expr (rhs2_type);
4265 debug_generic_expr (rhs3_type);
4266 return true;
4267 }
4268 break;
4269
4270 case VEC_PERM_EXPR:
4271 if (!useless_type_conversion_p (lhs_type, rhs1_type)
4272 || !useless_type_conversion_p (lhs_type, rhs2_type))
4273 {
4274 error ("type mismatch in %qs", code_name);
4275 debug_generic_expr (lhs_type);
4276 debug_generic_expr (rhs1_type);
4277 debug_generic_expr (rhs2_type);
4278 debug_generic_expr (rhs3_type);
4279 return true;
4280 }
4281
4282 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4283 || TREE_CODE (rhs2_type) != VECTOR_TYPE
4284 || TREE_CODE (rhs3_type) != VECTOR_TYPE)
4285 {
4286 error ("vector types expected in %qs", code_name);
4287 debug_generic_expr (lhs_type);
4288 debug_generic_expr (rhs1_type);
4289 debug_generic_expr (rhs2_type);
4290 debug_generic_expr (rhs3_type);
4291 return true;
4292 }
4293
4294 if (maybe_ne (TYPE_VECTOR_SUBPARTS (rhs1_type),
4295 TYPE_VECTOR_SUBPARTS (rhs2_type))
4296 || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs2_type),
4297 TYPE_VECTOR_SUBPARTS (rhs3_type))
4298 || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs3_type),
4299 TYPE_VECTOR_SUBPARTS (lhs_type)))
4300 {
4301 error ("vectors with different element number found in %qs",
4302 code_name);
4303 debug_generic_expr (lhs_type);
4304 debug_generic_expr (rhs1_type);
4305 debug_generic_expr (rhs2_type);
4306 debug_generic_expr (rhs3_type);
4307 return true;
4308 }
4309
4310 if (TREE_CODE (TREE_TYPE (rhs3_type)) != INTEGER_TYPE
4311 || (TREE_CODE (rhs3) != VECTOR_CST
4312 && (GET_MODE_BITSIZE (SCALAR_INT_TYPE_MODE
4313 (TREE_TYPE (rhs3_type)))
4314 != GET_MODE_BITSIZE (SCALAR_TYPE_MODE
4315 (TREE_TYPE (rhs1_type))))))
4316 {
4317 error ("invalid mask type in %qs", code_name);
4318 debug_generic_expr (lhs_type);
4319 debug_generic_expr (rhs1_type);
4320 debug_generic_expr (rhs2_type);
4321 debug_generic_expr (rhs3_type);
4322 return true;
4323 }
4324
4325 return false;
4326
4327 case SAD_EXPR:
4328 if (!useless_type_conversion_p (rhs1_type, rhs2_type)
4329 || !useless_type_conversion_p (lhs_type, rhs3_type)
4330 || 2 * GET_MODE_UNIT_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1_type)))
4331 > GET_MODE_UNIT_BITSIZE (TYPE_MODE (TREE_TYPE (lhs_type))))
4332 {
4333 error ("type mismatch in %qs", code_name);
4334 debug_generic_expr (lhs_type);
4335 debug_generic_expr (rhs1_type);
4336 debug_generic_expr (rhs2_type);
4337 debug_generic_expr (rhs3_type);
4338 return true;
4339 }
4340
4341 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4342 || TREE_CODE (rhs2_type) != VECTOR_TYPE
4343 || TREE_CODE (rhs3_type) != VECTOR_TYPE)
4344 {
4345 error ("vector types expected in %qs", code_name);
4346 debug_generic_expr (lhs_type);
4347 debug_generic_expr (rhs1_type);
4348 debug_generic_expr (rhs2_type);
4349 debug_generic_expr (rhs3_type);
4350 return true;
4351 }
4352
4353 return false;
4354
4355 case BIT_INSERT_EXPR:
4356 if (! useless_type_conversion_p (lhs_type, rhs1_type))
4357 {
4358 error ("type mismatch in %qs", code_name);
4359 debug_generic_expr (lhs_type);
4360 debug_generic_expr (rhs1_type);
4361 return true;
4362 }
4363 if (! ((INTEGRAL_TYPE_P (rhs1_type)
4364 && INTEGRAL_TYPE_P (rhs2_type))
4365 /* Vector element insert. */
4366 || (VECTOR_TYPE_P (rhs1_type)
4367 && types_compatible_p (TREE_TYPE (rhs1_type), rhs2_type))
4368 /* Aligned sub-vector insert. */
4369 || (VECTOR_TYPE_P (rhs1_type)
4370 && VECTOR_TYPE_P (rhs2_type)
4371 && types_compatible_p (TREE_TYPE (rhs1_type),
4372 TREE_TYPE (rhs2_type))
4373 && multiple_p (TYPE_VECTOR_SUBPARTS (rhs1_type),
4374 TYPE_VECTOR_SUBPARTS (rhs2_type))
4375 && multiple_of_p (bitsizetype, rhs3, TYPE_SIZE (rhs2_type)))))
4376 {
4377 error ("not allowed type combination in %qs", code_name);
4378 debug_generic_expr (rhs1_type);
4379 debug_generic_expr (rhs2_type);
4380 return true;
4381 }
4382 if (! tree_fits_uhwi_p (rhs3)
4383 || ! types_compatible_p (bitsizetype, TREE_TYPE (rhs3))
4384 || ! tree_fits_uhwi_p (TYPE_SIZE (rhs2_type)))
4385 {
4386 error ("invalid position or size in %qs", code_name);
4387 return true;
4388 }
4389 if (INTEGRAL_TYPE_P (rhs1_type)
4390 && !type_has_mode_precision_p (rhs1_type))
4391 {
4392 error ("%qs into non-mode-precision operand", code_name);
4393 return true;
4394 }
4395 if (INTEGRAL_TYPE_P (rhs1_type))
4396 {
4397 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (rhs3);
4398 if (bitpos >= TYPE_PRECISION (rhs1_type)
4399 || (bitpos + TYPE_PRECISION (rhs2_type)
4400 > TYPE_PRECISION (rhs1_type)))
4401 {
4402 error ("insertion out of range in %qs", code_name);
4403 return true;
4404 }
4405 }
4406 else if (VECTOR_TYPE_P (rhs1_type))
4407 {
4408 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (rhs3);
4409 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TYPE_SIZE (rhs2_type));
4410 if (bitpos % bitsize != 0)
4411 {
4412 error ("%qs not at element boundary", code_name);
4413 return true;
4414 }
4415 }
4416 return false;
4417
4418 case DOT_PROD_EXPR:
4419 {
4420 if (((TREE_CODE (rhs1_type) != VECTOR_TYPE
4421 || TREE_CODE (lhs_type) != VECTOR_TYPE)
4422 && ((!INTEGRAL_TYPE_P (rhs1_type)
4423 && !SCALAR_FLOAT_TYPE_P (rhs1_type))
4424 || (!INTEGRAL_TYPE_P (lhs_type)
4425 && !SCALAR_FLOAT_TYPE_P (lhs_type))))
4426 || !types_compatible_p (rhs1_type, rhs2_type)
4427 || !useless_type_conversion_p (lhs_type, rhs3_type)
4428 || maybe_lt (GET_MODE_SIZE (element_mode (rhs3_type)),
4429 2 * GET_MODE_SIZE (element_mode (rhs1_type))))
4430 {
4431 error ("type mismatch in %qs", code_name);
4432 debug_generic_expr (lhs_type);
4433 debug_generic_expr (rhs1_type);
4434 debug_generic_expr (rhs2_type);
4435 return true;
4436 }
4437 return false;
4438 }
4439
4440 case REALIGN_LOAD_EXPR:
4441 /* FIXME. */
4442 return false;
4443
4444 default:
4445 gcc_unreachable ();
4446 }
4447 return false;
4448 }
4449
4450 /* Verify a gimple assignment statement STMT with a single rhs.
4451 Returns true if anything is wrong. */
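/* Illustrative only: a "single" right-hand side covers plain copies,
   memory loads/stores and addresses, e.g.

     x_1 = y_2;		<-- SSA_NAME copy
     a.f = x_1;		<-- store through a COMPONENT_REF LHS
     p_3 = &v;		<-- ADDR_EXPR

   most of which delegate to verify_types_in_gimple_reference.  */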
4452
4453 static bool
4454 verify_gimple_assign_single (gassign *stmt)
4455 {
4456 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
4457 tree lhs = gimple_assign_lhs (stmt);
4458 tree lhs_type = TREE_TYPE (lhs);
4459 tree rhs1 = gimple_assign_rhs1 (stmt);
4460 tree rhs1_type = TREE_TYPE (rhs1);
4461 bool res = false;
4462
4463 const char* const code_name = get_tree_code_name (rhs_code);
4464
4465 if (!useless_type_conversion_p (lhs_type, rhs1_type))
4466 {
4467 error ("non-trivial conversion in %qs", code_name);
4468 debug_generic_expr (lhs_type);
4469 debug_generic_expr (rhs1_type);
4470 return true;
4471 }
4472
4473 if (gimple_clobber_p (stmt)
4474 && !(DECL_P (lhs) || TREE_CODE (lhs) == MEM_REF))
4475 {
4476 error ("%qs LHS in clobber statement",
4477 get_tree_code_name (TREE_CODE (lhs)));
4478 debug_generic_expr (lhs);
4479 return true;
4480 }
4481
4482 if (handled_component_p (lhs)
4483 || TREE_CODE (lhs) == MEM_REF
4484 || TREE_CODE (lhs) == TARGET_MEM_REF)
4485 res |= verify_types_in_gimple_reference (lhs, true);
4486
4487 /* Special codes we cannot handle via their class. */
4488 switch (rhs_code)
4489 {
4490 case ADDR_EXPR:
4491 {
4492 tree op = TREE_OPERAND (rhs1, 0);
4493 if (!is_gimple_addressable (op))
4494 {
4495 error ("invalid operand in %qs", code_name);
4496 return true;
4497 }
4498
4499 /* Technically there is no longer a need for matching types, but
4500 gimple hygiene asks for this check. In LTO we can end up
4501 combining incompatible units and thus end up with addresses
4502 of globals that change their type to a common one. */
4503 if (!in_lto_p
4504 && !types_compatible_p (TREE_TYPE (op),
4505 TREE_TYPE (TREE_TYPE (rhs1)))
4506 && !one_pointer_to_useless_type_conversion_p (TREE_TYPE (rhs1),
4507 TREE_TYPE (op)))
4508 {
4509 error ("type mismatch in %qs", code_name);
4510 debug_generic_stmt (TREE_TYPE (rhs1));
4511 debug_generic_stmt (TREE_TYPE (op));
4512 return true;
4513 }
4514
4515 return (verify_address (rhs1, true)
4516 || verify_types_in_gimple_reference (op, true));
4517 }
4518
4519 /* tcc_reference */
4520 case INDIRECT_REF:
4521 error ("%qs in gimple IL", code_name);
4522 return true;
4523
4524 case COMPONENT_REF:
4525 case BIT_FIELD_REF:
4526 case ARRAY_REF:
4527 case ARRAY_RANGE_REF:
4528 case VIEW_CONVERT_EXPR:
4529 case REALPART_EXPR:
4530 case IMAGPART_EXPR:
4531 case TARGET_MEM_REF:
4532 case MEM_REF:
4533 if (!is_gimple_reg (lhs)
4534 && is_gimple_reg_type (TREE_TYPE (lhs)))
4535 {
4536 error ("invalid RHS for gimple memory store: %qs", code_name);
4537 debug_generic_stmt (lhs);
4538 debug_generic_stmt (rhs1);
4539 return true;
4540 }
4541 return res || verify_types_in_gimple_reference (rhs1, false);
4542
4543 /* tcc_constant */
4544 case SSA_NAME:
4545 case INTEGER_CST:
4546 case REAL_CST:
4547 case FIXED_CST:
4548 case COMPLEX_CST:
4549 case VECTOR_CST:
4550 case STRING_CST:
4551 return res;
4552
4553 /* tcc_declaration */
4554 case CONST_DECL:
4555 return res;
4556 case VAR_DECL:
4557 case PARM_DECL:
4558 if (!is_gimple_reg (lhs)
4559 && !is_gimple_reg (rhs1)
4560 && is_gimple_reg_type (TREE_TYPE (lhs)))
4561 {
4562 error ("invalid RHS for gimple memory store: %qs", code_name);
4563 debug_generic_stmt (lhs);
4564 debug_generic_stmt (rhs1);
4565 return true;
4566 }
4567 return res;
4568
4569 case CONSTRUCTOR:
4570 if (TREE_CODE (rhs1_type) == VECTOR_TYPE)
4571 {
4572 unsigned int i;
4573 tree elt_i, elt_v, elt_t = NULL_TREE;
4574
4575 if (CONSTRUCTOR_NELTS (rhs1) == 0)
4576 return res;
4577 /* For vector CONSTRUCTORs we require that either it is an empty
4578 CONSTRUCTOR, or it is a CONSTRUCTOR of smaller vector elements
4579 (then the element count must be correct to cover the whole
4580 outer vector and the index must be NULL on all elements), or it is
4581 a CONSTRUCTOR of scalar elements, where as an exception we allow
4582 a smaller number of elements (assuming zero filling) and
4583 consecutive indexes instead of NULL indexes (such
4584 CONSTRUCTORs can appear in the IL from FEs). */
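/* Illustrative only: valid forms include, e.g.,
     v4_1 = { a_2, b_3, c_4, d_5 };	<-- four scalar elements
     v4_6 = { lo_7, hi_8 };		<-- two half-width vectors
   with NULL (or consecutive constant) element indexes.  */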
4585 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (rhs1), i, elt_i, elt_v)
4586 {
4587 if (elt_t == NULL_TREE)
4588 {
4589 elt_t = TREE_TYPE (elt_v);
4590 if (TREE_CODE (elt_t) == VECTOR_TYPE)
4591 {
4592 tree elt_t = TREE_TYPE (elt_v);
4593 if (!useless_type_conversion_p (TREE_TYPE (rhs1_type),
4594 TREE_TYPE (elt_t)))
4595 {
4596 error ("incorrect type of vector %qs elements",
4597 code_name);
4598 debug_generic_stmt (rhs1);
4599 return true;
4600 }
4601 else if (maybe_ne (CONSTRUCTOR_NELTS (rhs1)
4602 * TYPE_VECTOR_SUBPARTS (elt_t),
4603 TYPE_VECTOR_SUBPARTS (rhs1_type)))
4604 {
4605 error ("incorrect number of vector %qs elements",
4606 code_name);
4607 debug_generic_stmt (rhs1);
4608 return true;
4609 }
4610 }
4611 else if (!useless_type_conversion_p (TREE_TYPE (rhs1_type),
4612 elt_t))
4613 {
4614 error ("incorrect type of vector %qs elements",
4615 code_name);
4616 debug_generic_stmt (rhs1);
4617 return true;
4618 }
4619 else if (maybe_gt (CONSTRUCTOR_NELTS (rhs1),
4620 TYPE_VECTOR_SUBPARTS (rhs1_type)))
4621 {
4622 error ("incorrect number of vector %qs elements",
4623 code_name);
4624 debug_generic_stmt (rhs1);
4625 return true;
4626 }
4627 }
4628 else if (!useless_type_conversion_p (elt_t, TREE_TYPE (elt_v)))
4629 {
4630 error ("incorrect type of vector CONSTRUCTOR elements");
4631 debug_generic_stmt (rhs1);
4632 return true;
4633 }
4634 if (elt_i != NULL_TREE
4635 && (TREE_CODE (elt_t) == VECTOR_TYPE
4636 || TREE_CODE (elt_i) != INTEGER_CST
4637 || compare_tree_int (elt_i, i) != 0))
4638 {
4639 error ("vector %qs with non-NULL element index",
4640 code_name);
4641 debug_generic_stmt (rhs1);
4642 return true;
4643 }
4644 if (!is_gimple_val (elt_v))
4645 {
4646 error ("vector %qs element is not a GIMPLE value",
4647 code_name);
4648 debug_generic_stmt (rhs1);
4649 return true;
4650 }
4651 }
4652 }
4653 else if (CONSTRUCTOR_NELTS (rhs1) != 0)
4654 {
4655 error ("non-vector %qs with elements", code_name);
4656 debug_generic_stmt (rhs1);
4657 return true;
4658 }
4659 return res;
4660
4661 case ASSERT_EXPR:
4662 /* FIXME. */
4663 rhs1 = fold (ASSERT_EXPR_COND (rhs1));
4664 if (rhs1 == boolean_false_node)
4665 {
4666 error ("%qs with an always-false condition", code_name);
4667 debug_generic_stmt (rhs1);
4668 return true;
4669 }
4670 break;
4671
4672 case OBJ_TYPE_REF:
4673 case WITH_SIZE_EXPR:
4674 /* FIXME. */
4675 return res;
4676
4677 default:;
4678 }
4679
4680 return res;
4681 }
4682
4683 /* Verify the contents of a GIMPLE_ASSIGN STMT. Returns true when there
4684 is a problem, otherwise false. */
4685
4686 static bool
4687 verify_gimple_assign (gassign *stmt)
4688 {
4689 switch (gimple_assign_rhs_class (stmt))
4690 {
4691 case GIMPLE_SINGLE_RHS:
4692 return verify_gimple_assign_single (stmt);
4693
4694 case GIMPLE_UNARY_RHS:
4695 return verify_gimple_assign_unary (stmt);
4696
4697 case GIMPLE_BINARY_RHS:
4698 return verify_gimple_assign_binary (stmt);
4699
4700 case GIMPLE_TERNARY_RHS:
4701 return verify_gimple_assign_ternary (stmt);
4702
4703 default:
4704 gcc_unreachable ();
4705 }
4706 }
4707
4708 /* Verify the contents of a GIMPLE_RETURN STMT. Returns true when there
4709 is a problem, otherwise false. */
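/* Illustrative only: e.g. "return x_1;", where the type of x_1 must be
   trivially convertible to the function's declared return type.  */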
4710
4711 static bool
4712 verify_gimple_return (greturn *stmt)
4713 {
4714 tree op = gimple_return_retval (stmt);
4715 tree restype = TREE_TYPE (TREE_TYPE (cfun->decl));
4716
4717 /* We cannot test for present return values as we do not fix up missing
4718 return values from the original source. */
4719 if (op == NULL)
4720 return false;
4721
4722 if (!is_gimple_val (op)
4723 && TREE_CODE (op) != RESULT_DECL)
4724 {
4725 error ("invalid operand in return statement");
4726 debug_generic_stmt (op);
4727 return true;
4728 }
4729
4730 if ((TREE_CODE (op) == RESULT_DECL
4731 && DECL_BY_REFERENCE (op))
4732 || (TREE_CODE (op) == SSA_NAME
4733 && SSA_NAME_VAR (op)
4734 && TREE_CODE (SSA_NAME_VAR (op)) == RESULT_DECL
4735 && DECL_BY_REFERENCE (SSA_NAME_VAR (op))))
4736 op = TREE_TYPE (op);
4737
4738 if (!useless_type_conversion_p (restype, TREE_TYPE (op)))
4739 {
4740 error ("invalid conversion in return statement");
4741 debug_generic_stmt (restype);
4742 debug_generic_stmt (TREE_TYPE (op));
4743 return true;
4744 }
4745
4746 return false;
4747 }
4748
4749
4750 /* Verify the contents of a GIMPLE_GOTO STMT. Returns true when there
4751 is a problem, otherwise false. */
4752
4753 static bool
4754 verify_gimple_goto (ggoto *stmt)
4755 {
4756 tree dest = gimple_goto_dest (stmt);
4757
4758 /* ??? We have two canonical forms of direct goto destinations, a
4759 bare LABEL_DECL and an ADDR_EXPR of a LABEL_DECL. */
4760 if (TREE_CODE (dest) != LABEL_DECL
4761 && (!is_gimple_val (dest)
4762 || !POINTER_TYPE_P (TREE_TYPE (dest))))
4763 {
4764 error ("goto destination is neither a label nor a pointer");
4765 return true;
4766 }
4767
4768 return false;
4769 }
4770
4771 /* Verify the contents of a GIMPLE_SWITCH STMT. Returns true when there
4772 is a problem, otherwise false. */
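/* Illustrative only: a well-formed switch has the default label first,
   followed by case labels with sorted, non-overlapping values/ranges of
   one common type, roughly

     switch (i_1) <default: L0, case 1: L1, case 5 ... 9: L2>  */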
4773
4774 static bool
4775 verify_gimple_switch (gswitch *stmt)
4776 {
4777 unsigned int i, n;
4778 tree elt, prev_upper_bound = NULL_TREE;
4779 tree index_type, elt_type = NULL_TREE;
4780
4781 if (!is_gimple_val (gimple_switch_index (stmt)))
4782 {
4783 error ("invalid operand to switch statement");
4784 debug_generic_stmt (gimple_switch_index (stmt));
4785 return true;
4786 }
4787
4788 index_type = TREE_TYPE (gimple_switch_index (stmt));
4789 if (! INTEGRAL_TYPE_P (index_type))
4790 {
4791 error ("non-integral type switch statement");
4792 debug_generic_expr (index_type);
4793 return true;
4794 }
4795
4796 elt = gimple_switch_label (stmt, 0);
4797 if (CASE_LOW (elt) != NULL_TREE
4798 || CASE_HIGH (elt) != NULL_TREE
4799 || CASE_CHAIN (elt) != NULL_TREE)
4800 {
4801 error ("invalid default case label in switch statement");
4802 debug_generic_expr (elt);
4803 return true;
4804 }
4805
4806 n = gimple_switch_num_labels (stmt);
4807 for (i = 1; i < n; i++)
4808 {
4809 elt = gimple_switch_label (stmt, i);
4810
4811 if (CASE_CHAIN (elt))
4812 {
4813 error ("invalid %<CASE_CHAIN%>");
4814 debug_generic_expr (elt);
4815 return true;
4816 }
4817 if (! CASE_LOW (elt))
4818 {
4819 error ("invalid case label in switch statement");
4820 debug_generic_expr (elt);
4821 return true;
4822 }
4823 if (CASE_HIGH (elt)
4824 && ! tree_int_cst_lt (CASE_LOW (elt), CASE_HIGH (elt)))
4825 {
4826 error ("invalid case range in switch statement");
4827 debug_generic_expr (elt);
4828 return true;
4829 }
4830
4831 if (! elt_type)
4832 {
4833 elt_type = TREE_TYPE (CASE_LOW (elt));
4834 if (TYPE_PRECISION (index_type) < TYPE_PRECISION (elt_type))
4835 {
4836 error ("type precision mismatch in switch statement");
4837 return true;
4838 }
4839 }
4840 if (TREE_TYPE (CASE_LOW (elt)) != elt_type
4841 || (CASE_HIGH (elt) && TREE_TYPE (CASE_HIGH (elt)) != elt_type))
4842 {
4843 error ("type mismatch for case label in switch statement");
4844 debug_generic_expr (elt);
4845 return true;
4846 }
4847
4848 if (prev_upper_bound)
4849 {
4850 if (! tree_int_cst_lt (prev_upper_bound, CASE_LOW (elt)))
4851 {
4852 error ("case labels not sorted in switch statement");
4853 return true;
4854 }
4855 }
4856
4857 prev_upper_bound = CASE_HIGH (elt);
4858 if (! prev_upper_bound)
4859 prev_upper_bound = CASE_LOW (elt);
4860 }
4861
4862 return false;
4863 }
4864
4865 /* Verify a gimple debug statement STMT.
4866 Returns true if anything is wrong. */
4867
4868 static bool
4869 verify_gimple_debug (gimple *stmt ATTRIBUTE_UNUSED)
4870 {
4871 /* There isn't much that could be wrong in a gimple debug stmt. A
4872 gimple debug bind stmt, for example, maps a tree, that's usually
4873 a VAR_DECL or a PARM_DECL, but that could also be some scalarized
4874 component or member of an aggregate type, to another tree, that
4875 can be an arbitrary expression. These stmts expand into debug
4876 insns, and are converted to debug notes by var-tracking.c. */
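  /* Illustrative only: e.g. a debug bind such as
       # DEBUG x => y_1 + 1
     is accepted here as-is.  */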
4877 return false;
4878 }
4879
4880 /* Verify a gimple label statement STMT.
4881 Returns true if anything is wrong. */
4882
4883 static bool
4884 verify_gimple_label (glabel *stmt)
4885 {
4886 tree decl = gimple_label_label (stmt);
4887 int uid;
4888 bool err = false;
4889
4890 if (TREE_CODE (decl) != LABEL_DECL)
4891 return true;
4892 if (!DECL_NONLOCAL (decl) && !FORCED_LABEL (decl)
4893 && DECL_CONTEXT (decl) != current_function_decl)
4894 {
4895 error ("label context is not the current function declaration");
4896 err |= true;
4897 }
4898
4899 uid = LABEL_DECL_UID (decl);
4900 if (cfun->cfg
4901 && (uid == -1
4902 || (*label_to_block_map_for_fn (cfun))[uid] != gimple_bb (stmt)))
4903 {
4904 error ("incorrect entry in %<label_to_block_map%>");
4905 err |= true;
4906 }
4907
4908 uid = EH_LANDING_PAD_NR (decl);
4909 if (uid)
4910 {
4911 eh_landing_pad lp = get_eh_landing_pad_from_number (uid);
4912 if (decl != lp->post_landing_pad)
4913 {
4914 error ("incorrect setting of landing pad number");
4915 err |= true;
4916 }
4917 }
4918
4919 return err;
4920 }
4921
4922 /* Verify a gimple cond statement STMT.
4923 Returns true if anything is wrong. */
4924
4925 static bool
4926 verify_gimple_cond (gcond *stmt)
4927 {
4928 if (TREE_CODE_CLASS (gimple_cond_code (stmt)) != tcc_comparison)
4929 {
4930 error ("invalid comparison code in gimple cond");
4931 return true;
4932 }
4933 if (!(!gimple_cond_true_label (stmt)
4934 || TREE_CODE (gimple_cond_true_label (stmt)) == LABEL_DECL)
4935 || !(!gimple_cond_false_label (stmt)
4936 || TREE_CODE (gimple_cond_false_label (stmt)) == LABEL_DECL))
4937 {
4938 error ("invalid labels in gimple cond");
4939 return true;
4940 }
4941
4942 return verify_gimple_comparison (boolean_type_node,
4943 gimple_cond_lhs (stmt),
4944 gimple_cond_rhs (stmt),
4945 gimple_cond_code (stmt));
4946 }
4947
4948 /* Verify the GIMPLE statement STMT. Returns true if there is an
4949 error, otherwise false. */
4950
4951 static bool
4952 verify_gimple_stmt (gimple *stmt)
4953 {
4954 switch (gimple_code (stmt))
4955 {
4956 case GIMPLE_ASSIGN:
4957 return verify_gimple_assign (as_a <gassign *> (stmt));
4958
4959 case GIMPLE_LABEL:
4960 return verify_gimple_label (as_a <glabel *> (stmt));
4961
4962 case GIMPLE_CALL:
4963 return verify_gimple_call (as_a <gcall *> (stmt));
4964
4965 case GIMPLE_COND:
4966 return verify_gimple_cond (as_a <gcond *> (stmt));
4967
4968 case GIMPLE_GOTO:
4969 return verify_gimple_goto (as_a <ggoto *> (stmt));
4970
4971 case GIMPLE_SWITCH:
4972 return verify_gimple_switch (as_a <gswitch *> (stmt));
4973
4974 case GIMPLE_RETURN:
4975 return verify_gimple_return (as_a <greturn *> (stmt));
4976
4977 case GIMPLE_ASM:
4978 return false;
4979
4980 case GIMPLE_TRANSACTION:
4981 return verify_gimple_transaction (as_a <gtransaction *> (stmt));
4982
4983 /* Tuples that do not have tree operands. */
4984 case GIMPLE_NOP:
4985 case GIMPLE_PREDICT:
4986 case GIMPLE_RESX:
4987 case GIMPLE_EH_DISPATCH:
4988 case GIMPLE_EH_MUST_NOT_THROW:
4989 return false;
4990
4991 CASE_GIMPLE_OMP:
4992 /* OpenMP directives are validated by the FE and never operated
4993 on by the optimizers. Furthermore, GIMPLE_OMP_FOR may contain
4994 non-gimple expressions when the main index variable has had
4995 its address taken. This does not affect the loop itself
4996 because the header of a GIMPLE_OMP_FOR is merely used to determine
4997 how to set up the parallel iteration. */
4998 return false;
4999
5000 case GIMPLE_DEBUG:
5001 return verify_gimple_debug (stmt);
5002
5003 default:
5004 gcc_unreachable ();
5005 }
5006 }
5007
5008 /* Verify the contents of a GIMPLE_PHI. Returns true if there is a problem,
5009 and false otherwise. */
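/* Illustrative only: e.g.

     # x_3 = PHI <x_1(2), x_2(3)>

   where the result and each argument are SSA names (or invariants for
   non-virtual PHIs) of compatible types, one argument per incoming
   edge.  */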
5010
5011 static bool
5012 verify_gimple_phi (gphi *phi)
5013 {
5014 bool err = false;
5015 unsigned i;
5016 tree phi_result = gimple_phi_result (phi);
5017 bool virtual_p;
5018
5019 if (!phi_result)
5020 {
5021 error ("invalid %<PHI%> result");
5022 return true;
5023 }
5024
5025 virtual_p = virtual_operand_p (phi_result);
5026 if (TREE_CODE (phi_result) != SSA_NAME
5027 || (virtual_p
5028 && SSA_NAME_VAR (phi_result) != gimple_vop (cfun)))
5029 {
5030 error ("invalid %<PHI%> result");
5031 err = true;
5032 }
5033
5034 for (i = 0; i < gimple_phi_num_args (phi); i++)
5035 {
5036 tree t = gimple_phi_arg_def (phi, i);
5037
5038 if (!t)
5039 {
5040 error ("missing %<PHI%> def");
5041 err |= true;
5042 continue;
5043 }
5044 /* Addressable variables do have SSA_NAMEs but they
5045 are not considered gimple values. */
5046 else if ((TREE_CODE (t) == SSA_NAME
5047 && virtual_p != virtual_operand_p (t))
5048 || (virtual_p
5049 && (TREE_CODE (t) != SSA_NAME
5050 || SSA_NAME_VAR (t) != gimple_vop (cfun)))
5051 || (!virtual_p
5052 && !is_gimple_val (t)))
5053 {
5054 error ("invalid %<PHI%> argument");
5055 debug_generic_expr (t);
5056 err |= true;
5057 }
5058 #ifdef ENABLE_TYPES_CHECKING
5059 if (!useless_type_conversion_p (TREE_TYPE (phi_result), TREE_TYPE (t)))
5060 {
5061 error ("incompatible types in %<PHI%> argument %u", i);
5062 debug_generic_stmt (TREE_TYPE (phi_result));
5063 debug_generic_stmt (TREE_TYPE (t));
5064 err |= true;
5065 }
5066 #endif
5067 }
5068
5069 return err;
5070 }
5071
5072 /* Verify the GIMPLE statements inside the sequence STMTS. */
5073
5074 static bool
5075 verify_gimple_in_seq_2 (gimple_seq stmts)
5076 {
5077 gimple_stmt_iterator ittr;
5078 bool err = false;
5079
5080 for (ittr = gsi_start (stmts); !gsi_end_p (ittr); gsi_next (&ittr))
5081 {
5082 gimple *stmt = gsi_stmt (ittr);
5083
5084 switch (gimple_code (stmt))
5085 {
5086 case GIMPLE_BIND:
5087 err |= verify_gimple_in_seq_2 (
5088 gimple_bind_body (as_a <gbind *> (stmt)));
5089 break;
5090
5091 case GIMPLE_TRY:
5092 err |= verify_gimple_in_seq_2 (gimple_try_eval (stmt));
5093 err |= verify_gimple_in_seq_2 (gimple_try_cleanup (stmt));
5094 break;
5095
5096 case GIMPLE_EH_FILTER:
5097 err |= verify_gimple_in_seq_2 (gimple_eh_filter_failure (stmt));
5098 break;
5099
5100 case GIMPLE_EH_ELSE:
5101 {
5102 geh_else *eh_else = as_a <geh_else *> (stmt);
5103 err |= verify_gimple_in_seq_2 (gimple_eh_else_n_body (eh_else));
5104 err |= verify_gimple_in_seq_2 (gimple_eh_else_e_body (eh_else));
5105 }
5106 break;
5107
5108 case GIMPLE_CATCH:
5109 err |= verify_gimple_in_seq_2 (gimple_catch_handler (
5110 as_a <gcatch *> (stmt)));
5111 break;
5112
5113 case GIMPLE_TRANSACTION:
5114 err |= verify_gimple_transaction (as_a <gtransaction *> (stmt));
5115 break;
5116
5117 default:
5118 {
5119 bool err2 = verify_gimple_stmt (stmt);
5120 if (err2)
5121 debug_gimple_stmt (stmt);
5122 err |= err2;
5123 }
5124 }
5125 }
5126
5127 return err;
5128 }
5129
5130 /* Verify the contents of a GIMPLE_TRANSACTION. Returns true if there
5131 is a problem, otherwise false. */
5132
5133 static bool
5134 verify_gimple_transaction (gtransaction *stmt)
5135 {
5136 tree lab;
5137
5138 lab = gimple_transaction_label_norm (stmt);
5139 if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
5140 return true;
5141 lab = gimple_transaction_label_uninst (stmt);
5142 if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
5143 return true;
5144 lab = gimple_transaction_label_over (stmt);
5145 if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
5146 return true;
5147
5148 return verify_gimple_in_seq_2 (gimple_transaction_body (stmt));
5149 }
5150
5151
5152 /* Verify the GIMPLE statements inside the statement list STMTS. */
5153
5154 DEBUG_FUNCTION void
5155 verify_gimple_in_seq (gimple_seq stmts)
5156 {
5157 timevar_push (TV_TREE_STMT_VERIFY);
5158 if (verify_gimple_in_seq_2 (stmts))
5159 internal_error ("%<verify_gimple%> failed");
5160 timevar_pop (TV_TREE_STMT_VERIFY);
5161 }
5162
5163 /* Return true when T can be shared. */
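/* Illustrative only: e.g. an INTEGER_CST, an SSA_NAME or a decl may be
   referenced from several statements, while a MEM_REF or ARRAY_REF
   node must be unshared, i.e. each statement needs its own copy.  */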
5164
5165 static bool
5166 tree_node_can_be_shared (tree t)
5167 {
5168 if (IS_TYPE_OR_DECL_P (t)
5169 || TREE_CODE (t) == SSA_NAME
5170 || TREE_CODE (t) == IDENTIFIER_NODE
5171 || TREE_CODE (t) == CASE_LABEL_EXPR
5172 || is_gimple_min_invariant (t))
5173 return true;
5174
5175 if (t == error_mark_node)
5176 return true;
5177
5178 return false;
5179 }
5180
5181 /* Called via walk_tree. Verify tree sharing. */
5182
5183 static tree
5184 verify_node_sharing_1 (tree *tp, int *walk_subtrees, void *data)
5185 {
5186 hash_set<void *> *visited = (hash_set<void *> *) data;
5187
5188 if (tree_node_can_be_shared (*tp))
5189 {
5190 *walk_subtrees = false;
5191 return NULL;
5192 }
5193
5194 if (visited->add (*tp))
5195 return *tp;
5196
5197 return NULL;
5198 }
5199
5200 /* Called via walk_gimple_stmt. Verify tree sharing. */
5201
5202 static tree
5203 verify_node_sharing (tree *tp, int *walk_subtrees, void *data)
5204 {
5205 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5206 return verify_node_sharing_1 (tp, walk_subtrees, wi->info);
5207 }
5208
5209 static bool eh_error_found;
5210 bool
5211 verify_eh_throw_stmt_node (gimple *const &stmt, const int &,
5212 hash_set<gimple *> *visited)
5213 {
5214 if (!visited->contains (stmt))
5215 {
5216 error ("dead statement in EH table");
5217 debug_gimple_stmt (stmt);
5218 eh_error_found = true;
5219 }
5220 return true;
5221 }
5222
5223 /* Verify that the block of location LOC is in BLOCKS. */
5224
5225 static bool
5226 verify_location (hash_set<tree> *blocks, location_t loc)
5227 {
5228 tree block = LOCATION_BLOCK (loc);
5229 if (block != NULL_TREE
5230 && !blocks->contains (block))
5231 {
5232 error ("location references block not in block tree");
5233 return true;
5234 }
5235 if (block != NULL_TREE)
5236 return verify_location (blocks, BLOCK_SOURCE_LOCATION (block));
5237 return false;
5238 }
5239
5240 /* Called via walk_tree. Verify that expressions have no blocks. */
5241
5242 static tree
5243 verify_expr_no_block (tree *tp, int *walk_subtrees, void *)
5244 {
5245 if (!EXPR_P (*tp))
5246 {
5247 *walk_subtrees = false;
5248 return NULL;
5249 }
5250
5251 location_t loc = EXPR_LOCATION (*tp);
5252 if (LOCATION_BLOCK (loc) != NULL)
5253 return *tp;
5254
5255 return NULL;
5256 }
5257
5258 /* Called via walk_tree. Verify locations of expressions. */
5259
5260 static tree
5261 verify_expr_location_1 (tree *tp, int *walk_subtrees, void *data)
5262 {
5263 hash_set<tree> *blocks = (hash_set<tree> *) data;
5264 tree t = *tp;
5265
5266 /* ??? This doesn't really belong here but there's no good place to
5267 stick this remainder of old verify_expr. */
5268 /* ??? This barfs on debug stmts which contain binds to vars with
5269 a different function context. */
5270 #if 0
5271 if (VAR_P (t)
5272 || TREE_CODE (t) == PARM_DECL
5273 || TREE_CODE (t) == RESULT_DECL)
5274 {
5275 tree context = decl_function_context (t);
5276 if (context != cfun->decl
5277 && !SCOPE_FILE_SCOPE_P (context)
5278 && !TREE_STATIC (t)
5279 && !DECL_EXTERNAL (t))
5280 {
5281 error ("local declaration from a different function");
5282 return t;
5283 }
5284 }
5285 #endif
5286
5287 if (VAR_P (t) && DECL_HAS_DEBUG_EXPR_P (t))
5288 {
5289 tree x = DECL_DEBUG_EXPR (t);
5290 tree addr = walk_tree (&x, verify_expr_no_block, NULL, NULL);
5291 if (addr)
5292 return addr;
5293 }
5294 if ((VAR_P (t)
5295 || TREE_CODE (t) == PARM_DECL
5296 || TREE_CODE (t) == RESULT_DECL)
5297 && DECL_HAS_VALUE_EXPR_P (t))
5298 {
5299 tree x = DECL_VALUE_EXPR (t);
5300 tree addr = walk_tree (&x, verify_expr_no_block, NULL, NULL);
5301 if (addr)
5302 return addr;
5303 }
5304
5305 if (!EXPR_P (t))
5306 {
5307 *walk_subtrees = false;
5308 return NULL;
5309 }
5310
5311 location_t loc = EXPR_LOCATION (t);
5312 if (verify_location (blocks, loc))
5313 return t;
5314
5315 return NULL;
5316 }
5317
5318 /* Called via walk_gimple_op. Verify locations of expressions. */
5319
5320 static tree
5321 verify_expr_location (tree *tp, int *walk_subtrees, void *data)
5322 {
5323 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5324 return verify_expr_location_1 (tp, walk_subtrees, wi->info);
5325 }
5326
5327 /* Insert all subblocks of BLOCK into BLOCKS and recurse. */
5328
5329 static void
5330 collect_subblocks (hash_set<tree> *blocks, tree block)
5331 {
5332 tree t;
5333 for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t))
5334 {
5335 blocks->add (t);
5336 collect_subblocks (blocks, t);
5337 }
5338 }
5339
5340 /* Disable warnings about missing quoting in GCC diagnostics for
5341 the verification errors. Their format strings don't follow
5342 GCC diagnostic conventions and trigger an ICE in the end. */
5343 #if __GNUC__ >= 10
5344 # pragma GCC diagnostic push
5345 # pragma GCC diagnostic ignored "-Wformat-diag"
5346 #endif
5347
5348 /* Verify the GIMPLE statements in the CFG of FN. */
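/* Illustrative only: being a DEBUG_FUNCTION this can also be invoked
   from a debugger, e.g.

     (gdb) call verify_gimple_in_cfg (cfun, true)

   to check the current function while stepping through a pass.  */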
5349
5350 DEBUG_FUNCTION void
5351 verify_gimple_in_cfg (struct function *fn, bool verify_nothrow)
5352 {
5353 basic_block bb;
5354 bool err = false;
5355
5356 timevar_push (TV_TREE_STMT_VERIFY);
5357 hash_set<void *> visited;
5358 hash_set<gimple *> visited_throwing_stmts;
5359
5360 /* Collect all BLOCKs referenced by the BLOCK tree of FN. */
5361 hash_set<tree> blocks;
5362 if (DECL_INITIAL (fn->decl))
5363 {
5364 blocks.add (DECL_INITIAL (fn->decl));
5365 collect_subblocks (&blocks, DECL_INITIAL (fn->decl));
5366 }
5367
5368 FOR_EACH_BB_FN (bb, fn)
5369 {
5370 gimple_stmt_iterator gsi;
5371 edge_iterator ei;
5372 edge e;
5373
5374 for (gphi_iterator gpi = gsi_start_phis (bb);
5375 !gsi_end_p (gpi);
5376 gsi_next (&gpi))
5377 {
5378 gphi *phi = gpi.phi ();
5379 bool err2 = false;
5380 unsigned i;
5381
5382 if (gimple_bb (phi) != bb)
5383 {
5384 error ("gimple_bb (phi) is set to a wrong basic block");
5385 err2 = true;
5386 }
5387
5388 err2 |= verify_gimple_phi (phi);
5389
5390 /* Only PHI arguments have locations. */
5391 if (gimple_location (phi) != UNKNOWN_LOCATION)
5392 {
5393 error ("PHI node with location");
5394 err2 = true;
5395 }
5396
5397 for (i = 0; i < gimple_phi_num_args (phi); i++)
5398 {
5399 tree arg = gimple_phi_arg_def (phi, i);
5400 tree addr = walk_tree (&arg, verify_node_sharing_1,
5401 &visited, NULL);
5402 if (addr)
5403 {
5404 error ("incorrect sharing of tree nodes");
5405 debug_generic_expr (addr);
5406 err2 |= true;
5407 }
5408 location_t loc = gimple_phi_arg_location (phi, i);
5409 if (virtual_operand_p (gimple_phi_result (phi))
5410 && loc != UNKNOWN_LOCATION)
5411 {
5412 error ("virtual PHI with argument locations");
5413 err2 = true;
5414 }
5415 addr = walk_tree (&arg, verify_expr_location_1, &blocks, NULL);
5416 if (addr)
5417 {
5418 debug_generic_expr (addr);
5419 err2 = true;
5420 }
5421 err2 |= verify_location (&blocks, loc);
5422 }
5423
5424 if (err2)
5425 debug_gimple_stmt (phi);
5426 err |= err2;
5427 }
5428
5429 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5430 {
5431 gimple *stmt = gsi_stmt (gsi);
5432 bool err2 = false;
5433 struct walk_stmt_info wi;
5434 tree addr;
5435 int lp_nr;
5436
5437 if (gimple_bb (stmt) != bb)
5438 {
5439 error ("gimple_bb (stmt) is set to a wrong basic block");
5440 err2 = true;
5441 }
5442
5443 err2 |= verify_gimple_stmt (stmt);
5444 err2 |= verify_location (&blocks, gimple_location (stmt));
5445
5446 memset (&wi, 0, sizeof (wi));
5447 wi.info = (void *) &visited;
5448 addr = walk_gimple_op (stmt, verify_node_sharing, &wi);
5449 if (addr)
5450 {
5451 error ("incorrect sharing of tree nodes");
5452 debug_generic_expr (addr);
5453 err2 |= true;
5454 }
5455
5456 memset (&wi, 0, sizeof (wi));
5457 wi.info = (void *) &blocks;
5458 addr = walk_gimple_op (stmt, verify_expr_location, &wi);
5459 if (addr)
5460 {
5461 debug_generic_expr (addr);
5462 err2 |= true;
5463 }
5464
5465 /* If the statement is marked as part of an EH region, then it is
5466 expected that the statement could throw. Verify that when
5467 optimizations simplify statements such that we can prove they
5468 cannot throw, the other data structures are updated
5469 to match. */
5470 lp_nr = lookup_stmt_eh_lp (stmt);
5471 if (lp_nr != 0)
5472 visited_throwing_stmts.add (stmt);
5473 if (lp_nr > 0)
5474 {
5475 if (!stmt_could_throw_p (cfun, stmt))
5476 {
5477 if (verify_nothrow)
5478 {
5479 error ("statement marked for throw, but doesn%'t");
5480 err2 |= true;
5481 }
5482 }
5483 else if (!gsi_one_before_end_p (gsi))
5484 {
5485 error ("statement marked for throw in middle of block");
5486 err2 |= true;
5487 }
5488 }
5489
5490 if (err2)
5491 debug_gimple_stmt (stmt);
5492 err |= err2;
5493 }
5494
5495 FOR_EACH_EDGE (e, ei, bb->succs)
5496 if (e->goto_locus != UNKNOWN_LOCATION)
5497 err |= verify_location (&blocks, e->goto_locus);
5498 }
5499
5500 hash_map<gimple *, int> *eh_table = get_eh_throw_stmt_table (cfun);
5501 eh_error_found = false;
5502 if (eh_table)
5503 eh_table->traverse<hash_set<gimple *> *, verify_eh_throw_stmt_node>
5504 (&visited_throwing_stmts);
5505
5506 if (err || eh_error_found)
5507 internal_error ("verify_gimple failed");
5508
5509 verify_histograms ();
5510 timevar_pop (TV_TREE_STMT_VERIFY);
5511 }
5512
5513
5514 /* Verifies that the flow information is OK. */
5515
5516 static int
5517 gimple_verify_flow_info (void)
5518 {
5519 int err = 0;
5520 basic_block bb;
5521 gimple_stmt_iterator gsi;
5522 gimple *stmt;
5523 edge e;
5524 edge_iterator ei;
5525
5526 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
5527 || ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
5528 {
5529 error ("ENTRY_BLOCK has IL associated with it");
5530 err = 1;
5531 }
5532
5533 if (EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
5534 || EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
5535 {
5536 error ("EXIT_BLOCK has IL associated with it");
5537 err = 1;
5538 }
5539
5540 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
5541 if (e->flags & EDGE_FALLTHRU)
5542 {
5543 error ("fallthru to exit from bb %d", e->src->index);
5544 err = 1;
5545 }
5546
5547 FOR_EACH_BB_FN (bb, cfun)
5548 {
5549 bool found_ctrl_stmt = false;
5550
5551 stmt = NULL;
5552
5553 /* Skip labels at the start of the basic block. */
5554 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5555 {
5556 tree label;
5557 gimple *prev_stmt = stmt;
5558
5559 stmt = gsi_stmt (gsi);
5560
5561 if (gimple_code (stmt) != GIMPLE_LABEL)
5562 break;
5563
5564 label = gimple_label_label (as_a <glabel *> (stmt));
5565 if (prev_stmt && DECL_NONLOCAL (label))
5566 {
5567 error ("nonlocal label ");
5568 print_generic_expr (stderr, label);
5569 fprintf (stderr, " is not first in a sequence of labels in bb %d",
5570 bb->index);
5571 err = 1;
5572 }
5573
5574 if (prev_stmt && EH_LANDING_PAD_NR (label) != 0)
5575 {
5576 error ("EH landing pad label ");
5577 print_generic_expr (stderr, label);
5578 fprintf (stderr, " is not first in a sequence of labels in bb %d",
5579 bb->index);
5580 err = 1;
5581 }
5582
5583 if (label_to_block (cfun, label) != bb)
5584 {
5585 error ("label ");
5586 print_generic_expr (stderr, label);
5587 fprintf (stderr, " to block does not match in bb %d",
5588 bb->index);
5589 err = 1;
5590 }
5591
5592 if (decl_function_context (label) != current_function_decl)
5593 {
5594 error ("label ");
5595 print_generic_expr (stderr, label);
5596 fprintf (stderr, " has incorrect context in bb %d",
5597 bb->index);
5598 err = 1;
5599 }
5600 }
5601
5602 /* Verify that the body of basic block BB is free of control flow. */
5603 for (; !gsi_end_p (gsi); gsi_next (&gsi))
5604 {
5605 gimple *stmt = gsi_stmt (gsi);
5606
5607 if (found_ctrl_stmt)
5608 {
5609 error ("control flow in the middle of basic block %d",
5610 bb->index);
5611 err = 1;
5612 }
5613
5614 if (stmt_ends_bb_p (stmt))
5615 found_ctrl_stmt = true;
5616
5617 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
5618 {
5619 error ("label ");
5620 print_generic_expr (stderr, gimple_label_label (label_stmt));
5621 fprintf (stderr, " in the middle of basic block %d", bb->index);
5622 err = 1;
5623 }
5624 }
5625
5626 gsi = gsi_last_nondebug_bb (bb);
5627 if (gsi_end_p (gsi))
5628 continue;
5629
5630 stmt = gsi_stmt (gsi);
5631
5632 if (gimple_code (stmt) == GIMPLE_LABEL)
5633 continue;
5634
5635 err |= verify_eh_edges (stmt);
5636
5637 if (is_ctrl_stmt (stmt))
5638 {
5639 FOR_EACH_EDGE (e, ei, bb->succs)
5640 if (e->flags & EDGE_FALLTHRU)
5641 {
5642 error ("fallthru edge after a control statement in bb %d",
5643 bb->index);
5644 err = 1;
5645 }
5646 }
5647
5648 if (gimple_code (stmt) != GIMPLE_COND)
5649 {
5650 /* Verify that there are no edges with EDGE_TRUE_VALUE/EDGE_FALSE_VALUE
5651 set after anything other than a GIMPLE_COND. */
5652 FOR_EACH_EDGE (e, ei, bb->succs)
5653 if (e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))
5654 {
5655 error ("true/false edge after a non-GIMPLE_COND in bb %d",
5656 bb->index);
5657 err = 1;
5658 }
5659 }
5660
5661 switch (gimple_code (stmt))
5662 {
5663 case GIMPLE_COND:
5664 {
5665 edge true_edge;
5666 edge false_edge;
5667
5668 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
5669
5670 if (!true_edge
5671 || !false_edge
5672 || !(true_edge->flags & EDGE_TRUE_VALUE)
5673 || !(false_edge->flags & EDGE_FALSE_VALUE)
5674 || (true_edge->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL))
5675 || (false_edge->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL))
5676 || EDGE_COUNT (bb->succs) >= 3)
5677 {
5678 error ("wrong outgoing edge flags at end of bb %d",
5679 bb->index);
5680 err = 1;
5681 }
5682 }
5683 break;
5684
5685 case GIMPLE_GOTO:
5686 if (simple_goto_p (stmt))
5687 {
5688 error ("explicit goto at end of bb %d", bb->index);
5689 err = 1;
5690 }
5691 else
5692 {
5693 /* FIXME. We should double check that the labels in the
5694 destination blocks have their address taken. */
5695 FOR_EACH_EDGE (e, ei, bb->succs)
5696 if ((e->flags & (EDGE_FALLTHRU | EDGE_TRUE_VALUE
5697 | EDGE_FALSE_VALUE))
5698 || !(e->flags & EDGE_ABNORMAL))
5699 {
5700 error ("wrong outgoing edge flags at end of bb %d",
5701 bb->index);
5702 err = 1;
5703 }
5704 }
5705 break;
5706
5707 case GIMPLE_CALL:
5708 if (!gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
5709 break;
5710 /* fallthru */
5711 case GIMPLE_RETURN:
5712 if (!single_succ_p (bb)
5713 || (single_succ_edge (bb)->flags
5714 & (EDGE_FALLTHRU | EDGE_ABNORMAL
5715 | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
5716 {
5717 error ("wrong outgoing edge flags at end of bb %d", bb->index);
5718 err = 1;
5719 }
5720 if (single_succ (bb) != EXIT_BLOCK_PTR_FOR_FN (cfun))
5721 {
5722 error ("return edge does not point to exit in bb %d",
5723 bb->index);
5724 err = 1;
5725 }
5726 break;
5727
5728 case GIMPLE_SWITCH:
5729 {
5730 gswitch *switch_stmt = as_a <gswitch *> (stmt);
5731 tree prev;
5732 edge e;
5733 size_t i, n;
5734
5735 n = gimple_switch_num_labels (switch_stmt);
5736
5737 /* Mark all the destination basic blocks. */
5738 for (i = 0; i < n; ++i)
5739 {
5740 basic_block label_bb = gimple_switch_label_bb (cfun, switch_stmt, i);
5741 gcc_assert (!label_bb->aux || label_bb->aux == (void *)1);
5742 label_bb->aux = (void *)1;
5743 }
5744
5745 /* Verify that the case labels are sorted. */
5746 prev = gimple_switch_label (switch_stmt, 0);
5747 for (i = 1; i < n; ++i)
5748 {
5749 tree c = gimple_switch_label (switch_stmt, i);
5750 if (!CASE_LOW (c))
5751 {
5752 error ("found default case not at the start of "
5753 "case vector");
5754 err = 1;
5755 continue;
5756 }
5757 if (CASE_LOW (prev)
5758 && !tree_int_cst_lt (CASE_LOW (prev), CASE_LOW (c)))
5759 {
5760 error ("case labels not sorted: ");
5761 print_generic_expr (stderr, prev);
5762 fprintf (stderr," is greater than ");
5763 print_generic_expr (stderr, c);
5764 fprintf (stderr," but comes before it.\n");
5765 err = 1;
5766 }
5767 prev = c;
5768 }
5769 /* VRP will remove the default case if it can prove it will
5770 never be executed. So do not verify there always exists
5771 a default case here. */
5772
5773 FOR_EACH_EDGE (e, ei, bb->succs)
5774 {
5775 if (!e->dest->aux)
5776 {
5777 error ("extra outgoing edge %d->%d",
5778 bb->index, e->dest->index);
5779 err = 1;
5780 }
5781
5782 e->dest->aux = (void *)2;
5783 if ((e->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL
5784 | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
5785 {
5786 error ("wrong outgoing edge flags at end of bb %d",
5787 bb->index);
5788 err = 1;
5789 }
5790 }
5791
5792 /* Check that we have all of them. */
5793 for (i = 0; i < n; ++i)
5794 {
5795 basic_block label_bb = gimple_switch_label_bb (cfun,
5796 switch_stmt, i);
5797
5798 if (label_bb->aux != (void *)2)
5799 {
5800 error ("missing edge %i->%i", bb->index, label_bb->index);
5801 err = 1;
5802 }
5803 }
5804
5805 FOR_EACH_EDGE (e, ei, bb->succs)
5806 e->dest->aux = (void *)0;
5807 }
5808 break;
5809
5810 case GIMPLE_EH_DISPATCH:
5811 err |= verify_eh_dispatch_edge (as_a <geh_dispatch *> (stmt));
5812 break;
5813
5814 default:
5815 break;
5816 }
5817 }
5818
5819 if (dom_info_state (CDI_DOMINATORS) >= DOM_NO_FAST_QUERY)
5820 verify_dominators (CDI_DOMINATORS);
5821
5822 return err;
5823 }
5824
5825 #if __GNUC__ >= 10
5826 # pragma GCC diagnostic pop
5827 #endif
5828
5829 /* Updates phi nodes after creating a forwarder block joined
5830 by edge FALLTHRU. */
5831
5832 static void
5833 gimple_make_forwarder_block (edge fallthru)
5834 {
5835 edge e;
5836 edge_iterator ei;
5837 basic_block dummy, bb;
5838 tree var;
5839 gphi_iterator gsi;
5840 bool forward_location_p;
5841
5842 dummy = fallthru->src;
5843 bb = fallthru->dest;
5844
5845 if (single_pred_p (bb))
5846 return;
5847
5848 /* We can forward location info if we have only one predecessor. */
5849 forward_location_p = single_pred_p (dummy);
5850
5851 /* If we redirected a branch we must create new PHI nodes at the
5852 start of BB. */
5853 for (gsi = gsi_start_phis (dummy); !gsi_end_p (gsi); gsi_next (&gsi))
5854 {
5855 gphi *phi, *new_phi;
5856
5857 phi = gsi.phi ();
5858 var = gimple_phi_result (phi);
5859 new_phi = create_phi_node (var, bb);
5860 gimple_phi_set_result (phi, copy_ssa_name (var, phi));
5861 add_phi_arg (new_phi, gimple_phi_result (phi), fallthru,
5862 forward_location_p
5863 ? gimple_phi_arg_location (phi, 0) : UNKNOWN_LOCATION);
5864 }
5865
5866 /* Add the arguments we have stored on edges. */
5867 FOR_EACH_EDGE (e, ei, bb->preds)
5868 {
5869 if (e == fallthru)
5870 continue;
5871
5872 flush_pending_stmts (e);
5873 }
5874 }
5875
5876
5877 /* Return a non-special label at the head of basic block BB.
5878 Create one if it doesn't exist. */
5879
5880 tree
5881 gimple_block_label (basic_block bb)
5882 {
5883 gimple_stmt_iterator i, s = gsi_start_bb (bb);
5884 bool first = true;
5885 tree label;
5886 glabel *stmt;
5887
5888 for (i = s; !gsi_end_p (i); first = false, gsi_next (&i))
5889 {
5890 stmt = dyn_cast <glabel *> (gsi_stmt (i));
5891 if (!stmt)
5892 break;
5893 label = gimple_label_label (stmt);
5894 if (!DECL_NONLOCAL (label))
5895 {
5896 if (!first)
5897 gsi_move_before (&i, &s);
5898 return label;
5899 }
5900 }
5901
5902 label = create_artificial_label (UNKNOWN_LOCATION);
5903 stmt = gimple_build_label (label);
5904 gsi_insert_before (&s, stmt, GSI_NEW_STMT);
5905 return label;
5906 }
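
/* Illustrative sketch (guarded out; not part of GCC): one way a caller can
   use gimple_block_label.  To retarget a switch case at NEW_DEST, the case
   needs a label in NEW_DEST, which gimple_block_label reuses or creates.
   This mirrors what gimple_redirect_edge_and_branch does for GIMPLE_SWITCH
   below; the helper name is hypothetical and the caller would still have
   to redirect the matching CFG edge.  */
#if 0
static void
example_retarget_switch_case (gswitch *switch_stmt, unsigned idx,
                              basic_block new_dest)
{
  /* Reuse the first non-special label of NEW_DEST, creating one if the
     block has none.  */
  tree lab = gimple_block_label (new_dest);
  tree elt = gimple_switch_label (switch_stmt, idx);
  CASE_LABEL (elt) = lab;
}
#endif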
5907
5908
5909 /* Attempt to perform edge redirection by replacing a possibly complex
5910 jump instruction by a goto or by removing the jump completely.
5911 This can apply only if all edges now point to the same block. The
5912 parameters and return values are equivalent to
5913 redirect_edge_and_branch. */
5914
5915 static edge
5916 gimple_try_redirect_by_replacing_jump (edge e, basic_block target)
5917 {
5918 basic_block src = e->src;
5919 gimple_stmt_iterator i;
5920 gimple *stmt;
5921
5922 /* We can replace or remove a complex jump only when we have exactly
5923 two edges. */
5924 if (EDGE_COUNT (src->succs) != 2
5925 /* Verify that all targets will be TARGET. Specifically, the
5926 edge that is not E must also go to TARGET. */
5927 || EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target)
5928 return NULL;
5929
5930 i = gsi_last_bb (src);
5931 if (gsi_end_p (i))
5932 return NULL;
5933
5934 stmt = gsi_stmt (i);
5935
5936 if (gimple_code (stmt) == GIMPLE_COND || gimple_code (stmt) == GIMPLE_SWITCH)
5937 {
5938 gsi_remove (&i, true);
5939 e = ssa_redirect_edge (e, target);
5940 e->flags = EDGE_FALLTHRU;
5941 return e;
5942 }
5943
5944 return NULL;
5945 }
5946
5947
5948 /* Redirect E to DEST. Return NULL on failure. Otherwise, return the
5949 edge representing the redirected branch. */
5950
5951 static edge
5952 gimple_redirect_edge_and_branch (edge e, basic_block dest)
5953 {
5954 basic_block bb = e->src;
5955 gimple_stmt_iterator gsi;
5956 edge ret;
5957 gimple *stmt;
5958
5959 if (e->flags & EDGE_ABNORMAL)
5960 return NULL;
5961
5962 if (e->dest == dest)
5963 return NULL;
5964
5965 if (e->flags & EDGE_EH)
5966 return redirect_eh_edge (e, dest);
5967
5968 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
5969 {
5970 ret = gimple_try_redirect_by_replacing_jump (e, dest);
5971 if (ret)
5972 return ret;
5973 }
5974
5975 gsi = gsi_last_nondebug_bb (bb);
5976 stmt = gsi_end_p (gsi) ? NULL : gsi_stmt (gsi);
5977
5978 switch (stmt ? gimple_code (stmt) : GIMPLE_ERROR_MARK)
5979 {
5980 case GIMPLE_COND:
5981 /* For COND_EXPR, we only need to redirect the edge. */
5982 break;
5983
5984 case GIMPLE_GOTO:
5985 /* No non-abnormal edges should lead from a non-simple goto, and
5986 simple ones should be represented implicitly. */
5987 gcc_unreachable ();
5988
5989 case GIMPLE_SWITCH:
5990 {
5991 gswitch *switch_stmt = as_a <gswitch *> (stmt);
5992 tree label = gimple_block_label (dest);
5993 tree cases = get_cases_for_edge (e, switch_stmt);
5994
5995 /* If we have a list of cases associated with E, then use it
5996 as it's a lot faster than walking the entire case vector. */
5997 if (cases)
5998 {
5999 edge e2 = find_edge (e->src, dest);
6000 tree last, first;
6001
6002 first = cases;
6003 while (cases)
6004 {
6005 last = cases;
6006 CASE_LABEL (cases) = label;
6007 cases = CASE_CHAIN (cases);
6008 }
6009
6010 /* If there was already an edge in the CFG, then we need
6011 to move all the cases associated with E to E2. */
6012 if (e2)
6013 {
6014 tree cases2 = get_cases_for_edge (e2, switch_stmt);
6015
6016 CASE_CHAIN (last) = CASE_CHAIN (cases2);
6017 CASE_CHAIN (cases2) = first;
6018 }
6019 bitmap_set_bit (touched_switch_bbs, gimple_bb (stmt)->index);
6020 }
6021 else
6022 {
6023 size_t i, n = gimple_switch_num_labels (switch_stmt);
6024
6025 for (i = 0; i < n; i++)
6026 {
6027 tree elt = gimple_switch_label (switch_stmt, i);
6028 if (label_to_block (cfun, CASE_LABEL (elt)) == e->dest)
6029 CASE_LABEL (elt) = label;
6030 }
6031 }
6032 }
6033 break;
6034
6035 case GIMPLE_ASM:
6036 {
6037 gasm *asm_stmt = as_a <gasm *> (stmt);
6038 int i, n = gimple_asm_nlabels (asm_stmt);
6039 tree label = NULL;
6040
6041 for (i = 0; i < n; ++i)
6042 {
6043 tree cons = gimple_asm_label_op (asm_stmt, i);
6044 if (label_to_block (cfun, TREE_VALUE (cons)) == e->dest)
6045 {
6046 if (!label)
6047 label = gimple_block_label (dest);
6048 TREE_VALUE (cons) = label;
6049 }
6050 }
6051
6052 /* If we didn't find any label matching the former edge in the
6053 asm labels, we must be redirecting the fallthrough
6054 edge. */
6055 gcc_assert (label || (e->flags & EDGE_FALLTHRU));
6056 }
6057 break;
6058
6059 case GIMPLE_RETURN:
6060 gsi_remove (&gsi, true);
6061 e->flags |= EDGE_FALLTHRU;
6062 break;
6063
6064 case GIMPLE_OMP_RETURN:
6065 case GIMPLE_OMP_CONTINUE:
6066 case GIMPLE_OMP_SECTIONS_SWITCH:
6067 case GIMPLE_OMP_FOR:
6068 /* The edges from OMP constructs can be simply redirected. */
6069 break;
6070
6071 case GIMPLE_EH_DISPATCH:
6072 if (!(e->flags & EDGE_FALLTHRU))
6073 redirect_eh_dispatch_edge (as_a <geh_dispatch *> (stmt), e, dest);
6074 break;
6075
6076 case GIMPLE_TRANSACTION:
6077 if (e->flags & EDGE_TM_ABORT)
6078 gimple_transaction_set_label_over (as_a <gtransaction *> (stmt),
6079 gimple_block_label (dest));
6080 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
6081 gimple_transaction_set_label_uninst (as_a <gtransaction *> (stmt),
6082 gimple_block_label (dest));
6083 else
6084 gimple_transaction_set_label_norm (as_a <gtransaction *> (stmt),
6085 gimple_block_label (dest));
6086 break;
6087
6088 default:
6089 /* Otherwise it must be a fallthru edge, and we don't need to
6090 do anything besides redirecting it. */
6091 gcc_assert (e->flags & EDGE_FALLTHRU);
6092 break;
6093 }
6094
6095 /* Update/insert PHI nodes as necessary. */
6096
6097 /* Now update the edges in the CFG. */
6098 e = ssa_redirect_edge (e, dest);
6099
6100 return e;
6101 }
6102
6103 /* Returns true if it is possible to remove edge E by redirecting
6104 it to the destination of the other edge from E->src. */
6105
6106 static bool
6107 gimple_can_remove_branch_p (const_edge e)
6108 {
6109 if (e->flags & (EDGE_ABNORMAL | EDGE_EH))
6110 return false;
6111
6112 return true;
6113 }
6114
6115 /* Simple wrapper, as we can always redirect fallthru edges. */
6116
6117 static basic_block
6118 gimple_redirect_edge_and_branch_force (edge e, basic_block dest)
6119 {
6120 e = gimple_redirect_edge_and_branch (e, dest);
6121 gcc_assert (e);
6122
6123 return NULL;
6124 }
6125
6126
6127 /* Splits basic block BB after statement STMT (but at least after the
6128 labels). If STMT is NULL, BB is split just after the labels. */
6129
6130 static basic_block
6131 gimple_split_block (basic_block bb, void *stmt)
6132 {
6133 gimple_stmt_iterator gsi;
6134 gimple_stmt_iterator gsi_tgt;
6135 gimple_seq list;
6136 basic_block new_bb;
6137 edge e;
6138 edge_iterator ei;
6139
6140 new_bb = create_empty_bb (bb);
6141
6142 /* Redirect the outgoing edges. */
6143 new_bb->succs = bb->succs;
6144 bb->succs = NULL;
6145 FOR_EACH_EDGE (e, ei, new_bb->succs)
6146 e->src = new_bb;
6147
6148 /* Get a stmt iterator pointing to the first stmt to move. */
6149 if (!stmt || gimple_code ((gimple *) stmt) == GIMPLE_LABEL)
6150 gsi = gsi_after_labels (bb);
6151 else
6152 {
6153 gsi = gsi_for_stmt ((gimple *) stmt);
6154 gsi_next (&gsi);
6155 }
6156
6157 /* Move everything from GSI to the new basic block. */
6158 if (gsi_end_p (gsi))
6159 return new_bb;
6160
6161 /* Split the statement list - avoid re-creating new containers as this
6162 brings ugly quadratic memory consumption in the inliner.
6163 (We are still quadratic since we need to update stmt BB pointers,
6164 sadly.) */
6165 gsi_split_seq_before (&gsi, &list);
6166 set_bb_seq (new_bb, list);
6167 for (gsi_tgt = gsi_start (list);
6168 !gsi_end_p (gsi_tgt); gsi_next (&gsi_tgt))
6169 gimple_set_bb (gsi_stmt (gsi_tgt), new_bb);
6170
6171 return new_bb;
6172 }
6173
6174
6175 /* Moves basic block BB after block AFTER. */
6176
6177 static bool
6178 gimple_move_block_after (basic_block bb, basic_block after)
6179 {
6180 if (bb->prev_bb == after)
6181 return true;
6182
6183 unlink_block (bb);
6184 link_block (bb, after);
6185
6186 return true;
6187 }
6188
6189
6190 /* Return TRUE if block BB has no executable statements, otherwise return
6191 FALSE. */
6192
6193 static bool
6194 gimple_empty_block_p (basic_block bb)
6195 {
6196 /* BB must have no executable statements. */
6197 gimple_stmt_iterator gsi = gsi_after_labels (bb);
6198 if (phi_nodes (bb))
6199 return false;
6200 while (!gsi_end_p (gsi))
6201 {
6202 gimple *stmt = gsi_stmt (gsi);
6203 if (is_gimple_debug (stmt))
6204 ;
6205 else if (gimple_code (stmt) == GIMPLE_NOP
6206 || gimple_code (stmt) == GIMPLE_PREDICT)
6207 ;
6208 else
6209 return false;
6210 gsi_next (&gsi);
6211 }
6212 return true;
6213 }
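
/* Illustrative sketch (guarded out; not part of GCC): gimple_empty_block_p
   is the natural test when scanning for forwarder-like blocks.  The helper
   name is hypothetical.  */
#if 0
static bool
example_forwarder_candidate_p (basic_block bb)
{
  /* No PHI nodes, no executable statements, and a single fallthru
     successor.  */
  return (gimple_empty_block_p (bb)
          && single_succ_p (bb)
          && (single_succ_edge (bb)->flags & EDGE_FALLTHRU) != 0);
}
#endif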
6214
6215
6216 /* Split a basic block if it ends with a conditional branch and if the
6217 other part of the block is not empty. */
6218
6219 static basic_block
6220 gimple_split_block_before_cond_jump (basic_block bb)
6221 {
6222 gimple *last, *split_point;
6223 gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
6224 if (gsi_end_p (gsi))
6225 return NULL;
6226 last = gsi_stmt (gsi);
6227 if (gimple_code (last) != GIMPLE_COND
6228 && gimple_code (last) != GIMPLE_SWITCH)
6229 return NULL;
6230 gsi_prev (&gsi);
6231 split_point = gsi_stmt (gsi);
6232 return split_block (bb, split_point)->dest;
6233 }
6234
6235
6236 /* Return true if basic block BB can be duplicated. */
6237
6238 static bool
6239 gimple_can_duplicate_bb_p (const_basic_block bb)
6240 {
6241 gimple *last = last_stmt (CONST_CAST_BB (bb));
6242
6243 /* Do checks that can only fail for the last stmt, to minimize the work in the
6244 stmt loop. */
6245 if (last) {
6246 /* A transaction is a single entry multiple exit region. It
6247 must be duplicated in its entirety or not at all. */
6248 if (gimple_code (last) == GIMPLE_TRANSACTION)
6249 return false;
6250
6251 /* An IFN_UNIQUE call must be duplicated as part of its group,
6252 or not at all. */
6253 if (is_gimple_call (last)
6254 && gimple_call_internal_p (last)
6255 && gimple_call_internal_unique_p (last))
6256 return false;
6257 }
6258
6259 for (gimple_stmt_iterator gsi = gsi_start_bb (CONST_CAST_BB (bb));
6260 !gsi_end_p (gsi); gsi_next (&gsi))
6261 {
6262 gimple *g = gsi_stmt (gsi);
6263
6264 /* An IFN_GOMP_SIMT_ENTER_ALLOC/IFN_GOMP_SIMT_EXIT call must be
6265 duplicated as part of its group, or not at all.
6266 The IFN_GOMP_SIMT_VOTE_ANY and IFN_GOMP_SIMT_XCHG_* are part of such a
6267 group, so the same holds there. */
6268 if (is_gimple_call (g)
6269 && (gimple_call_internal_p (g, IFN_GOMP_SIMT_ENTER_ALLOC)
6270 || gimple_call_internal_p (g, IFN_GOMP_SIMT_EXIT)
6271 || gimple_call_internal_p (g, IFN_GOMP_SIMT_VOTE_ANY)
6272 || gimple_call_internal_p (g, IFN_GOMP_SIMT_XCHG_BFLY)
6273 || gimple_call_internal_p (g, IFN_GOMP_SIMT_XCHG_IDX)))
6274 return false;
6275 }
6276
6277 return true;
6278 }
6279
6280 /* Create a duplicate of the basic block BB. NOTE: This does not
6281 preserve SSA form. */
6282
6283 static basic_block
6284 gimple_duplicate_bb (basic_block bb, copy_bb_data *id)
6285 {
6286 basic_block new_bb;
6287 gimple_stmt_iterator gsi_tgt;
6288
6289 new_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
6290
6291 /* Copy the PHI nodes. We ignore PHI node arguments here because
6292 the incoming edges have not been set up yet. */
6293 for (gphi_iterator gpi = gsi_start_phis (bb);
6294 !gsi_end_p (gpi);
6295 gsi_next (&gpi))
6296 {
6297 gphi *phi, *copy;
6298 phi = gpi.phi ();
6299 copy = create_phi_node (NULL_TREE, new_bb);
6300 create_new_def_for (gimple_phi_result (phi), copy,
6301 gimple_phi_result_ptr (copy));
6302 gimple_set_uid (copy, gimple_uid (phi));
6303 }
6304
6305 gsi_tgt = gsi_start_bb (new_bb);
6306 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
6307 !gsi_end_p (gsi);
6308 gsi_next (&gsi))
6309 {
6310 def_operand_p def_p;
6311 ssa_op_iter op_iter;
6312 tree lhs;
6313 gimple *stmt, *copy;
6314
6315 stmt = gsi_stmt (gsi);
6316 if (gimple_code (stmt) == GIMPLE_LABEL)
6317 continue;
6318
6319 /* Don't duplicate label debug stmts. */
6320 if (gimple_debug_bind_p (stmt)
6321 && TREE_CODE (gimple_debug_bind_get_var (stmt))
6322 == LABEL_DECL)
6323 continue;
6324
6325 /* Create a new copy of STMT and duplicate STMT's virtual
6326 operands. */
6327 copy = gimple_copy (stmt);
6328 gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
6329
6330 maybe_duplicate_eh_stmt (copy, stmt);
6331 gimple_duplicate_stmt_histograms (cfun, copy, cfun, stmt);
6332
6333 /* When copying around a stmt writing into a local non-user
6334 aggregate, make sure it won't share stack slot with other
6335 vars. */
6336 lhs = gimple_get_lhs (stmt);
6337 if (lhs && TREE_CODE (lhs) != SSA_NAME)
6338 {
6339 tree base = get_base_address (lhs);
6340 if (base
6341 && (VAR_P (base) || TREE_CODE (base) == RESULT_DECL)
6342 && DECL_IGNORED_P (base)
6343 && !TREE_STATIC (base)
6344 && !DECL_EXTERNAL (base)
6345 && (!VAR_P (base) || !DECL_HAS_VALUE_EXPR_P (base)))
6346 DECL_NONSHAREABLE (base) = 1;
6347 }
6348
6349 /* If requested remap dependence info of cliques brought in
6350 via inlining. */
6351 if (id)
6352 for (unsigned i = 0; i < gimple_num_ops (copy); ++i)
6353 {
6354 tree op = gimple_op (copy, i);
6355 if (!op)
6356 continue;
6357 if (TREE_CODE (op) == ADDR_EXPR
6358 || TREE_CODE (op) == WITH_SIZE_EXPR)
6359 op = TREE_OPERAND (op, 0);
6360 while (handled_component_p (op))
6361 op = TREE_OPERAND (op, 0);
6362 if ((TREE_CODE (op) == MEM_REF
6363 || TREE_CODE (op) == TARGET_MEM_REF)
6364 && MR_DEPENDENCE_CLIQUE (op) > 1
6365 && MR_DEPENDENCE_CLIQUE (op) != bb->loop_father->owned_clique)
6366 {
6367 if (!id->dependence_map)
6368 id->dependence_map = new hash_map<dependence_hash,
6369 unsigned short>;
6370 bool existed;
6371 unsigned short &newc = id->dependence_map->get_or_insert
6372 (MR_DEPENDENCE_CLIQUE (op), &existed);
6373 if (!existed)
6374 {
6375 gcc_assert (MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
6376 newc = ++cfun->last_clique;
6377 }
6378 MR_DEPENDENCE_CLIQUE (op) = newc;
6379 }
6380 }
6381
6382 /* Create new names for all the definitions created by COPY and
6383 add replacement mappings for each new name. */
6384 FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_ALL_DEFS)
6385 create_new_def_for (DEF_FROM_PTR (def_p), copy, def_p);
6386 }
6387
6388 return new_bb;
6389 }
6390
6391 /* Adds phi node arguments for edge E_COPY after basic block duplication. */
6392
6393 static void
6394 add_phi_args_after_copy_edge (edge e_copy)
6395 {
6396 basic_block bb, bb_copy = e_copy->src, dest;
6397 edge e;
6398 edge_iterator ei;
6399 gphi *phi, *phi_copy;
6400 tree def;
6401 gphi_iterator psi, psi_copy;
6402
6403 if (gimple_seq_empty_p (phi_nodes (e_copy->dest)))
6404 return;
6405
6406 bb = bb_copy->flags & BB_DUPLICATED ? get_bb_original (bb_copy) : bb_copy;
6407
6408 if (e_copy->dest->flags & BB_DUPLICATED)
6409 dest = get_bb_original (e_copy->dest);
6410 else
6411 dest = e_copy->dest;
6412
6413 e = find_edge (bb, dest);
6414 if (!e)
6415 {
6416 /* During loop unrolling the target of the latch edge is copied.
6417 In this case we are not looking for the edge to DEST, but for
6418 the edge to the duplicated block whose original was DEST. */
6419 FOR_EACH_EDGE (e, ei, bb->succs)
6420 {
6421 if ((e->dest->flags & BB_DUPLICATED)
6422 && get_bb_original (e->dest) == dest)
6423 break;
6424 }
6425
6426 gcc_assert (e != NULL);
6427 }
6428
6429 for (psi = gsi_start_phis (e->dest),
6430 psi_copy = gsi_start_phis (e_copy->dest);
6431 !gsi_end_p (psi);
6432 gsi_next (&psi), gsi_next (&psi_copy))
6433 {
6434 phi = psi.phi ();
6435 phi_copy = psi_copy.phi ();
6436 def = PHI_ARG_DEF_FROM_EDGE (phi, e);
6437 add_phi_arg (phi_copy, def, e_copy,
6438 gimple_phi_arg_location_from_edge (phi, e));
6439 }
6440 }
6441
6442
6443 /* Basic block BB_COPY was created by code duplication. Add phi node
6444 arguments for edges going out of BB_COPY. The blocks that were
6445 duplicated have BB_DUPLICATED set. */
6446
6447 void
6448 add_phi_args_after_copy_bb (basic_block bb_copy)
6449 {
6450 edge e_copy;
6451 edge_iterator ei;
6452
6453 FOR_EACH_EDGE (e_copy, ei, bb_copy->succs)
6454 {
6455 add_phi_args_after_copy_edge (e_copy);
6456 }
6457 }
6458
6459 /* Blocks in REGION_COPY array of length N_REGION were created by
6460 duplication of basic blocks. Add phi node arguments for edges
6461 going from these blocks. If E_COPY is not NULL, also add
6462 phi node arguments for its destination. */
6463
6464 void
6465 add_phi_args_after_copy (basic_block *region_copy, unsigned n_region,
6466 edge e_copy)
6467 {
6468 unsigned i;
6469
6470 for (i = 0; i < n_region; i++)
6471 region_copy[i]->flags |= BB_DUPLICATED;
6472
6473 for (i = 0; i < n_region; i++)
6474 add_phi_args_after_copy_bb (region_copy[i]);
6475 if (e_copy)
6476 add_phi_args_after_copy_edge (e_copy);
6477
6478 for (i = 0; i < n_region; i++)
6479 region_copy[i]->flags &= ~BB_DUPLICATED;
6480 }
6481
6482 /* Duplicates a REGION (set of N_REGION basic blocks) with just a single
6483 important exit edge EXIT. By important we mean that no SSA name defined
6484 inside region is live over the other exit edges of the region. All entry
6485 edges to the region must go to ENTRY->dest. The edge ENTRY is redirected
6486 to the duplicate of the region. Dominance and loop information is
6487 updated if UPDATE_DOMINANCE is true, but not the SSA web. If
6488 UPDATE_DOMINANCE is false then we assume that the caller will update the
6489 dominance information after calling this function. The new basic
6490 blocks are stored to REGION_COPY in the same order as they had in REGION,
6491 provided that REGION_COPY is not NULL.
6492 The function returns false if it is unable to copy the region,
6493 true otherwise. */
6494
6495 bool
6496 gimple_duplicate_sese_region (edge entry, edge exit,
6497 basic_block *region, unsigned n_region,
6498 basic_block *region_copy,
6499 bool update_dominance)
6500 {
6501 unsigned i;
6502 bool free_region_copy = false, copying_header = false;
6503 class loop *loop = entry->dest->loop_father;
6504 edge exit_copy;
6505 vec<basic_block> doms = vNULL;
6506 edge redirected;
6507 profile_count total_count = profile_count::uninitialized ();
6508 profile_count entry_count = profile_count::uninitialized ();
6509
6510 if (!can_copy_bbs_p (region, n_region))
6511 return false;
6512
6513 /* Some sanity checking. Note that we do not check for all possible
6514 misuses of the functions. I.e. if you ask to copy something weird,
6515 it will work, but the state of structures probably will not be
6516 correct. */
6517 for (i = 0; i < n_region; i++)
6518 {
6519 /* We do not handle subloops, i.e. all the blocks must belong to the
6520 same loop. */
6521 if (region[i]->loop_father != loop)
6522 return false;
6523
6524 if (region[i] != entry->dest
6525 && region[i] == loop->header)
6526 return false;
6527 }
6528
6529 /* In case the function is used for loop header copying (which is the primary
6530 use), ensure that EXIT and its copy will be the new latch and entry edges. */
6531 if (loop->header == entry->dest)
6532 {
6533 copying_header = true;
6534
6535 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
6536 return false;
6537
6538 for (i = 0; i < n_region; i++)
6539 if (region[i] != exit->src
6540 && dominated_by_p (CDI_DOMINATORS, region[i], exit->src))
6541 return false;
6542 }
6543
6544 initialize_original_copy_tables ();
6545
6546 if (copying_header)
6547 set_loop_copy (loop, loop_outer (loop));
6548 else
6549 set_loop_copy (loop, loop);
6550
6551 if (!region_copy)
6552 {
6553 region_copy = XNEWVEC (basic_block, n_region);
6554 free_region_copy = true;
6555 }
6556
6557 /* Record blocks outside the region that are dominated by something
6558 inside. */
6559 if (update_dominance)
6560 {
6561 doms.create (0);
6562 doms = get_dominated_by_region (CDI_DOMINATORS, region, n_region);
6563 }
6564
6565 if (entry->dest->count.initialized_p ())
6566 {
6567 total_count = entry->dest->count;
6568 entry_count = entry->count ();
6569 /* Fix up corner cases, to avoid division by zero or creation of negative
6570 frequencies. */
6571 if (entry_count > total_count)
6572 entry_count = total_count;
6573 }
6574
6575 copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
6576 split_edge_bb_loc (entry), update_dominance);
6577 if (total_count.initialized_p () && entry_count.initialized_p ())
6578 {
6579 scale_bbs_frequencies_profile_count (region, n_region,
6580 total_count - entry_count,
6581 total_count);
6582 scale_bbs_frequencies_profile_count (region_copy, n_region, entry_count,
6583 total_count);
6584 }
6585
6586 if (copying_header)
6587 {
6588 loop->header = exit->dest;
6589 loop->latch = exit->src;
6590 }
6591
6592 /* Redirect the entry and add the phi node arguments. */
6593 redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
6594 gcc_assert (redirected != NULL);
6595 flush_pending_stmts (entry);
6596
6597 /* Concerning updating of dominators: We must recount dominators
6598 for entry block and its copy. Anything that is outside of the
6599 region, but was dominated by something inside needs recounting as
6600 well. */
6601 if (update_dominance)
6602 {
6603 set_immediate_dominator (CDI_DOMINATORS, entry->dest, entry->src);
6604 doms.safe_push (get_bb_original (entry->dest));
6605 iterate_fix_dominators (CDI_DOMINATORS, doms, false);
6606 doms.release ();
6607 }
6608
6609 /* Add the other PHI node arguments. */
6610 add_phi_args_after_copy (region_copy, n_region, NULL);
6611
6612 if (free_region_copy)
6613 free (region_copy);
6614
6615 free_original_copy_tables ();
6616 return true;
6617 }
6618
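/* Illustrative sketch (guarded out; not part of GCC): the primary caller of
   gimple_duplicate_sese_region is loop header copying.  ENTRY is assumed to
   be the preheader edge, EXIT the edge whose copy becomes the new entry, and
   BBS/N_BBS the header-region blocks collected by the caller; the helper
   name is hypothetical.  */
#if 0
static bool
example_copy_loop_header (edge entry, edge exit,
                          basic_block *bbs, unsigned n_bbs)
{
  if (!gimple_duplicate_sese_region (entry, exit, bbs, n_bbs, NULL, true))
    return false;

  /* The duplication does not update the SSA web; the caller has to.  */
  update_ssa (TODO_update_ssa);
  return true;
}
#endif
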
6619 /* Checks if BB is part of the region defined by N_REGION BBS. */
6620 static bool
6621 bb_part_of_region_p (basic_block bb, basic_block* bbs, unsigned n_region)
6622 {
6623 unsigned int n;
6624
6625 for (n = 0; n < n_region; n++)
6626 {
6627 if (bb == bbs[n])
6628 return true;
6629 }
6630 return false;
6631 }
6632
6633 /* Duplicates REGION consisting of N_REGION blocks. The new blocks
6634 are stored to REGION_COPY in the same order in which they appear
6635 in REGION, if REGION_COPY is not NULL. ENTRY is the entry to
6636 the region, EXIT an exit from it. The condition guarding EXIT
6637 is moved to ENTRY. Returns true if duplication succeeds, false
6638 otherwise.
6639
6640 For example,
6641
6642 some_code;
6643 if (cond)
6644 A;
6645 else
6646 B;
6647
6648 is transformed to
6649
6650 if (cond)
6651 {
6652 some_code;
6653 A;
6654 }
6655 else
6656 {
6657 some_code;
6658 B;
6659 }
6660 */
6661
6662 bool
6663 gimple_duplicate_sese_tail (edge entry, edge exit,
6664 basic_block *region, unsigned n_region,
6665 basic_block *region_copy)
6666 {
6667 unsigned i;
6668 bool free_region_copy = false;
6669 class loop *loop = exit->dest->loop_father;
6670 class loop *orig_loop = entry->dest->loop_father;
6671 basic_block switch_bb, entry_bb, nentry_bb;
6672 vec<basic_block> doms;
6673 profile_count total_count = profile_count::uninitialized (),
6674 exit_count = profile_count::uninitialized ();
6675 edge exits[2], nexits[2], e;
6676 gimple_stmt_iterator gsi;
6677 gimple *cond_stmt;
6678 edge sorig, snew;
6679 basic_block exit_bb;
6680 gphi_iterator psi;
6681 gphi *phi;
6682 tree def;
6683 class loop *target, *aloop, *cloop;
6684
6685 gcc_assert (EDGE_COUNT (exit->src->succs) == 2);
6686 exits[0] = exit;
6687 exits[1] = EDGE_SUCC (exit->src, EDGE_SUCC (exit->src, 0) == exit);
6688
6689 if (!can_copy_bbs_p (region, n_region))
6690 return false;
6691
6692 initialize_original_copy_tables ();
6693 set_loop_copy (orig_loop, loop);
6694
6695 target = loop;
6696 for (aloop = orig_loop->inner; aloop; aloop = aloop->next)
6697 {
6698 if (bb_part_of_region_p (aloop->header, region, n_region))
6699 {
6700 cloop = duplicate_loop (aloop, target);
6701 duplicate_subloops (aloop, cloop);
6702 }
6703 }
6704
6705 if (!region_copy)
6706 {
6707 region_copy = XNEWVEC (basic_block, n_region);
6708 free_region_copy = true;
6709 }
6710
6711 gcc_assert (!need_ssa_update_p (cfun));
6712
6713 /* Record blocks outside the region that are dominated by something
6714 inside. */
6715 doms = get_dominated_by_region (CDI_DOMINATORS, region, n_region);
6716
6717 total_count = exit->src->count;
6718 exit_count = exit->count ();
6719 /* Fix up corner cases, to avoid division by zero or creation of negative
6720 frequencies. */
6721 if (exit_count > total_count)
6722 exit_count = total_count;
6723
6724 copy_bbs (region, n_region, region_copy, exits, 2, nexits, orig_loop,
6725 split_edge_bb_loc (exit), true);
6726 if (total_count.initialized_p () && exit_count.initialized_p ())
6727 {
6728 scale_bbs_frequencies_profile_count (region, n_region,
6729 total_count - exit_count,
6730 total_count);
6731 scale_bbs_frequencies_profile_count (region_copy, n_region, exit_count,
6732 total_count);
6733 }
6734
6735 /* Create the switch block, and put the exit condition to it. */
6736 entry_bb = entry->dest;
6737 nentry_bb = get_bb_copy (entry_bb);
6738 if (!last_stmt (entry->src)
6739 || !stmt_ends_bb_p (last_stmt (entry->src)))
6740 switch_bb = entry->src;
6741 else
6742 switch_bb = split_edge (entry);
6743 set_immediate_dominator (CDI_DOMINATORS, nentry_bb, switch_bb);
6744
6745 gsi = gsi_last_bb (switch_bb);
6746 cond_stmt = last_stmt (exit->src);
6747 gcc_assert (gimple_code (cond_stmt) == GIMPLE_COND);
6748 cond_stmt = gimple_copy (cond_stmt);
6749
6750 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
6751
6752 sorig = single_succ_edge (switch_bb);
6753 sorig->flags = exits[1]->flags;
6754 sorig->probability = exits[1]->probability;
6755 snew = make_edge (switch_bb, nentry_bb, exits[0]->flags);
6756 snew->probability = exits[0]->probability;
6757
6758
6759 /* Register the new edge from SWITCH_BB in loop exit lists. */
6760 rescan_loop_exit (snew, true, false);
6761
6762 /* Add the PHI node arguments. */
6763 add_phi_args_after_copy (region_copy, n_region, snew);
6764
6765 /* Get rid of now superfluous conditions and associated edges (and phi node
6766 arguments). */
6767 exit_bb = exit->dest;
6768
6769 e = redirect_edge_and_branch (exits[0], exits[1]->dest);
6770 PENDING_STMT (e) = NULL;
6771
6772 /* The latch of ORIG_LOOP was copied, and so was the backedge
6773 to the original header. We redirect this backedge to EXIT_BB. */
6774 for (i = 0; i < n_region; i++)
6775 if (get_bb_original (region_copy[i]) == orig_loop->latch)
6776 {
6777 gcc_assert (single_succ_edge (region_copy[i]));
6778 e = redirect_edge_and_branch (single_succ_edge (region_copy[i]), exit_bb);
6779 PENDING_STMT (e) = NULL;
6780 for (psi = gsi_start_phis (exit_bb);
6781 !gsi_end_p (psi);
6782 gsi_next (&psi))
6783 {
6784 phi = psi.phi ();
6785 def = PHI_ARG_DEF (phi, nexits[0]->dest_idx);
6786 add_phi_arg (phi, def, e, gimple_phi_arg_location_from_edge (phi, e));
6787 }
6788 }
6789 e = redirect_edge_and_branch (nexits[1], nexits[0]->dest);
6790 PENDING_STMT (e) = NULL;
6791
6792 /* Anything that is outside of the region, but was dominated by something
6793 inside needs to update dominance info. */
6794 iterate_fix_dominators (CDI_DOMINATORS, doms, false);
6795 doms.release ();
6796 /* Update the SSA web. */
6797 update_ssa (TODO_update_ssa);
6798
6799 if (free_region_copy)
6800 free (region_copy);
6801
6802 free_original_copy_tables ();
6803 return true;
6804 }
6805
6806 /* Add all the blocks dominated by ENTRY to the array BBS_P. Stop
6807 adding blocks when the dominator traversal reaches EXIT. This
6808 function silently assumes that ENTRY strictly dominates EXIT. */
6809
6810 void
6811 gather_blocks_in_sese_region (basic_block entry, basic_block exit,
6812 vec<basic_block> *bbs_p)
6813 {
6814 basic_block son;
6815
6816 for (son = first_dom_son (CDI_DOMINATORS, entry);
6817 son;
6818 son = next_dom_son (CDI_DOMINATORS, son))
6819 {
6820 bbs_p->safe_push (son);
6821 if (son != exit)
6822 gather_blocks_in_sese_region (son, exit, bbs_p);
6823 }
6824 }
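
/* Illustrative sketch (guarded out; not part of GCC): collecting the blocks
   of a single-entry single-exit region and, with checking enabled, verifying
   its shape.  This mirrors what move_sese_region_to_fn does further down;
   the helper name is hypothetical.  */
#if 0
static void
example_collect_and_check_region (basic_block entry_bb, basic_block exit_bb)
{
  vec<basic_block> bbs = vNULL;

  /* The dominator walk starts below ENTRY_BB, so push ENTRY_BB itself
     first.  */
  bbs.safe_push (entry_bb);
  gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);

  if (flag_checking)
    verify_sese (entry_bb, exit_bb, &bbs);

  bbs.release ();
}
#endif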
6825
6826 /* Replaces *TP with a duplicate (belonging to function TO_CONTEXT).
6827 The duplicates are recorded in VARS_MAP. */
6828
6829 static void
6830 replace_by_duplicate_decl (tree *tp, hash_map<tree, tree> *vars_map,
6831 tree to_context)
6832 {
6833 tree t = *tp, new_t;
6834 struct function *f = DECL_STRUCT_FUNCTION (to_context);
6835
6836 if (DECL_CONTEXT (t) == to_context)
6837 return;
6838
6839 bool existed;
6840 tree &loc = vars_map->get_or_insert (t, &existed);
6841
6842 if (!existed)
6843 {
6844 if (SSA_VAR_P (t))
6845 {
6846 new_t = copy_var_decl (t, DECL_NAME (t), TREE_TYPE (t));
6847 add_local_decl (f, new_t);
6848 }
6849 else
6850 {
6851 gcc_assert (TREE_CODE (t) == CONST_DECL);
6852 new_t = copy_node (t);
6853 }
6854 DECL_CONTEXT (new_t) = to_context;
6855
6856 loc = new_t;
6857 }
6858 else
6859 new_t = loc;
6860
6861 *tp = new_t;
6862 }
6863
6864
6865 /* Creates an ssa name in TO_CONTEXT equivalent to NAME.
6866 VARS_MAP maps old ssa names and var_decls to the new ones. */
6867
6868 static tree
6869 replace_ssa_name (tree name, hash_map<tree, tree> *vars_map,
6870 tree to_context)
6871 {
6872 tree new_name;
6873
6874 gcc_assert (!virtual_operand_p (name));
6875
6876 tree *loc = vars_map->get (name);
6877
6878 if (!loc)
6879 {
6880 tree decl = SSA_NAME_VAR (name);
6881 if (decl)
6882 {
6883 gcc_assert (!SSA_NAME_IS_DEFAULT_DEF (name));
6884 replace_by_duplicate_decl (&decl, vars_map, to_context);
6885 new_name = make_ssa_name_fn (DECL_STRUCT_FUNCTION (to_context),
6886 decl, SSA_NAME_DEF_STMT (name));
6887 }
6888 else
6889 new_name = copy_ssa_name_fn (DECL_STRUCT_FUNCTION (to_context),
6890 name, SSA_NAME_DEF_STMT (name));
6891
6892 /* Now that we've used the def stmt to define new_name, make sure it
6893 doesn't define name anymore. */
6894 SSA_NAME_DEF_STMT (name) = NULL;
6895
6896 vars_map->put (name, new_name);
6897 }
6898 else
6899 new_name = *loc;
6900
6901 return new_name;
6902 }
6903
6904 struct move_stmt_d
6905 {
6906 tree orig_block;
6907 tree new_block;
6908 tree from_context;
6909 tree to_context;
6910 hash_map<tree, tree> *vars_map;
6911 htab_t new_label_map;
6912 hash_map<void *, void *> *eh_map;
6913 bool remap_decls_p;
6914 };
6915
6916 /* Helper for move_block_to_fn. Set TREE_BLOCK in every expression
6917 contained in *TP if it was previously ORIG_BLOCK, and change the
6918 DECL_CONTEXT of every local variable referenced in *TP. */
6919
6920 static tree
6921 move_stmt_op (tree *tp, int *walk_subtrees, void *data)
6922 {
6923 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6924 struct move_stmt_d *p = (struct move_stmt_d *) wi->info;
6925 tree t = *tp;
6926
6927 if (EXPR_P (t))
6928 {
6929 tree block = TREE_BLOCK (t);
6930 if (block == NULL_TREE)
6931 ;
6932 else if (block == p->orig_block
6933 || p->orig_block == NULL_TREE)
6934 {
6935 /* tree_node_can_be_shared says we can share invariant
6936 addresses but unshare_expr copies them anyway. Make sure
6937 to unshare before adjusting the block in place - we do not
6938 always see a copy here. */
6939 if (TREE_CODE (t) == ADDR_EXPR
6940 && is_gimple_min_invariant (t))
6941 *tp = t = unshare_expr (t);
6942 TREE_SET_BLOCK (t, p->new_block);
6943 }
6944 else if (flag_checking)
6945 {
6946 while (block && TREE_CODE (block) == BLOCK && block != p->orig_block)
6947 block = BLOCK_SUPERCONTEXT (block);
6948 gcc_assert (block == p->orig_block);
6949 }
6950 }
6951 else if (DECL_P (t) || TREE_CODE (t) == SSA_NAME)
6952 {
6953 if (TREE_CODE (t) == SSA_NAME)
6954 *tp = replace_ssa_name (t, p->vars_map, p->to_context);
6955 else if (TREE_CODE (t) == PARM_DECL
6956 && gimple_in_ssa_p (cfun))
6957 *tp = *(p->vars_map->get (t));
6958 else if (TREE_CODE (t) == LABEL_DECL)
6959 {
6960 if (p->new_label_map)
6961 {
6962 struct tree_map in, *out;
6963 in.base.from = t;
6964 out = (struct tree_map *)
6965 htab_find_with_hash (p->new_label_map, &in, DECL_UID (t));
6966 if (out)
6967 *tp = t = out->to;
6968 }
6969
6970 /* For FORCED_LABELs we can end up with references from other
6971 functions if some SESE regions are outlined. It is UB to
6972 jump in between them, but they could be used just for printing
6973 addresses etc. In that case, DECL_CONTEXT on the label should
6974 be the function containing the glabel stmt with that LABEL_DECL,
6975 rather than whichever function most recently saw a reference to
6976 the label. */
6977 if (!FORCED_LABEL (t) && !DECL_NONLOCAL (t))
6978 DECL_CONTEXT (t) = p->to_context;
6979 }
6980 else if (p->remap_decls_p)
6981 {
6982 /* Replace T with its duplicate. T should no longer appear in the
6983 parent function, so this looks wasteful; however, it may appear
6984 in referenced_vars, and more importantly, as virtual operands of
6985 statements, and in alias lists of other variables. It would be
6986 quite difficult to expunge it from all those places. ??? It might
6987 suffice to do this for addressable variables. */
6988 if ((VAR_P (t) && !is_global_var (t))
6989 || TREE_CODE (t) == CONST_DECL)
6990 replace_by_duplicate_decl (tp, p->vars_map, p->to_context);
6991 }
6992 *walk_subtrees = 0;
6993 }
6994 else if (TYPE_P (t))
6995 *walk_subtrees = 0;
6996
6997 return NULL_TREE;
6998 }
6999
7000 /* Helper for move_stmt_r. Given an EH region number for the source
7001 function, map that to the duplicate EH region number in the dest. */
7002
7003 static int
7004 move_stmt_eh_region_nr (int old_nr, struct move_stmt_d *p)
7005 {
7006 eh_region old_r, new_r;
7007
7008 old_r = get_eh_region_from_number (old_nr);
7009 new_r = static_cast<eh_region> (*p->eh_map->get (old_r));
7010
7011 return new_r->index;
7012 }
7013
7014 /* Similar, but operate on INTEGER_CSTs. */
7015
7016 static tree
7017 move_stmt_eh_region_tree_nr (tree old_t_nr, struct move_stmt_d *p)
7018 {
7019 int old_nr, new_nr;
7020
7021 old_nr = tree_to_shwi (old_t_nr);
7022 new_nr = move_stmt_eh_region_nr (old_nr, p);
7023
7024 return build_int_cst (integer_type_node, new_nr);
7025 }
7026
7027 /* Like move_stmt_op, but for gimple statements.
7028
7029 Helper for move_block_to_fn. Set GIMPLE_BLOCK in every expression
7030 contained in the current statement in *GSI_P and change the
7031 DECL_CONTEXT of every local variable referenced in the current
7032 statement. */
7033
7034 static tree
7035 move_stmt_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7036 struct walk_stmt_info *wi)
7037 {
7038 struct move_stmt_d *p = (struct move_stmt_d *) wi->info;
7039 gimple *stmt = gsi_stmt (*gsi_p);
7040 tree block = gimple_block (stmt);
7041
7042 if (block == p->orig_block
7043 || (p->orig_block == NULL_TREE
7044 && block != NULL_TREE))
7045 gimple_set_block (stmt, p->new_block);
7046
7047 switch (gimple_code (stmt))
7048 {
7049 case GIMPLE_CALL:
7050 /* Remap the region numbers for __builtin_eh_{pointer,filter}. */
7051 {
7052 tree r, fndecl = gimple_call_fndecl (stmt);
7053 if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
7054 switch (DECL_FUNCTION_CODE (fndecl))
7055 {
7056 case BUILT_IN_EH_COPY_VALUES:
7057 r = gimple_call_arg (stmt, 1);
7058 r = move_stmt_eh_region_tree_nr (r, p);
7059 gimple_call_set_arg (stmt, 1, r);
7060 /* FALLTHRU */
7061
7062 case BUILT_IN_EH_POINTER:
7063 case BUILT_IN_EH_FILTER:
7064 r = gimple_call_arg (stmt, 0);
7065 r = move_stmt_eh_region_tree_nr (r, p);
7066 gimple_call_set_arg (stmt, 0, r);
7067 break;
7068
7069 default:
7070 break;
7071 }
7072 }
7073 break;
7074
7075 case GIMPLE_RESX:
7076 {
7077 gresx *resx_stmt = as_a <gresx *> (stmt);
7078 int r = gimple_resx_region (resx_stmt);
7079 r = move_stmt_eh_region_nr (r, p);
7080 gimple_resx_set_region (resx_stmt, r);
7081 }
7082 break;
7083
7084 case GIMPLE_EH_DISPATCH:
7085 {
7086 geh_dispatch *eh_dispatch_stmt = as_a <geh_dispatch *> (stmt);
7087 int r = gimple_eh_dispatch_region (eh_dispatch_stmt);
7088 r = move_stmt_eh_region_nr (r, p);
7089 gimple_eh_dispatch_set_region (eh_dispatch_stmt, r);
7090 }
7091 break;
7092
7093 case GIMPLE_OMP_RETURN:
7094 case GIMPLE_OMP_CONTINUE:
7095 break;
7096
7097 case GIMPLE_LABEL:
7098 {
7099 /* For FORCED_LABEL, move_stmt_op doesn't adjust DECL_CONTEXT,
7100 so that such labels can be referenced from other regions.
7101 Make sure to update it when seeing a GIMPLE_LABEL though,
7102 that is the owner of the label. */
7103 walk_gimple_op (stmt, move_stmt_op, wi);
7104 *handled_ops_p = true;
7105 tree label = gimple_label_label (as_a <glabel *> (stmt));
7106 if (FORCED_LABEL (label) || DECL_NONLOCAL (label))
7107 DECL_CONTEXT (label) = p->to_context;
7108 }
7109 break;
7110
7111 default:
7112 if (is_gimple_omp (stmt))
7113 {
7114 /* Do not remap variables inside OMP directives. Variables
7115 referenced in clauses and directive header belong to the
7116 parent function and should not be moved into the child
7117 function. */
7118 bool save_remap_decls_p = p->remap_decls_p;
7119 p->remap_decls_p = false;
7120 *handled_ops_p = true;
7121
7122 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), move_stmt_r,
7123 move_stmt_op, wi);
7124
7125 p->remap_decls_p = save_remap_decls_p;
7126 }
7127 break;
7128 }
7129
7130 return NULL_TREE;
7131 }
7132
7133 /* Move basic block BB from function CFUN to function DEST_FN. The
7134 block is moved out of the original linked list and placed after
7135 block AFTER in the new list. Also, the block is removed from the
7136 original array of blocks and placed in DEST_FN's array of blocks.
7137 If UPDATE_EDGE_COUNT_P is true, the edge counts on both CFGs are
7138 updated to reflect the moved edges.
7139
7140 The local variables are remapped to new instances, VARS_MAP is used
7141 to record the mapping. */
7142
7143 static void
7144 move_block_to_fn (struct function *dest_cfun, basic_block bb,
7145 basic_block after, bool update_edge_count_p,
7146 struct move_stmt_d *d)
7147 {
7148 struct control_flow_graph *cfg;
7149 edge_iterator ei;
7150 edge e;
7151 gimple_stmt_iterator si;
7152 unsigned old_len;
7153
7154 /* Remove BB from dominance structures. */
7155 delete_from_dominance_info (CDI_DOMINATORS, bb);
7156
7157 /* Move BB from its current loop to the copy in the new function. */
7158 if (current_loops)
7159 {
7160 class loop *new_loop = (class loop *)bb->loop_father->aux;
7161 if (new_loop)
7162 bb->loop_father = new_loop;
7163 }
7164
7165 /* Link BB to the new linked list. */
7166 move_block_after (bb, after);
7167
7168 /* Update the edge count in the corresponding flowgraphs. */
7169 if (update_edge_count_p)
7170 FOR_EACH_EDGE (e, ei, bb->succs)
7171 {
7172 cfun->cfg->x_n_edges--;
7173 dest_cfun->cfg->x_n_edges++;
7174 }
7175
7176 /* Remove BB from the original basic block array. */
7177 (*cfun->cfg->x_basic_block_info)[bb->index] = NULL;
7178 cfun->cfg->x_n_basic_blocks--;
7179
7180 /* Grow DEST_CFUN's basic block array if needed. */
7181 cfg = dest_cfun->cfg;
7182 cfg->x_n_basic_blocks++;
7183 if (bb->index >= cfg->x_last_basic_block)
7184 cfg->x_last_basic_block = bb->index + 1;
7185
7186 old_len = vec_safe_length (cfg->x_basic_block_info);
7187 if ((unsigned) cfg->x_last_basic_block >= old_len)
7188 vec_safe_grow_cleared (cfg->x_basic_block_info,
7189 cfg->x_last_basic_block + 1);
7190
7191 (*cfg->x_basic_block_info)[bb->index] = bb;
7192
7193 /* Remap the variables in phi nodes. */
7194 for (gphi_iterator psi = gsi_start_phis (bb);
7195 !gsi_end_p (psi); )
7196 {
7197 gphi *phi = psi.phi ();
7198 use_operand_p use;
7199 tree op = PHI_RESULT (phi);
7200 ssa_op_iter oi;
7201 unsigned i;
7202
7203 if (virtual_operand_p (op))
7204 {
7205 /* Remove the phi nodes for virtual operands (alias analysis will be
7206 run for the new function, anyway). But replace all uses that
7207 might be outside of the region we move. */
7208 use_operand_p use_p;
7209 imm_use_iterator iter;
7210 gimple *use_stmt;
7211 FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
7212 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
7213 SET_USE (use_p, SSA_NAME_VAR (op));
7214 remove_phi_node (&psi, true);
7215 continue;
7216 }
7217
7218 SET_PHI_RESULT (phi,
7219 replace_ssa_name (op, d->vars_map, dest_cfun->decl));
7220 FOR_EACH_PHI_ARG (use, phi, oi, SSA_OP_USE)
7221 {
7222 op = USE_FROM_PTR (use);
7223 if (TREE_CODE (op) == SSA_NAME)
7224 SET_USE (use, replace_ssa_name (op, d->vars_map, dest_cfun->decl));
7225 }
7226
7227 for (i = 0; i < EDGE_COUNT (bb->preds); i++)
7228 {
7229 location_t locus = gimple_phi_arg_location (phi, i);
7230 tree block = LOCATION_BLOCK (locus);
7231
7232 if (locus == UNKNOWN_LOCATION)
7233 continue;
7234 if (d->orig_block == NULL_TREE || block == d->orig_block)
7235 {
7236 locus = set_block (locus, d->new_block);
7237 gimple_phi_arg_set_location (phi, i, locus);
7238 }
7239 }
7240
7241 gsi_next (&psi);
7242 }
7243
7244 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
7245 {
7246 gimple *stmt = gsi_stmt (si);
7247 struct walk_stmt_info wi;
7248
7249 memset (&wi, 0, sizeof (wi));
7250 wi.info = d;
7251 walk_gimple_stmt (&si, move_stmt_r, move_stmt_op, &wi);
7252
7253 if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
7254 {
7255 tree label = gimple_label_label (label_stmt);
7256 int uid = LABEL_DECL_UID (label);
7257
7258 gcc_assert (uid > -1);
7259
7260 old_len = vec_safe_length (cfg->x_label_to_block_map);
7261 if (old_len <= (unsigned) uid)
7262 vec_safe_grow_cleared (cfg->x_label_to_block_map, uid + 1);
7263
7264 (*cfg->x_label_to_block_map)[uid] = bb;
7265 (*cfun->cfg->x_label_to_block_map)[uid] = NULL;
7266
7267 gcc_assert (DECL_CONTEXT (label) == dest_cfun->decl);
7268
7269 if (uid >= dest_cfun->cfg->last_label_uid)
7270 dest_cfun->cfg->last_label_uid = uid + 1;
7271 }
7272
7273 maybe_duplicate_eh_stmt_fn (dest_cfun, stmt, cfun, stmt, d->eh_map, 0);
7274 remove_stmt_from_eh_lp_fn (cfun, stmt);
7275
7276 gimple_duplicate_stmt_histograms (dest_cfun, stmt, cfun, stmt);
7277 gimple_remove_stmt_histograms (cfun, stmt);
7278
7279 /* We cannot leave any operands allocated from the operand caches of
7280 the current function. */
7281 free_stmt_operands (cfun, stmt);
7282 push_cfun (dest_cfun);
7283 update_stmt (stmt);
7284 if (is_gimple_call (stmt))
7285 notice_special_calls (as_a <gcall *> (stmt));
7286 pop_cfun ();
7287 }
7288
7289 FOR_EACH_EDGE (e, ei, bb->succs)
7290 if (e->goto_locus != UNKNOWN_LOCATION)
7291 {
7292 tree block = LOCATION_BLOCK (e->goto_locus);
7293 if (d->orig_block == NULL_TREE
7294 || block == d->orig_block)
7295 e->goto_locus = set_block (e->goto_locus, d->new_block);
7296 }
7297 }
7298
7299 /* Examine the statements in BB (which is in SRC_CFUN); find and return
7300 the outermost EH region. Use REGION as the incoming base EH region.
7301 If there is no single outermost region, return NULL and set *ALL to
7302 true. */
7303
7304 static eh_region
7305 find_outermost_region_in_block (struct function *src_cfun,
7306 basic_block bb, eh_region region,
7307 bool *all)
7308 {
7309 gimple_stmt_iterator si;
7310
7311 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
7312 {
7313 gimple *stmt = gsi_stmt (si);
7314 eh_region stmt_region;
7315 int lp_nr;
7316
7317 lp_nr = lookup_stmt_eh_lp_fn (src_cfun, stmt);
7318 stmt_region = get_eh_region_from_lp_number_fn (src_cfun, lp_nr);
7319 if (stmt_region)
7320 {
7321 if (region == NULL)
7322 region = stmt_region;
7323 else if (stmt_region != region)
7324 {
7325 region = eh_region_outermost (src_cfun, stmt_region, region);
7326 if (region == NULL)
7327 {
7328 *all = true;
7329 return NULL;
7330 }
7331 }
7332 }
7333 }
7334
7335 return region;
7336 }
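
/* Illustrative sketch (guarded out; not part of GCC): accumulating the
   outermost EH region over all blocks of a region, the way
   move_sese_region_to_fn uses the helper above.  The function name is
   hypothetical.  */
#if 0
static eh_region
example_region_eh_root (struct function *src_cfun, const vec<basic_block> &bbs)
{
  eh_region region = NULL;
  bool all = false;
  unsigned i;
  basic_block bb;

  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      region = find_outermost_region_in_block (src_cfun, bb, region, &all);
      /* ALL set means no single enclosing region exists; the caller must
         handle the whole EH tree instead.  */
      if (all)
        return NULL;
    }
  return region;
}
#endif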
7337
7338 static tree
7339 new_label_mapper (tree decl, void *data)
7340 {
7341 htab_t hash = (htab_t) data;
7342 struct tree_map *m;
7343 void **slot;
7344
7345 gcc_assert (TREE_CODE (decl) == LABEL_DECL);
7346
7347 m = XNEW (struct tree_map);
7348 m->hash = DECL_UID (decl);
7349 m->base.from = decl;
7350 m->to = create_artificial_label (UNKNOWN_LOCATION);
7351 LABEL_DECL_UID (m->to) = LABEL_DECL_UID (decl);
7352 if (LABEL_DECL_UID (m->to) >= cfun->cfg->last_label_uid)
7353 cfun->cfg->last_label_uid = LABEL_DECL_UID (m->to) + 1;
7354
7355 slot = htab_find_slot_with_hash (hash, m, m->hash, INSERT);
7356 gcc_assert (*slot == NULL);
7357
7358 *slot = m;
7359
7360 return m->to;
7361 }
7362
7363 /* Tree walker to replace the decls used inside value expressions by
7364 duplicates. */
7365
7366 static tree
7367 replace_block_vars_by_duplicates_1 (tree *tp, int *walk_subtrees, void *data)
7368 {
7369 struct replace_decls_d *rd = (struct replace_decls_d *)data;
7370
7371 switch (TREE_CODE (*tp))
7372 {
7373 case VAR_DECL:
7374 case PARM_DECL:
7375 case RESULT_DECL:
7376 replace_by_duplicate_decl (tp, rd->vars_map, rd->to_context);
7377 break;
7378 default:
7379 break;
7380 }
7381
7382 if (IS_TYPE_OR_DECL_P (*tp))
7383 *walk_subtrees = false;
7384
7385 return NULL;
7386 }
7387
7388 /* Change DECL_CONTEXT of all BLOCK_VARS in block, including
7389 subblocks. */
7390
7391 static void
7392 replace_block_vars_by_duplicates (tree block, hash_map<tree, tree> *vars_map,
7393 tree to_context)
7394 {
7395 tree *tp, t;
7396
7397 for (tp = &BLOCK_VARS (block); *tp; tp = &DECL_CHAIN (*tp))
7398 {
7399 t = *tp;
7400 if (!VAR_P (t) && TREE_CODE (t) != CONST_DECL)
7401 continue;
7402 replace_by_duplicate_decl (&t, vars_map, to_context);
7403 if (t != *tp)
7404 {
7405 if (VAR_P (*tp) && DECL_HAS_VALUE_EXPR_P (*tp))
7406 {
7407 tree x = DECL_VALUE_EXPR (*tp);
7408 struct replace_decls_d rd = { vars_map, to_context };
7409 unshare_expr (x);
7410 walk_tree (&x, replace_block_vars_by_duplicates_1, &rd, NULL);
7411 SET_DECL_VALUE_EXPR (t, x);
7412 DECL_HAS_VALUE_EXPR_P (t) = 1;
7413 }
7414 DECL_CHAIN (t) = DECL_CHAIN (*tp);
7415 *tp = t;
7416 }
7417 }
7418
7419 for (block = BLOCK_SUBBLOCKS (block); block; block = BLOCK_CHAIN (block))
7420 replace_block_vars_by_duplicates (block, vars_map, to_context);
7421 }
7422
7423 /* Fixup the loop arrays and numbers after moving LOOP and its subloops
7424 from FN1 to FN2. */
7425
7426 static void
7427 fixup_loop_arrays_after_move (struct function *fn1, struct function *fn2,
7428 class loop *loop)
7429 {
7430 /* Discard it from the old loop array. */
7431 (*get_loops (fn1))[loop->num] = NULL;
7432
7433 /* Place it in the new loop array, assigning it a new number. */
7434 loop->num = number_of_loops (fn2);
7435 vec_safe_push (loops_for_fn (fn2)->larray, loop);
7436
7437 /* Recurse to children. */
7438 for (loop = loop->inner; loop; loop = loop->next)
7439 fixup_loop_arrays_after_move (fn1, fn2, loop);
7440 }
7441
7442 /* Verify that the blocks in BBS_P are a single-entry, single-exit region
7443 delimited by ENTRY_BB and EXIT_BB, possibly containing noreturn blocks. */
7444
7445 DEBUG_FUNCTION void
7446 verify_sese (basic_block entry, basic_block exit, vec<basic_block> *bbs_p)
7447 {
7448 basic_block bb;
7449 edge_iterator ei;
7450 edge e;
7451 bitmap bbs = BITMAP_ALLOC (NULL);
7452 int i;
7453
7454 gcc_assert (entry != NULL);
7455 gcc_assert (entry != exit);
7456 gcc_assert (bbs_p != NULL);
7457
7458 gcc_assert (bbs_p->length () > 0);
7459
7460 FOR_EACH_VEC_ELT (*bbs_p, i, bb)
7461 bitmap_set_bit (bbs, bb->index);
7462
7463 gcc_assert (bitmap_bit_p (bbs, entry->index));
7464 gcc_assert (exit == NULL || bitmap_bit_p (bbs, exit->index));
7465
7466 FOR_EACH_VEC_ELT (*bbs_p, i, bb)
7467 {
7468 if (bb == entry)
7469 {
7470 gcc_assert (single_pred_p (entry));
7471 gcc_assert (!bitmap_bit_p (bbs, single_pred (entry)->index));
7472 }
7473 else
7474 for (ei = ei_start (bb->preds); !ei_end_p (ei); ei_next (&ei))
7475 {
7476 e = ei_edge (ei);
7477 gcc_assert (bitmap_bit_p (bbs, e->src->index));
7478 }
7479
7480 if (bb == exit)
7481 {
7482 gcc_assert (single_succ_p (exit));
7483 gcc_assert (!bitmap_bit_p (bbs, single_succ (exit)->index));
7484 }
7485 else
7486 for (ei = ei_start (bb->succs); !ei_end_p (ei); ei_next (&ei))
7487 {
7488 e = ei_edge (ei);
7489 gcc_assert (bitmap_bit_p (bbs, e->dest->index));
7490 }
7491 }
7492
7493 BITMAP_FREE (bbs);
7494 }
7495
7496 /* If FROM is an SSA_NAME, mark the version in bitmap DATA. */
7497
7498 bool
7499 gather_ssa_name_hash_map_from (tree const &from, tree const &, void *data)
7500 {
7501 bitmap release_names = (bitmap)data;
7502
7503 if (TREE_CODE (from) != SSA_NAME)
7504 return true;
7505
7506 bitmap_set_bit (release_names, SSA_NAME_VERSION (from));
7507 return true;
7508 }
7509
7510 /* Return LOOP_DIST_ALIAS call if present in BB. */
7511
7512 static gimple *
7513 find_loop_dist_alias (basic_block bb)
7514 {
7515 gimple *g = last_stmt (bb);
7516 if (g == NULL || gimple_code (g) != GIMPLE_COND)
7517 return NULL;
7518
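/* Look at the statement immediately preceding the condition; that is
where the IFN_LOOP_DIST_ALIAS call sits, right before the
GIMPLE_COND that tests its result.  */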
7519 gimple_stmt_iterator gsi = gsi_for_stmt (g);
7520 gsi_prev (&gsi);
7521 if (gsi_end_p (gsi))
7522 return NULL;
7523
7524 g = gsi_stmt (gsi);
7525 if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS))
7526 return g;
7527 return NULL;
7528 }
7529
7530 /* Fold loop internal call G like IFN_LOOP_VECTORIZED/IFN_LOOP_DIST_ALIAS
7531 to VALUE and update any immediate uses of its LHS. */
7532
7533 void
7534 fold_loop_internal_call (gimple *g, tree value)
7535 {
7536 tree lhs = gimple_call_lhs (g);
7537 use_operand_p use_p;
7538 imm_use_iterator iter;
7539 gimple *use_stmt;
7540 gimple_stmt_iterator gsi = gsi_for_stmt (g);
7541
7542 update_call_from_tree (&gsi, value);
7543 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
7544 {
7545 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
7546 SET_USE (use_p, value);
7547 update_stmt (use_stmt);
7548 }
7549 }
7550
7551 /* Move a single-entry, single-exit region delimited by ENTRY_BB and
7552 EXIT_BB to function DEST_CFUN. The whole region is replaced by a
7553 single basic block in the original CFG and the new basic block is
7554 returned. DEST_CFUN must not have a CFG yet.
7555
7556 Note that the region need not be a pure SESE region. Blocks inside
7557 the region may contain calls to abort/exit. The only restriction
7558 is that ENTRY_BB should be the only entry point and it must
7559 dominate EXIT_BB.
7560
7561 Change TREE_BLOCK of all statements in ORIG_BLOCK to the new
7562 function's outermost BLOCK, and move all subblocks of ORIG_BLOCK
7563 to the new function.
7564
7565 All local variables referenced in the region are assumed to be in
7566 the corresponding BLOCK_VARS and unexpanded variable lists
7567 associated with DEST_CFUN.
7568
7569 TODO: investigate whether we can reuse gimple_duplicate_sese_region to
7570 reimplement move_sese_region_to_fn by duplicating the region rather than
7571 moving it. */
7572
7573 basic_block
7574 move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
7575 basic_block exit_bb, tree orig_block)
7576 {
7577 vec<basic_block> bbs, dom_bbs;
7578 basic_block dom_entry = get_immediate_dominator (CDI_DOMINATORS, entry_bb);
7579 basic_block after, bb, *entry_pred, *exit_succ, abb;
7580 struct function *saved_cfun = cfun;
7581 int *entry_flag, *exit_flag;
7582 profile_probability *entry_prob, *exit_prob;
7583 unsigned i, num_entry_edges, num_exit_edges, num_nodes;
7584 edge e;
7585 edge_iterator ei;
7586 htab_t new_label_map;
7587 hash_map<void *, void *> *eh_map;
7588 class loop *loop = entry_bb->loop_father;
7589 class loop *loop0 = get_loop (saved_cfun, 0);
7590 struct move_stmt_d d;
7591
7592 /* If ENTRY does not strictly dominate EXIT, this cannot be an SESE
7593 region. */
7594 gcc_assert (entry_bb != exit_bb
7595 && (!exit_bb
7596 || dominated_by_p (CDI_DOMINATORS, exit_bb, entry_bb)));
7597
7598 /* Collect all the blocks in the region. Manually add ENTRY_BB
7599 because it won't be added by dfs_enumerate_from. */
7600 bbs.create (0);
7601 bbs.safe_push (entry_bb);
7602 gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);
7603
7604 if (flag_checking)
7605 verify_sese (entry_bb, exit_bb, &bbs);
7606
7607 /* The blocks that used to be dominated by something in BBS will now be
7608 dominated by the new block. */
7609 dom_bbs = get_dominated_by_region (CDI_DOMINATORS,
7610 bbs.address (),
7611 bbs.length ());
7612
7613 /* Detach ENTRY_BB and EXIT_BB from CFUN->CFG. We need to remember
7614 the predecessor edges to ENTRY_BB and the successor edges to
7615 EXIT_BB so that we can re-attach them to the new basic block that
7616 will replace the region. */
7617 num_entry_edges = EDGE_COUNT (entry_bb->preds);
7618 entry_pred = XNEWVEC (basic_block, num_entry_edges);
7619 entry_flag = XNEWVEC (int, num_entry_edges);
7620 entry_prob = XNEWVEC (profile_probability, num_entry_edges);
7621 i = 0;
7622 for (ei = ei_start (entry_bb->preds); (e = ei_safe_edge (ei)) != NULL;)
7623 {
7624 entry_prob[i] = e->probability;
7625 entry_flag[i] = e->flags;
7626 entry_pred[i++] = e->src;
7627 remove_edge (e);
7628 }
7629
7630 if (exit_bb)
7631 {
7632 num_exit_edges = EDGE_COUNT (exit_bb->succs);
7633 exit_succ = XNEWVEC (basic_block, num_exit_edges);
7634 exit_flag = XNEWVEC (int, num_exit_edges);
7635 exit_prob = XNEWVEC (profile_probability, num_exit_edges);
7636 i = 0;
7637 for (ei = ei_start (exit_bb->succs); (e = ei_safe_edge (ei)) != NULL;)
7638 {
7639 exit_prob[i] = e->probability;
7640 exit_flag[i] = e->flags;
7641 exit_succ[i++] = e->dest;
7642 remove_edge (e);
7643 }
7644 }
7645 else
7646 {
7647 num_exit_edges = 0;
7648 exit_succ = NULL;
7649 exit_flag = NULL;
7650 exit_prob = NULL;
7651 }
7652
7653 /* Switch context to the child function to initialize DEST_FN's CFG. */
7654 gcc_assert (dest_cfun->cfg == NULL);
7655 push_cfun (dest_cfun);
7656
7657 init_empty_tree_cfg ();
7658
7659 /* Initialize EH information for the new function. */
7660 eh_map = NULL;
7661 new_label_map = NULL;
7662 if (saved_cfun->eh)
7663 {
7664 eh_region region = NULL;
7665 bool all = false;
7666
7667 FOR_EACH_VEC_ELT (bbs, i, bb)
7668 {
7669 region = find_outermost_region_in_block (saved_cfun, bb, region, &all);
7670 if (all)
7671 break;
7672 }
7673
7674 init_eh_for_function ();
7675 if (region != NULL || all)
7676 {
7677 new_label_map = htab_create (17, tree_map_hash, tree_map_eq, free);
7678 eh_map = duplicate_eh_regions (saved_cfun, region, 0,
7679 new_label_mapper, new_label_map);
7680 }
7681 }
7682
7683 /* Initialize an empty loop tree. */
7684 struct loops *loops = ggc_cleared_alloc<struct loops> ();
7685 init_loops_structure (dest_cfun, loops, 1);
7686 loops->state = LOOPS_MAY_HAVE_MULTIPLE_LATCHES;
7687 set_loops_for_fn (dest_cfun, loops);
7688
7689 vec<loop_p, va_gc> *larray = get_loops (saved_cfun)->copy ();
7690
7691 /* Move the outlined loop tree part. */
7692 num_nodes = bbs.length ();
7693 FOR_EACH_VEC_ELT (bbs, i, bb)
7694 {
7695 if (bb->loop_father->header == bb)
7696 {
7697 class loop *this_loop = bb->loop_father;
7698 class loop *outer = loop_outer (this_loop);
7699 if (outer == loop
7700 /* If the SESE region contains some bbs ending with
7701 a noreturn call, those are considered to belong
7702 to the outermost loop in saved_cfun, rather than
7703 the entry_bb's loop_father. */
7704 || outer == loop0)
7705 {
7706 if (outer != loop)
7707 num_nodes -= this_loop->num_nodes;
7708 flow_loop_tree_node_remove (bb->loop_father);
7709 flow_loop_tree_node_add (get_loop (dest_cfun, 0), this_loop);
7710 fixup_loop_arrays_after_move (saved_cfun, cfun, this_loop);
7711 }
7712 }
7713 else if (bb->loop_father == loop0 && loop0 != loop)
7714 num_nodes--;
7715
7716 /* Remove loop exits from the outlined region. */
7717 if (loops_for_fn (saved_cfun)->exits)
7718 FOR_EACH_EDGE (e, ei, bb->succs)
7719 {
7720 struct loops *l = loops_for_fn (saved_cfun);
7721 loop_exit **slot
7722 = l->exits->find_slot_with_hash (e, htab_hash_pointer (e),
7723 NO_INSERT);
7724 if (slot)
7725 l->exits->clear_slot (slot);
7726 }
7727 }
7728
7729 /* Adjust the number of blocks in the tree root of the outlined part. */
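/* The root loop accounts for every block in the new function, including
its ENTRY and EXIT blocks, hence the + 2.  */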
7730 get_loop (dest_cfun, 0)->num_nodes = bbs.length () + 2;
7731
7732 /* Setup a mapping to be used by move_block_to_fn. */
7733 loop->aux = current_loops->tree_root;
7734 loop0->aux = current_loops->tree_root;
7735
7736 /* Fix up orig_loop_num. If the block referenced in it has been moved
7737 to dest_cfun, update orig_loop_num field, otherwise clear it. */
7738 class loop *dloop;
7739 signed char *moved_orig_loop_num = NULL;
7740 FOR_EACH_LOOP_FN (dest_cfun, dloop, 0)
7741 if (dloop->orig_loop_num)
7742 {
7743 if (moved_orig_loop_num == NULL)
7744 moved_orig_loop_num
7745 = XCNEWVEC (signed char, vec_safe_length (larray));
7746 if ((*larray)[dloop->orig_loop_num] != NULL
7747 && get_loop (saved_cfun, dloop->orig_loop_num) == NULL)
7748 {
7749 if (moved_orig_loop_num[dloop->orig_loop_num] >= 0
7750 && moved_orig_loop_num[dloop->orig_loop_num] < 2)
7751 moved_orig_loop_num[dloop->orig_loop_num]++;
7752 dloop->orig_loop_num = (*larray)[dloop->orig_loop_num]->num;
7753 }
7754 else
7755 {
7756 moved_orig_loop_num[dloop->orig_loop_num] = -1;
7757 dloop->orig_loop_num = 0;
7758 }
7759 }
7760 pop_cfun ();
7761
7762 if (moved_orig_loop_num)
7763 {
7764 FOR_EACH_VEC_ELT (bbs, i, bb)
7765 {
7766 gimple *g = find_loop_dist_alias (bb);
7767 if (g == NULL)
7768 continue;
7769
7770 int orig_loop_num = tree_to_shwi (gimple_call_arg (g, 0));
7771 gcc_assert (orig_loop_num
7772 && (unsigned) orig_loop_num < vec_safe_length (larray));
7773 if (moved_orig_loop_num[orig_loop_num] == 2)
7774 {
7775 /* If we have moved both loops with this orig_loop_num into
7776 dest_cfun and the LOOP_DIST_ALIAS call is being moved there
7777 too, update the first argument. */
7778 gcc_assert ((*larray)[dloop->orig_loop_num] != NULL
7779 && (get_loop (saved_cfun, dloop->orig_loop_num)
7780 == NULL));
7781 tree t = build_int_cst (integer_type_node,
7782 (*larray)[dloop->orig_loop_num]->num);
7783 gimple_call_set_arg (g, 0, t);
7784 update_stmt (g);
7785 /* Make sure the following loop will not update it. */
7786 moved_orig_loop_num[orig_loop_num] = 0;
7787 }
7788 else
7789 /* Otherwise at least one of the loops stayed in saved_cfun.
7790 Remove the LOOP_DIST_ALIAS call. */
7791 fold_loop_internal_call (g, gimple_call_arg (g, 1));
7792 }
7793 FOR_EACH_BB_FN (bb, saved_cfun)
7794 {
7795 gimple *g = find_loop_dist_alias (bb);
7796 if (g == NULL)
7797 continue;
7798 int orig_loop_num = tree_to_shwi (gimple_call_arg (g, 0));
7799 gcc_assert (orig_loop_num
7800 && (unsigned) orig_loop_num < vec_safe_length (larray));
7801 if (moved_orig_loop_num[orig_loop_num])
7802 /* LOOP_DIST_ALIAS call remained in saved_cfun, if at least one
7803 of the corresponding loops was moved, remove it. */
7804 fold_loop_internal_call (g, gimple_call_arg (g, 1));
7805 }
7806 XDELETEVEC (moved_orig_loop_num);
7807 }
7808 ggc_free (larray);
7809
7810 /* Move blocks from BBS into DEST_CFUN. */
7811 gcc_assert (bbs.length () >= 2);
7812 after = dest_cfun->cfg->x_entry_block_ptr;
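/* Moved blocks are chained right after the new function's ENTRY block,
in the order they appear in BBS.  */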
7813 hash_map<tree, tree> vars_map;
7814
7815 memset (&d, 0, sizeof (d));
7816 d.orig_block = orig_block;
7817 d.new_block = DECL_INITIAL (dest_cfun->decl);
7818 d.from_context = cfun->decl;
7819 d.to_context = dest_cfun->decl;
7820 d.vars_map = &vars_map;
7821 d.new_label_map = new_label_map;
7822 d.eh_map = eh_map;
7823 d.remap_decls_p = true;
7824
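/* In SSA form, set up SSA default definitions for DEST_CFUN's arguments
up front and record them in VARS_MAP.  */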
7825 if (gimple_in_ssa_p (cfun))
7826 for (tree arg = DECL_ARGUMENTS (d.to_context); arg; arg = DECL_CHAIN (arg))
7827 {
7828 tree narg = make_ssa_name_fn (dest_cfun, arg, gimple_build_nop ());
7829 set_ssa_default_def (dest_cfun, arg, narg);
7830 vars_map.put (arg, narg);
7831 }
7832
7833 FOR_EACH_VEC_ELT (bbs, i, bb)
7834 {
7835 /* No need to update edge counts on the last block. It has
7836 already been updated earlier when we detached the region from
7837 the original CFG. */
7838 move_block_to_fn (dest_cfun, bb, after, bb != exit_bb, &d);
7839 after = bb;
7840 }
7841
7842 /* Adjust the maximum clique used. */
7843 dest_cfun->last_clique = saved_cfun->last_clique;
7844
7845 loop->aux = NULL;
7846 loop0->aux = NULL;
7847 /* Loop sizes are no longer correct, fix them up. */
7848 loop->num_nodes -= num_nodes;
7849 for (class loop *outer = loop_outer (loop);
7850 outer; outer = loop_outer (outer))
7851 outer->num_nodes -= num_nodes;
7852 loop0->num_nodes -= bbs.length () - num_nodes;
7853
7854 if (saved_cfun->has_simduid_loops || saved_cfun->has_force_vectorize_loops)
7855 {
7856 class loop *aloop;
7857 for (i = 0; vec_safe_iterate (loops->larray, i, &aloop); i++)
7858 if (aloop != NULL)
7859 {
7860 if (aloop->simduid)
7861 {
7862 replace_by_duplicate_decl (&aloop->simduid, d.vars_map,
7863 d.to_context);
7864 dest_cfun->has_simduid_loops = true;
7865 }
7866 if (aloop->force_vectorize)
7867 dest_cfun->has_force_vectorize_loops = true;
7868 }
7869 }
7870
7871 /* Rewire BLOCK_SUBBLOCKS of orig_block. */
7872 if (orig_block)
7873 {
7874 tree block;
7875 gcc_assert (BLOCK_SUBBLOCKS (DECL_INITIAL (dest_cfun->decl))
7876 == NULL_TREE);
7877 BLOCK_SUBBLOCKS (DECL_INITIAL (dest_cfun->decl))
7878 = BLOCK_SUBBLOCKS (orig_block);
7879 for (block = BLOCK_SUBBLOCKS (orig_block);
7880 block; block = BLOCK_CHAIN (block))
7881 BLOCK_SUPERCONTEXT (block) = DECL_INITIAL (dest_cfun->decl);
7882 BLOCK_SUBBLOCKS (orig_block) = NULL_TREE;
7883 }
7884
7885 replace_block_vars_by_duplicates (DECL_INITIAL (dest_cfun->decl),
7886 &vars_map, dest_cfun->decl);
7887
7888 if (new_label_map)
7889 htab_delete (new_label_map);
7890 if (eh_map)
7891 delete eh_map;
7892
7893 if (gimple_in_ssa_p (cfun))
7894 {
7895 /* We need to release ssa-names in a defined order, so first find them,
7896 and then iterate in ascending version order. */
7897 bitmap release_names = BITMAP_ALLOC (NULL);
7898 vars_map.traverse<void *, gather_ssa_name_hash_map_from> (release_names);
7899 bitmap_iterator bi;
7900 unsigned i;
7901 EXECUTE_IF_SET_IN_BITMAP (release_names, 0, i, bi)
7902 release_ssa_name (ssa_name (i));
7903 BITMAP_FREE (release_names);
7904 }
7905
7906 /* Rewire the entry and exit blocks. The successor to the entry
7907 block turns into the successor of DEST_FN's ENTRY_BLOCK_PTR in
7908 the child function. Similarly, the predecessor of DEST_FN's
7909 EXIT_BLOCK_PTR turns into the predecessor of EXIT_BLOCK_PTR. We
7910 need to switch CFUN between DEST_CFUN and SAVED_CFUN so that the
7911 various CFG manipulation function get to the right CFG.
7912
7913 FIXME, this is silly. The CFG ought to become a parameter to
7914 these helpers. */
7915 push_cfun (dest_cfun);
7916 ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = entry_bb->count;
7917 make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), entry_bb, EDGE_FALLTHRU);
7918 if (exit_bb)
7919 {
7920 make_single_succ_edge (exit_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
7921 EXIT_BLOCK_PTR_FOR_FN (cfun)->count = exit_bb->count;
7922 }
7923 else
7924 EXIT_BLOCK_PTR_FOR_FN (cfun)->count = profile_count::zero ();
7925 pop_cfun ();
7926
7927 /* Back in the original function, the SESE region has disappeared,
7928 create a new basic block in its place. */
7929 bb = create_empty_bb (entry_pred[0]);
7930 if (current_loops)
7931 add_bb_to_loop (bb, loop);
7932 for (i = 0; i < num_entry_edges; i++)
7933 {
7934 e = make_edge (entry_pred[i], bb, entry_flag[i]);
7935 e->probability = entry_prob[i];
7936 }
7937
7938 for (i = 0; i < num_exit_edges; i++)
7939 {
7940 e = make_edge (bb, exit_succ[i], exit_flag[i]);
7941 e->probability = exit_prob[i];
7942 }
7943
7944 set_immediate_dominator (CDI_DOMINATORS, bb, dom_entry);
7945 FOR_EACH_VEC_ELT (dom_bbs, i, abb)
7946 set_immediate_dominator (CDI_DOMINATORS, abb, bb);
7947 dom_bbs.release ();
7948
7949 if (exit_bb)
7950 {
7951 free (exit_prob);
7952 free (exit_flag);
7953 free (exit_succ);
7954 }
7955 free (entry_prob);
7956 free (entry_flag);
7957 free (entry_pred);
7958 bbs.release ();
7959
7960 return bb;
7961 }
7962
7963 /* Dump default def DEF to file FILE using FLAGS and indentation
7964 SPC. */
7965
7966 static void
7967 dump_default_def (FILE *file, tree def, int spc, dump_flags_t flags)
7968 {
7969 for (int i = 0; i < spc; ++i)
7970 fprintf (file, " ");
7971 dump_ssaname_info_to_file (file, def, spc);
7972
7973 print_generic_expr (file, TREE_TYPE (def), flags);
7974 fprintf (file, " ");
7975 print_generic_expr (file, def, flags);
7976 fprintf (file, " = ");
7977 print_generic_expr (file, SSA_NAME_VAR (def), flags);
7978 fprintf (file, ";\n");
7979 }
7980
7981 /* Print no_sanitize attribute to FILE for a given attribute VALUE. */
7982
7983 static void
7984 print_no_sanitize_attr_value (FILE *file, tree value)
7985 {
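/* VALUE holds a mask of SANITIZE_* flags; print the name of each
sanitizer whose bits are all set, separated by " | ".  */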
7986 unsigned int flags = tree_to_uhwi (value);
7987 bool first = true;
7988 for (int i = 0; sanitizer_opts[i].name != NULL; ++i)
7989 {
7990 if ((sanitizer_opts[i].flag & flags) == sanitizer_opts[i].flag)
7991 {
7992 if (!first)
7993 fprintf (file, " | ");
7994 fprintf (file, "%s", sanitizer_opts[i].name);
7995 first = false;
7996 }
7997 }
7998 }
7999
8000 /* Dump FUNCTION_DECL FN to file FILE using FLAGS (see TDF_* in dumpfile.h)
8001 */
8002
8003 void
8004 dump_function_to_file (tree fndecl, FILE *file, dump_flags_t flags)
8005 {
8006 tree arg, var, old_current_fndecl = current_function_decl;
8007 struct function *dsf;
8008 bool ignore_topmost_bind = false, any_var = false;
8009 basic_block bb;
8010 tree chain;
8011 bool tmclone = (TREE_CODE (fndecl) == FUNCTION_DECL
8012 && decl_is_tm_clone (fndecl));
8013 struct function *fun = DECL_STRUCT_FUNCTION (fndecl);
8014
8015 tree fntype = TREE_TYPE (fndecl);
8016 tree attrs[] = { DECL_ATTRIBUTES (fndecl), TYPE_ATTRIBUTES (fntype) };
8017
8018 for (int i = 0; i != 2; ++i)
8019 {
8020 if (!attrs[i])
8021 continue;
8022
8023 fprintf (file, "__attribute__((");
8024
8025 bool first = true;
8026 tree chain;
8027 for (chain = attrs[i]; chain; first = false, chain = TREE_CHAIN (chain))
8028 {
8029 if (!first)
8030 fprintf (file, ", ");
8031
8032 tree name = get_attribute_name (chain);
8033 print_generic_expr (file, name, dump_flags);
8034 if (TREE_VALUE (chain) != NULL_TREE)
8035 {
8036 fprintf (file, " (");
8037
8038 if (strstr (IDENTIFIER_POINTER (name), "no_sanitize"))
8039 print_no_sanitize_attr_value (file, TREE_VALUE (chain));
8040 else
8041 print_generic_expr (file, TREE_VALUE (chain), dump_flags);
8042 fprintf (file, ")");
8043 }
8044 }
8045
8046 fprintf (file, "))\n");
8047 }
8048
8049 current_function_decl = fndecl;
8050 if (flags & TDF_GIMPLE)
8051 {
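/* For -fgimple style dumps, emit the function in a form the GIMPLE
front end can parse back.  */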
8052 static bool hotness_bb_param_printed = false;
8053 if (profile_info != NULL
8054 && !hotness_bb_param_printed)
8055 {
8056 hotness_bb_param_printed = true;
8057 fprintf (file,
8058 "/* --param=gimple-fe-computed-hot-bb-threshold=%" PRId64
8059 " */\n", get_hot_bb_threshold ());
8060 }
8061
8062 print_generic_expr (file, TREE_TYPE (TREE_TYPE (fndecl)),
8063 dump_flags | TDF_SLIM);
8064 fprintf (file, " __GIMPLE (%s",
8065 (fun->curr_properties & PROP_ssa) ? "ssa"
8066 : (fun->curr_properties & PROP_cfg) ? "cfg"
8067 : "");
8068
8069 if (cfun->cfg)
8070 {
8071 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
8072 if (bb->count.initialized_p ())
8073 fprintf (file, ",%s(%d)",
8074 profile_quality_as_string (bb->count.quality ()),
8075 bb->count.value ());
8076 fprintf (file, ")\n%s (", function_name (fun));
8077 }
8078 }
8079 else
8080 {
8081 print_generic_expr (file, TREE_TYPE (fntype), dump_flags);
8082 fprintf (file, " %s %s(", function_name (fun),
8083 tmclone ? "[tm-clone] " : "");
8084 }
8085
8086 arg = DECL_ARGUMENTS (fndecl);
8087 while (arg)
8088 {
8089 print_generic_expr (file, TREE_TYPE (arg), dump_flags);
8090 fprintf (file, " ");
8091 print_generic_expr (file, arg, dump_flags);
8092 if (DECL_CHAIN (arg))
8093 fprintf (file, ", ");
8094 arg = DECL_CHAIN (arg);
8095 }
8096 fprintf (file, ")\n");
8097
8098 dsf = DECL_STRUCT_FUNCTION (fndecl);
8099 if (dsf && (flags & TDF_EH))
8100 dump_eh_tree (file, dsf);
8101
8102 if (flags & TDF_RAW && !gimple_has_body_p (fndecl))
8103 {
8104 dump_node (fndecl, TDF_SLIM | flags, file);
8105 current_function_decl = old_current_fndecl;
8106 return;
8107 }
8108
8109 /* When GIMPLE is lowered, the variables are no longer available in
8110 BIND_EXPRs, so display them separately. */
8111 if (fun && fun->decl == fndecl && (fun->curr_properties & PROP_gimple_lcf))
8112 {
8113 unsigned ix;
8114 ignore_topmost_bind = true;
8115
8116 fprintf (file, "{\n");
8117 if (gimple_in_ssa_p (fun)
8118 && (flags & TDF_ALIAS))
8119 {
8120 for (arg = DECL_ARGUMENTS (fndecl); arg != NULL;
8121 arg = DECL_CHAIN (arg))
8122 {
8123 tree def = ssa_default_def (fun, arg);
8124 if (def)
8125 dump_default_def (file, def, 2, flags);
8126 }
8127
8128 tree res = DECL_RESULT (fun->decl);
8129 if (res != NULL_TREE
8130 && DECL_BY_REFERENCE (res))
8131 {
8132 tree def = ssa_default_def (fun, res);
8133 if (def)
8134 dump_default_def (file, def, 2, flags);
8135 }
8136
8137 tree static_chain = fun->static_chain_decl;
8138 if (static_chain != NULL_TREE)
8139 {
8140 tree def = ssa_default_def (fun, static_chain);
8141 if (def)
8142 dump_default_def (file, def, 2, flags);
8143 }
8144 }
8145
8146 if (!vec_safe_is_empty (fun->local_decls))
8147 FOR_EACH_LOCAL_DECL (fun, ix, var)
8148 {
8149 print_generic_decl (file, var, flags);
8150 fprintf (file, "\n");
8151
8152 any_var = true;
8153 }
8154
8155 tree name;
8156
8157 if (gimple_in_ssa_p (cfun))
8158 FOR_EACH_SSA_NAME (ix, name, cfun)
8159 {
8160 if (!SSA_NAME_VAR (name)
8161 /* SSA names with decls without a name still get
8162 dumped as _N; list those explicitly as well even
8163 though we've dumped the decl declaration as D.xxx
8164 above. */
8165 || !SSA_NAME_IDENTIFIER (name))
8166 {
8167 fprintf (file, " ");
8168 print_generic_expr (file, TREE_TYPE (name), flags);
8169 fprintf (file, " ");
8170 print_generic_expr (file, name, flags);
8171 fprintf (file, ";\n");
8172
8173 any_var = true;
8174 }
8175 }
8176 }
8177
8178 if (fun && fun->decl == fndecl
8179 && fun->cfg
8180 && basic_block_info_for_fn (fun))
8181 {
8182 /* If the CFG has been built, emit a CFG-based dump. */
8183 if (!ignore_topmost_bind)
8184 fprintf (file, "{\n");
8185
8186 if (any_var && n_basic_blocks_for_fn (fun))
8187 fprintf (file, "\n");
8188
8189 FOR_EACH_BB_FN (bb, fun)
8190 dump_bb (file, bb, 2, flags);
8191
8192 fprintf (file, "}\n");
8193 }
8194 else if (fun->curr_properties & PROP_gimple_any)
8195 {
8196 /* The function is now in GIMPLE form but the CFG has not been
8197 built yet. Emit the single sequence of GIMPLE statements
8198 that make up its body. */
8199 gimple_seq body = gimple_body (fndecl);
8200
8201 if (gimple_seq_first_stmt (body)
8202 && gimple_seq_first_stmt (body) == gimple_seq_last_stmt (body)
8203 && gimple_code (gimple_seq_first_stmt (body)) == GIMPLE_BIND)
8204 print_gimple_seq (file, body, 0, flags);
8205 else
8206 {
8207 if (!ignore_topmost_bind)
8208 fprintf (file, "{\n");
8209
8210 if (any_var)
8211 fprintf (file, "\n");
8212
8213 print_gimple_seq (file, body, 2, flags);
8214 fprintf (file, "}\n");
8215 }
8216 }
8217 else
8218 {
8219 int indent;
8220
8221 /* Make a tree based dump. */
8222 chain = DECL_SAVED_TREE (fndecl);
8223 if (chain && TREE_CODE (chain) == BIND_EXPR)
8224 {
8225 if (ignore_topmost_bind)
8226 {
8227 chain = BIND_EXPR_BODY (chain);
8228 indent = 2;
8229 }
8230 else
8231 indent = 0;
8232 }
8233 else
8234 {
8235 if (!ignore_topmost_bind)
8236 {
8237 fprintf (file, "{\n");
8238 /* No topmost bind, pretend it's ignored for later. */
8239 ignore_topmost_bind = true;
8240 }
8241 indent = 2;
8242 }
8243
8244 if (any_var)
8245 fprintf (file, "\n");
8246
8247 print_generic_stmt_indented (file, chain, flags, indent);
8248 if (ignore_topmost_bind)
8249 fprintf (file, "}\n");
8250 }
8251
8252 if (flags & TDF_ENUMERATE_LOCALS)
8253 dump_enumerated_decls (file, flags);
8254 fprintf (file, "\n\n");
8255
8256 current_function_decl = old_current_fndecl;
8257 }
8258
8259 /* Dump FUNCTION_DECL FN to stderr using FLAGS (see TDF_* in tree.h) */
8260
8261 DEBUG_FUNCTION void
8262 debug_function (tree fn, dump_flags_t flags)
8263 {
8264 dump_function_to_file (fn, stderr, flags);
8265 }
8266
8267
8268 /* Print on FILE the indexes for the predecessors of basic_block BB. */
8269
8270 static void
8271 print_pred_bbs (FILE *file, basic_block bb)
8272 {
8273 edge e;
8274 edge_iterator ei;
8275
8276 FOR_EACH_EDGE (e, ei, bb->preds)
8277 fprintf (file, "bb_%d ", e->src->index);
8278 }
8279
8280
8281 /* Print on FILE the indexes for the successors of basic_block BB. */
8282
8283 static void
8284 print_succ_bbs (FILE *file, basic_block bb)
8285 {
8286 edge e;
8287 edge_iterator ei;
8288
8289 FOR_EACH_EDGE (e, ei, bb->succs)
8290 fprintf (file, "bb_%d ", e->dest->index);
8291 }
8292
8293 /* Print to FILE the basic block BB following the VERBOSITY level. */
8294
8295 void
8296 print_loops_bb (FILE *file, basic_block bb, int indent, int verbosity)
8297 {
8298 char *s_indent = (char *) alloca ((size_t) indent + 1);
8299 memset ((void *) s_indent, ' ', (size_t) indent);
8300 s_indent[indent] = '\0';
8301
8302 /* Print basic_block's header. */
8303 if (verbosity >= 2)
8304 {
8305 fprintf (file, "%s bb_%d (preds = {", s_indent, bb->index);
8306 print_pred_bbs (file, bb);
8307 fprintf (file, "}, succs = {");
8308 print_succ_bbs (file, bb);
8309 fprintf (file, "})\n");
8310 }
8311
8312 /* Print basic_block's body. */
8313 if (verbosity >= 3)
8314 {
8315 fprintf (file, "%s {\n", s_indent);
8316 dump_bb (file, bb, indent + 4, TDF_VOPS|TDF_MEMSYMS);
8317 fprintf (file, "%s }\n", s_indent);
8318 }
8319 }
8320
8321 static void print_loop_and_siblings (FILE *, class loop *, int, int);
8322
8323 /* Pretty print LOOP on FILE, indented INDENT spaces. Following
8324 VERBOSITY level this outputs the contents of the loop, or just its
8325 structure. */
8326
8327 static void
8328 print_loop (FILE *file, class loop *loop, int indent, int verbosity)
8329 {
8330 char *s_indent;
8331 basic_block bb;
8332
8333 if (loop == NULL)
8334 return;
8335
8336 s_indent = (char *) alloca ((size_t) indent + 1);
8337 memset ((void *) s_indent, ' ', (size_t) indent);
8338 s_indent[indent] = '\0';
8339
8340 /* Print loop's header. */
8341 fprintf (file, "%sloop_%d (", s_indent, loop->num);
8342 if (loop->header)
8343 fprintf (file, "header = %d", loop->header->index);
8344 else
8345 {
8346 fprintf (file, "deleted)\n");
8347 return;
8348 }
8349 if (loop->latch)
8350 fprintf (file, ", latch = %d", loop->latch->index);
8351 else
8352 fprintf (file, ", multiple latches");
8353 fprintf (file, ", niter = ");
8354 print_generic_expr (file, loop->nb_iterations);
8355
8356 if (loop->any_upper_bound)
8357 {
8358 fprintf (file, ", upper_bound = ");
8359 print_decu (loop->nb_iterations_upper_bound, file);
8360 }
8361 if (loop->any_likely_upper_bound)
8362 {
8363 fprintf (file, ", likely_upper_bound = ");
8364 print_decu (loop->nb_iterations_likely_upper_bound, file);
8365 }
8366
8367 if (loop->any_estimate)
8368 {
8369 fprintf (file, ", estimate = ");
8370 print_decu (loop->nb_iterations_estimate, file);
8371 }
8372 if (loop->unroll)
8373 fprintf (file, ", unroll = %d", loop->unroll);
8374 fprintf (file, ")\n");
8375
8376 /* Print loop's body. */
8377 if (verbosity >= 1)
8378 {
8379 fprintf (file, "%s{\n", s_indent);
8380 FOR_EACH_BB_FN (bb, cfun)
8381 if (bb->loop_father == loop)
8382 print_loops_bb (file, bb, indent, verbosity);
8383
8384 print_loop_and_siblings (file, loop->inner, indent + 2, verbosity);
8385 fprintf (file, "%s}\n", s_indent);
8386 }
8387 }
8388
8389 /* Print the LOOP and its sibling loops on FILE, indented INDENT
8390 spaces. Following VERBOSITY level this outputs the contents of the
8391 loop, or just its structure. */
8392
8393 static void
8394 print_loop_and_siblings (FILE *file, class loop *loop, int indent,
8395 int verbosity)
8396 {
8397 if (loop == NULL)
8398 return;
8399
8400 print_loop (file, loop, indent, verbosity);
8401 print_loop_and_siblings (file, loop->next, indent, verbosity);
8402 }
8403
8404 /* Follow a CFG edge from the entry point of the program, and on entry
8405 of a loop, pretty print the loop structure on FILE. */
8406
8407 void
8408 print_loops (FILE *file, int verbosity)
8409 {
8410 basic_block bb;
8411
8412 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
8413 fprintf (file, "\nLoops in function: %s\n", current_function_name ());
8414 if (bb && bb->loop_father)
8415 print_loop_and_siblings (file, bb->loop_father, 0, verbosity);
8416 }
8417
8418 /* Dump a loop. */
8419
8420 DEBUG_FUNCTION void
8421 debug (class loop &ref)
8422 {
8423 print_loop (stderr, &ref, 0, /*verbosity*/0);
8424 }
8425
8426 DEBUG_FUNCTION void
8427 debug (class loop *ptr)
8428 {
8429 if (ptr)
8430 debug (*ptr);
8431 else
8432 fprintf (stderr, "<nil>\n");
8433 }
8434
8435 /* Dump a loop verbosely. */
8436
8437 DEBUG_FUNCTION void
8438 debug_verbose (class loop &ref)
8439 {
8440 print_loop (stderr, &ref, 0, /*verbosity*/3);
8441 }
8442
8443 DEBUG_FUNCTION void
8444 debug_verbose (class loop *ptr)
8445 {
8446 if (ptr)
8447 debug_verbose (*ptr);
8448 else
8449 fprintf (stderr, "<nil>\n");
8450 }
8451
8452
8453 /* Debugging loops structure at tree level, at some VERBOSITY level. */
8454
8455 DEBUG_FUNCTION void
8456 debug_loops (int verbosity)
8457 {
8458 print_loops (stderr, verbosity);
8459 }
8460
8461 /* Print on stderr the code of LOOP, at some VERBOSITY level. */
8462
8463 DEBUG_FUNCTION void
8464 debug_loop (class loop *loop, int verbosity)
8465 {
8466 print_loop (stderr, loop, 0, verbosity);
8467 }
8468
8469 /* Print on stderr the code of loop number NUM, at some VERBOSITY
8470 level. */
8471
8472 DEBUG_FUNCTION void
8473 debug_loop_num (unsigned num, int verbosity)
8474 {
8475 debug_loop (get_loop (cfun, num), verbosity);
8476 }
8477
8478 /* Return true if BB ends with a call, possibly followed by some
8479 instructions that must stay with the call. Return false,
8480 otherwise. */
8481
8482 static bool
8483 gimple_block_ends_with_call_p (basic_block bb)
8484 {
8485 gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
8486 return !gsi_end_p (gsi) && is_gimple_call (gsi_stmt (gsi));
8487 }
8488
8489
8490 /* Return true if BB ends with a conditional branch. Return false,
8491 otherwise. */
8492
8493 static bool
8494 gimple_block_ends_with_condjump_p (const_basic_block bb)
8495 {
8496 gimple *stmt = last_stmt (CONST_CAST_BB (bb));
8497 return (stmt && gimple_code (stmt) == GIMPLE_COND);
8498 }
8499
8500
8501 /* Return true if statement T may terminate execution of BB in ways not
8502 explicitly represented in the CFG. */
8503
8504 bool
8505 stmt_can_terminate_bb_p (gimple *t)
8506 {
8507 tree fndecl = NULL_TREE;
8508 int call_flags = 0;
8509
8510 /* An EH exception not handled internally terminates execution of the whole
8511 function. */
8512 if (stmt_can_throw_external (cfun, t))
8513 return true;
8514
8515 /* NORETURN and LONGJMP calls already have an edge to exit.
8516 CONST and PURE calls do not need one.
8517 We don't currently check for CONST and PURE here, although
8518 it would be a good idea, because those attributes are
8519 figured out from the RTL in mark_constant_function, and
8520 the counter incrementation code from -fprofile-arcs
8521 leads to different results from -fbranch-probabilities. */
8522 if (is_gimple_call (t))
8523 {
8524 fndecl = gimple_call_fndecl (t);
8525 call_flags = gimple_call_flags (t);
8526 }
8527
8528 if (is_gimple_call (t)
8529 && fndecl
8530 && fndecl_built_in_p (fndecl)
8531 && (call_flags & ECF_NOTHROW)
8532 && !(call_flags & ECF_RETURNS_TWICE)
8533 /* fork() doesn't really return twice, but the effect of
8534 wrapping it in __gcov_fork() which calls __gcov_dump() and
8535 __gcov_reset() and clears the counters before forking has the same
8536 effect as returning twice. Force a fake edge. */
8537 && !fndecl_built_in_p (fndecl, BUILT_IN_FORK))
8538 return false;
8539
8540 if (is_gimple_call (t))
8541 {
8542 edge_iterator ei;
8543 edge e;
8544 basic_block bb;
8545
8546 if (call_flags & (ECF_PURE | ECF_CONST)
8547 && !(call_flags & ECF_LOOPING_CONST_OR_PURE))
8548 return false;
8549
8550 /* A function call may do a longjmp, terminate the program or do other things.
8551 Special-case noreturn calls that have non-abnormal edges out, as in this case
8552 the fact is sufficiently represented by the lack of edges out of T. */
8553 if (!(call_flags & ECF_NORETURN))
8554 return true;
8555
8556 bb = gimple_bb (t);
8557 FOR_EACH_EDGE (e, ei, bb->succs)
8558 if ((e->flags & EDGE_FAKE) == 0)
8559 return true;
8560 }
8561
8562 if (gasm *asm_stmt = dyn_cast <gasm *> (t))
8563 if (gimple_asm_volatile_p (asm_stmt) || gimple_asm_input_p (asm_stmt))
8564 return true;
8565
8566 return false;
8567 }
8568
8569
8570 /* Add fake edges to the function exit for any non constant and non
8571 noreturn calls (or noreturn calls with EH/abnormal edges),
8572 volatile inline assembly in the bitmap of blocks specified by BLOCKS
8573 or to the whole CFG if BLOCKS is zero. Return the number of blocks
8574 that were split.
8575
8576 The goal is to expose cases in which entering a basic block does
8577 not imply that all subsequent instructions must be executed. */
8578
8579 static int
8580 gimple_flow_call_edges_add (sbitmap blocks)
8581 {
8582 int i;
8583 int blocks_split = 0;
8584 int last_bb = last_basic_block_for_fn (cfun);
8585 bool check_last_block = false;
8586
8587 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
8588 return 0;
8589
8590 if (! blocks)
8591 check_last_block = true;
8592 else
8593 check_last_block = bitmap_bit_p (blocks,
8594 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
8595
8596 /* In the last basic block, before epilogue generation, there will be
8597 a fallthru edge to EXIT. Special care is required if the last insn
8598 of the last basic block is a call because make_edge folds duplicate
8599 edges, which would result in the fallthru edge also being marked
8600 fake, which would result in the fallthru edge being removed by
8601 remove_fake_edges, which would result in an invalid CFG.
8602
8603 Moreover, we can't elide the outgoing fake edge, since the block
8604 profiler needs to take this into account in order to solve the minimal
8605 spanning tree in the case that the call doesn't return.
8606
8607 Handle this by adding a dummy instruction in a new last basic block. */
8608 if (check_last_block)
8609 {
8610 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
8611 gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
8612 gimple *t = NULL;
8613
8614 if (!gsi_end_p (gsi))
8615 t = gsi_stmt (gsi);
8616
8617 if (t && stmt_can_terminate_bb_p (t))
8618 {
8619 edge e;
8620
8621 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
8622 if (e)
8623 {
8624 gsi_insert_on_edge (e, gimple_build_nop ());
8625 gsi_commit_edge_inserts ();
8626 }
8627 }
8628 }
8629
8630 /* Now add fake edges to the function exit for any non constant
8631 calls since there is no way that we can determine if they will
8632 return or not... */
8633 for (i = 0; i < last_bb; i++)
8634 {
8635 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
8636 gimple_stmt_iterator gsi;
8637 gimple *stmt, *last_stmt;
8638
8639 if (!bb)
8640 continue;
8641
8642 if (blocks && !bitmap_bit_p (blocks, i))
8643 continue;
8644
8645 gsi = gsi_last_nondebug_bb (bb);
8646 if (!gsi_end_p (gsi))
8647 {
8648 last_stmt = gsi_stmt (gsi);
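/* Walk the statements of BB backwards; every statement that can
terminate the block in a way not represented in the CFG gets a fake
edge to EXIT, splitting the block first unless the statement is the
last one.  */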
8649 do
8650 {
8651 stmt = gsi_stmt (gsi);
8652 if (stmt_can_terminate_bb_p (stmt))
8653 {
8654 edge e;
8655
8656 /* The handling above of the final block before the
8657 epilogue should be enough to verify that there is
8658 no edge to the exit block in CFG already.
8659 Calling make_edge in such case would cause us to
8660 mark that edge as fake and remove it later. */
8661 if (flag_checking && stmt == last_stmt)
8662 {
8663 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
8664 gcc_assert (e == NULL);
8665 }
8666
8667 /* Note that the following may create a new basic block
8668 and renumber the existing basic blocks. */
8669 if (stmt != last_stmt)
8670 {
8671 e = split_block (bb, stmt);
8672 if (e)
8673 blocks_split++;
8674 }
8675 e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
8676 e->probability = profile_probability::guessed_never ();
8677 }
8678 gsi_prev (&gsi);
8679 }
8680 while (!gsi_end_p (gsi));
8681 }
8682 }
8683
8684 if (blocks_split)
8685 checking_verify_flow_info ();
8686
8687 return blocks_split;
8688 }
8689
8690 /* Removes edge E and all the blocks dominated by it, and updates dominance
8691 information. The IL in E->src needs to be updated separately.
8692 If dominance info is not available, only the edge E is removed. */
8693
8694 void
8695 remove_edge_and_dominated_blocks (edge e)
8696 {
8697 vec<basic_block> bbs_to_remove = vNULL;
8698 vec<basic_block> bbs_to_fix_dom = vNULL;
8699 edge f;
8700 edge_iterator ei;
8701 bool none_removed = false;
8702 unsigned i;
8703 basic_block bb, dbb;
8704 bitmap_iterator bi;
8705
8706 /* If we are removing a path inside a non-root loop, that may change
8707 loop ownership of blocks or remove loops. Mark loops for fixup. */
8708 if (current_loops
8709 && loop_outer (e->src->loop_father) != NULL
8710 && e->src->loop_father == e->dest->loop_father)
8711 loops_state_set (LOOPS_NEED_FIXUP);
8712
8713 if (!dom_info_available_p (CDI_DOMINATORS))
8714 {
8715 remove_edge (e);
8716 return;
8717 }
8718
8719 /* No updating is needed for edges to exit. */
8720 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
8721 {
8722 if (cfgcleanup_altered_bbs)
8723 bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
8724 remove_edge (e);
8725 return;
8726 }
8727
8728 /* First, we find the basic blocks to remove. If E->dest has a predecessor
8729 that is not dominated by E->dest, then this set is empty. Otherwise,
8730 all the basic blocks dominated by E->dest are removed.
8731
8732 Also, to DF_IDOM we store the immediate dominators of the blocks in
8733 the dominance frontier of E (i.e., of the successors of the
8734 removed blocks, if there are any, and of E->dest otherwise). */
8735 FOR_EACH_EDGE (f, ei, e->dest->preds)
8736 {
8737 if (f == e)
8738 continue;
8739
8740 if (!dominated_by_p (CDI_DOMINATORS, f->src, e->dest))
8741 {
8742 none_removed = true;
8743 break;
8744 }
8745 }
8746
8747 auto_bitmap df, df_idom;
8748 if (none_removed)
8749 bitmap_set_bit (df_idom,
8750 get_immediate_dominator (CDI_DOMINATORS, e->dest)->index);
8751 else
8752 {
8753 bbs_to_remove = get_all_dominated_blocks (CDI_DOMINATORS, e->dest);
8754 FOR_EACH_VEC_ELT (bbs_to_remove, i, bb)
8755 {
8756 FOR_EACH_EDGE (f, ei, bb->succs)
8757 {
8758 if (f->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
8759 bitmap_set_bit (df, f->dest->index);
8760 }
8761 }
8762 FOR_EACH_VEC_ELT (bbs_to_remove, i, bb)
8763 bitmap_clear_bit (df, bb->index);
8764
8765 EXECUTE_IF_SET_IN_BITMAP (df, 0, i, bi)
8766 {
8767 bb = BASIC_BLOCK_FOR_FN (cfun, i);
8768 bitmap_set_bit (df_idom,
8769 get_immediate_dominator (CDI_DOMINATORS, bb)->index);
8770 }
8771 }
8772
8773 if (cfgcleanup_altered_bbs)
8774 {
8775 /* Record the set of the altered basic blocks. */
8776 bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
8777 bitmap_ior_into (cfgcleanup_altered_bbs, df);
8778 }
8779
8780 /* Remove E and the cancelled blocks. */
8781 if (none_removed)
8782 remove_edge (e);
8783 else
8784 {
8785 /* Walk backwards so as to get a chance to substitute all
8786 released DEFs into debug stmts. See
8787 eliminate_unnecessary_stmts() in tree-ssa-dce.c for more
8788 details. */
8789 for (i = bbs_to_remove.length (); i-- > 0; )
8790 delete_basic_block (bbs_to_remove[i]);
8791 }
8792
8793 /* Update the dominance information. The immediate dominator may change only
8794 for blocks whose immediate dominator belongs to DF_IDOM:
8795
8796 Suppose that idom(X) = Y before removal of E and idom(X) != Y after the
8797 removal. Let Z the arbitrary block such that idom(Z) = Y and
8798 Z dominates X after the removal. Before removal, there exists a path P
8799 from Y to X that avoids Z. Let F be the last edge on P that is
8800 removed, and let W = F->dest. Before removal, idom(W) = Y (since Y
8801 dominates W, and because of P, Z does not dominate W), and W belongs to
8802 the dominance frontier of E. Therefore, Y belongs to DF_IDOM. */
8803 EXECUTE_IF_SET_IN_BITMAP (df_idom, 0, i, bi)
8804 {
8805 bb = BASIC_BLOCK_FOR_FN (cfun, i);
8806 for (dbb = first_dom_son (CDI_DOMINATORS, bb);
8807 dbb;
8808 dbb = next_dom_son (CDI_DOMINATORS, dbb))
8809 bbs_to_fix_dom.safe_push (dbb);
8810 }
8811
8812 iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
8813
8814 bbs_to_remove.release ();
8815 bbs_to_fix_dom.release ();
8816 }
8817
8818 /* Purge dead EH edges from basic block BB. */
8819
8820 bool
8821 gimple_purge_dead_eh_edges (basic_block bb)
8822 {
8823 bool changed = false;
8824 edge e;
8825 edge_iterator ei;
8826 gimple *stmt = last_stmt (bb);
8827
8828 if (stmt && stmt_can_throw_internal (cfun, stmt))
8829 return false;
8830
8831 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
8832 {
8833 if (e->flags & EDGE_EH)
8834 {
8835 remove_edge_and_dominated_blocks (e);
8836 changed = true;
8837 }
8838 else
8839 ei_next (&ei);
8840 }
8841
8842 return changed;
8843 }
8844
8845 /* Purge dead EH edges from basic block listed in BLOCKS. */
8846
8847 bool
8848 gimple_purge_all_dead_eh_edges (const_bitmap blocks)
8849 {
8850 bool changed = false;
8851 unsigned i;
8852 bitmap_iterator bi;
8853
8854 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
8855 {
8856 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
8857
8858 /* Earlier gimple_purge_dead_eh_edges could have removed
8859 this basic block already. */
8860 gcc_assert (bb || changed);
8861 if (bb != NULL)
8862 changed |= gimple_purge_dead_eh_edges (bb);
8863 }
8864
8865 return changed;
8866 }
8867
8868 /* Purge dead abnormal call edges from basic block BB. */
8869
8870 bool
8871 gimple_purge_dead_abnormal_call_edges (basic_block bb)
8872 {
8873 bool changed = false;
8874 edge e;
8875 edge_iterator ei;
8876 gimple *stmt = last_stmt (bb);
8877
8878 if (!cfun->has_nonlocal_label
8879 && !cfun->calls_setjmp)
8880 return false;
8881
8882 if (stmt && stmt_can_make_abnormal_goto (stmt))
8883 return false;
8884
8885 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
8886 {
8887 if (e->flags & EDGE_ABNORMAL)
8888 {
8889 if (e->flags & EDGE_FALLTHRU)
8890 e->flags &= ~EDGE_ABNORMAL;
8891 else
8892 remove_edge_and_dominated_blocks (e);
8893 changed = true;
8894 }
8895 else
8896 ei_next (&ei);
8897 }
8898
8899 return changed;
8900 }
8901
8902 /* Purge dead abnormal call edges from basic block listed in BLOCKS. */
8903
8904 bool
8905 gimple_purge_all_dead_abnormal_call_edges (const_bitmap blocks)
8906 {
8907 bool changed = false;
8908 unsigned i;
8909 bitmap_iterator bi;
8910
8911 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
8912 {
8913 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
8914
8915 /* Earlier gimple_purge_dead_abnormal_call_edges could have removed
8916 this basic block already. */
8917 gcc_assert (bb || changed);
8918 if (bb != NULL)
8919 changed |= gimple_purge_dead_abnormal_call_edges (bb);
8920 }
8921
8922 return changed;
8923 }
8924
8925 /* This function is called whenever a new edge is created or
8926 redirected. */
8927
8928 static void
8929 gimple_execute_on_growing_pred (edge e)
8930 {
8931 basic_block bb = e->dest;
8932
8933 if (!gimple_seq_empty_p (phi_nodes (bb)))
8934 reserve_phi_args_for_new_edge (bb);
8935 }
8936
8937 /* This function is called immediately before edge E is removed from
8938 the edge vector E->dest->preds. */
8939
8940 static void
8941 gimple_execute_on_shrinking_pred (edge e)
8942 {
8943 if (!gimple_seq_empty_p (phi_nodes (e->dest)))
8944 remove_phi_args (e);
8945 }
8946
8947 /*---------------------------------------------------------------------------
8948 Helper functions for Loop versioning
8949 ---------------------------------------------------------------------------*/
8950
8951 /* Adjust phi nodes for 'first' basic block. 'second' basic block is a copy
8952 of 'first'. Both of them are dominated by 'new_head' basic block. When
8953 'new_head' was created by 'second's incoming edge it received phi arguments
8954 on the edge by split_edge(). Later, additional edge 'e' was created to
8955 connect 'new_head' and 'first'. Now this routine adds to this additional
8956 edge 'e' the phi args that the new_head-to-second edge received as part
8957 of edge splitting. */
8958
8959 static void
8960 gimple_lv_adjust_loop_header_phi (basic_block first, basic_block second,
8961 basic_block new_head, edge e)
8962 {
8963 gphi *phi1, *phi2;
8964 gphi_iterator psi1, psi2;
8965 tree def;
8966 edge e2 = find_edge (new_head, second);
8967
8968 /* Because NEW_HEAD has been created by splitting SECOND's incoming
8969 edge, we should always have an edge from NEW_HEAD to SECOND. */
8970 gcc_assert (e2 != NULL);
8971
8972 /* Browse all 'second' basic block phi nodes and add phi args to
8973 edge 'e' for 'first' head. PHI args are always in correct order. */
8974
8975 for (psi2 = gsi_start_phis (second),
8976 psi1 = gsi_start_phis (first);
8977 !gsi_end_p (psi2) && !gsi_end_p (psi1);
8978 gsi_next (&psi2), gsi_next (&psi1))
8979 {
8980 phi1 = psi1.phi ();
8981 phi2 = psi2.phi ();
8982 def = PHI_ARG_DEF (phi2, e2->dest_idx);
8983 add_phi_arg (phi1, def, e, gimple_phi_arg_location_from_edge (phi2, e2));
8984 }
8985 }
8986
8987
8988 /* Adds an if-else statement to COND_BB with condition COND_EXPR.
8989 SECOND_HEAD is the destination of the THEN and FIRST_HEAD is
8990 the destination of the ELSE part. */
8991
8992 static void
8993 gimple_lv_add_condition_to_bb (basic_block first_head ATTRIBUTE_UNUSED,
8994 basic_block second_head ATTRIBUTE_UNUSED,
8995 basic_block cond_bb, void *cond_e)
8996 {
8997 gimple_stmt_iterator gsi;
8998 gimple *new_cond_expr;
8999 tree cond_expr = (tree) cond_e;
9000 edge e0;
9001
9002 /* Build new conditional expr */
9003 new_cond_expr = gimple_build_cond_from_tree (cond_expr,
9004 NULL_TREE, NULL_TREE);
9005
9006 /* Add new cond in cond_bb. */
9007 gsi = gsi_last_bb (cond_bb);
9008 gsi_insert_after (&gsi, new_cond_expr, GSI_NEW_STMT);
9009
9010 /* Adjust edges appropriately to connect new head with first head
9011 as well as second head. */
9012 e0 = single_succ_edge (cond_bb);
9013 e0->flags &= ~EDGE_FALLTHRU;
9014 e0->flags |= EDGE_FALSE_VALUE;
9015 }
9016
9017
9018 /* Do book-keeping of basic block BB for the profile consistency checker.
9019 Store the counting in RECORD. */
9020 static void
9021 gimple_account_profile_record (basic_block bb,
9022 struct profile_record *record)
9023 {
9024 gimple_stmt_iterator i;
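/* Sum size estimates over all statements; weight the time estimate by
the block's profile count, or by its estimated frequency when only a
guessed profile is available.  */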
9025 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
9026 {
9027 record->size
9028 += estimate_num_insns (gsi_stmt (i), &eni_size_weights);
9029 if (bb->count.initialized_p ())
9030 record->time
9031 += estimate_num_insns (gsi_stmt (i),
9032 &eni_time_weights) * bb->count.to_gcov_type ();
9033 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
9034 record->time
9035 += estimate_num_insns (gsi_stmt (i),
9036 &eni_time_weights) * bb->count.to_frequency (cfun);
9037 }
9038 }
9039
9040 struct cfg_hooks gimple_cfg_hooks = {
9041 "gimple",
9042 gimple_verify_flow_info,
9043 gimple_dump_bb, /* dump_bb */
9044 gimple_dump_bb_for_graph, /* dump_bb_for_graph */
9045 create_bb, /* create_basic_block */
9046 gimple_redirect_edge_and_branch, /* redirect_edge_and_branch */
9047 gimple_redirect_edge_and_branch_force, /* redirect_edge_and_branch_force */
9048 gimple_can_remove_branch_p, /* can_remove_branch_p */
9049 remove_bb, /* delete_basic_block */
9050 gimple_split_block, /* split_block */
9051 gimple_move_block_after, /* move_block_after */
9052 gimple_can_merge_blocks_p, /* can_merge_blocks_p */
9053 gimple_merge_blocks, /* merge_blocks */
9054 gimple_predict_edge, /* predict_edge */
9055 gimple_predicted_by_p, /* predicted_by_p */
9056 gimple_can_duplicate_bb_p, /* can_duplicate_block_p */
9057 gimple_duplicate_bb, /* duplicate_block */
9058 gimple_split_edge, /* split_edge */
9059 gimple_make_forwarder_block, /* make_forwarder_block */
9060 NULL, /* tidy_fallthru_edge */
9061 NULL, /* force_nonfallthru */
9062 gimple_block_ends_with_call_p,/* block_ends_with_call_p */
9063 gimple_block_ends_with_condjump_p, /* block_ends_with_condjump_p */
9064 gimple_flow_call_edges_add, /* flow_call_edges_add */
9065 gimple_execute_on_growing_pred, /* execute_on_growing_pred */
9066 gimple_execute_on_shrinking_pred, /* execute_on_shrinking_pred */
9067 gimple_duplicate_loop_to_header_edge, /* duplicate loop for trees */
9068 gimple_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
9069 gimple_lv_adjust_loop_header_phi, /* lv_adjust_loop_header_phi*/
9070 extract_true_false_edges_from_block, /* extract_cond_bb_edges */
9071 flush_pending_stmts, /* flush_pending_stmts */
9072 gimple_empty_block_p, /* block_empty_p */
9073 gimple_split_block_before_cond_jump, /* split_block_before_cond_jump */
9074 gimple_account_profile_record,
9075 };
9076
9077
9078 /* Split all critical edges. Split some extra (not necessarily critical) edges
9079 if FOR_EDGE_INSERTION_P is true. */
9080
9081 unsigned int
9082 split_critical_edges (bool for_edge_insertion_p /* = false */)
9083 {
9084 basic_block bb;
9085 edge e;
9086 edge_iterator ei;
9087
9088 /* split_edge can redirect edges out of SWITCH_EXPRs, which can get
9089 expensive. So we want to enable recording of edge to CASE_LABEL_EXPR
9090 mappings around the calls to split_edge. */
9091 start_recording_case_labels ();
9092 FOR_ALL_BB_FN (bb, cfun)
9093 {
9094 FOR_EACH_EDGE (e, ei, bb->succs)
9095 {
9096 if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL))
9097 split_edge (e);
9098 /* PRE inserts statements to edges and expects that
9099 since split_critical_edges was done beforehand, committing edge
9100 insertions will not split more edges. In addition to critical
9101 edges we must split edges that have multiple successors and
9102 end by control flow statements, such as RESX.
9103 Go ahead and split them too. This matches the logic in
9104 gimple_find_edge_insert_loc. */
9105 else if (for_edge_insertion_p
9106 && (!single_pred_p (e->dest)
9107 || !gimple_seq_empty_p (phi_nodes (e->dest))
9108 || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
9109 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
9110 && !(e->flags & EDGE_ABNORMAL))
9111 {
9112 gimple_stmt_iterator gsi;
9113
9114 gsi = gsi_last_bb (e->src);
9115 if (!gsi_end_p (gsi)
9116 && stmt_ends_bb_p (gsi_stmt (gsi))
9117 && (gimple_code (gsi_stmt (gsi)) != GIMPLE_RETURN
9118 && !gimple_call_builtin_p (gsi_stmt (gsi),
9119 BUILT_IN_RETURN)))
9120 split_edge (e);
9121 }
9122 }
9123 }
9124 end_recording_case_labels ();
9125 return 0;
9126 }
9127
9128 namespace {
9129
9130 const pass_data pass_data_split_crit_edges =
9131 {
9132 GIMPLE_PASS, /* type */
9133 "crited", /* name */
9134 OPTGROUP_NONE, /* optinfo_flags */
9135 TV_TREE_SPLIT_EDGES, /* tv_id */
9136 PROP_cfg, /* properties_required */
9137 PROP_no_crit_edges, /* properties_provided */
9138 0, /* properties_destroyed */
9139 0, /* todo_flags_start */
9140 0, /* todo_flags_finish */
9141 };
9142
9143 class pass_split_crit_edges : public gimple_opt_pass
9144 {
9145 public:
9146 pass_split_crit_edges (gcc::context *ctxt)
9147 : gimple_opt_pass (pass_data_split_crit_edges, ctxt)
9148 {}
9149
9150 /* opt_pass methods: */
9151 virtual unsigned int execute (function *) { return split_critical_edges (); }
9152
9153 opt_pass * clone () { return new pass_split_crit_edges (m_ctxt); }
9154 }; // class pass_split_crit_edges
9155
9156 } // anon namespace
9157
9158 gimple_opt_pass *
9159 make_pass_split_crit_edges (gcc::context *ctxt)
9160 {
9161 return new pass_split_crit_edges (ctxt);
9162 }
9163
9164
9165 /* Insert COND expression which is GIMPLE_COND after STMT
9166 in basic block BB with appropriate basic block split
9167 and creation of a new conditionally executed basic block.
9168 Update profile so the new bb is visited with probability PROB.
9169 Return created basic block. */
9170 basic_block
9171 insert_cond_bb (basic_block bb, gimple *stmt, gimple *cond,
9172 profile_probability prob)
9173 {
9174 edge fall = split_block (bb, stmt);
9175 gimple_stmt_iterator iter = gsi_last_bb (bb);
9176 basic_block new_bb;
9177
9178 /* Insert cond statement. */
9179 gcc_assert (gimple_code (cond) == GIMPLE_COND);
9180 if (gsi_end_p (iter))
9181 gsi_insert_before (&iter, cond, GSI_CONTINUE_LINKING);
9182 else
9183 gsi_insert_after (&iter, cond, GSI_CONTINUE_LINKING);
9184
9185 /* Create conditionally executed block. */
9186 new_bb = create_empty_bb (bb);
9187 edge e = make_edge (bb, new_bb, EDGE_TRUE_VALUE);
9188 e->probability = prob;
9189 new_bb->count = e->count ();
9190 make_single_succ_edge (new_bb, fall->dest, EDGE_FALLTHRU);
9191
9192 /* Fix edge for split bb. */
9193 fall->flags = EDGE_FALSE_VALUE;
9194 fall->probability -= e->probability;
9195
9196 /* Update dominance info. */
9197 if (dom_info_available_p (CDI_DOMINATORS))
9198 {
9199 set_immediate_dominator (CDI_DOMINATORS, new_bb, bb);
9200 set_immediate_dominator (CDI_DOMINATORS, fall->dest, bb);
9201 }
9202
9203 /* Update loop info. */
9204 if (current_loops)
9205 add_bb_to_loop (new_bb, bb->loop_father);
9206
9207 return new_bb;
9208 }
9209
9210 /* Build a ternary operation and gimplify it. Emit code before GSI.
9211 Return the gimple_val holding the result. */
9212
9213 tree
9214 gimplify_build3 (gimple_stmt_iterator *gsi, enum tree_code code,
9215 tree type, tree a, tree b, tree c)
9216 {
9217 tree ret;
9218 location_t loc = gimple_location (gsi_stmt (*gsi));
9219
9220 ret = fold_build3_loc (loc, code, type, a, b, c);
9221 return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
9222 GSI_SAME_STMT);
9223 }
9224
9225 /* Build a binary operation and gimplify it. Emit code before GSI.
9226 Return the gimple_val holding the result. */
9227
9228 tree
9229 gimplify_build2 (gimple_stmt_iterator *gsi, enum tree_code code,
9230 tree type, tree a, tree b)
9231 {
9232 tree ret;
9233
9234 ret = fold_build2_loc (gimple_location (gsi_stmt (*gsi)), code, type, a, b);
9235 return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
9236 GSI_SAME_STMT);
9237 }
9238
9239 /* Build a unary operation and gimplify it. Emit code before GSI.
9240 Return the gimple_val holding the result. */
9241
9242 tree
9243 gimplify_build1 (gimple_stmt_iterator *gsi, enum tree_code code, tree type,
9244 tree a)
9245 {
9246 tree ret;
9247
9248 ret = fold_build1_loc (gimple_location (gsi_stmt (*gsi)), code, type, a);
9249 return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
9250 GSI_SAME_STMT);
9251 }



/* Given a basic block B which ends with a conditional and has
precisely two successors, determine which of the edges is taken if
the conditional is true and which is taken if the conditional is
false.  Set TRUE_EDGE and FALSE_EDGE appropriately. */

void
extract_true_false_edges_from_block (basic_block b,
edge *true_edge,
edge *false_edge)
{
edge e = EDGE_SUCC (b, 0);

if (e->flags & EDGE_TRUE_VALUE)
{
*true_edge = e;
*false_edge = EDGE_SUCC (b, 1);
}
else
{
*false_edge = e;
*true_edge = EDGE_SUCC (b, 1);
}
}
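
/* Typical use (illustrative only): for a block BB ending in a GIMPLE_COND,

     edge true_edge, false_edge;
     extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

   after which true_edge->dest is the block executed when the condition
   holds and false_edge->dest the block executed otherwise.  */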


/* From a controlling predicate in the immediate dominator DOM of PHIBLOCK,
determine the edges into PHIBLOCK that are taken when the predicate
evaluates to true and to false, and store them in *TRUE_CONTROLLED_EDGE
and *FALSE_CONTROLLED_EDGE if those pointers are non-NULL.  Return true
if the edges can be determined, false otherwise. */

bool
extract_true_false_controlled_edges (basic_block dom, basic_block phiblock,
edge *true_controlled_edge,
edge *false_controlled_edge)
{
basic_block bb = phiblock;
edge true_edge, false_edge, tem;
edge e0 = NULL, e1 = NULL;

/* We have to verify that one edge into the PHI node is dominated
by the true edge of the predicate block and the other edge
dominated by the false edge.  This ensures that the PHI argument
we are going to take is completely determined by the path we
take from the predicate block.
We can only use BB dominance checks below if the destination of
the true/false edges are dominated by their edge, thus only
have a single predecessor. */
extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
tem = EDGE_PRED (bb, 0);
if (tem == true_edge
|| (single_pred_p (true_edge->dest)
&& (tem->src == true_edge->dest
|| dominated_by_p (CDI_DOMINATORS,
tem->src, true_edge->dest))))
e0 = tem;
else if (tem == false_edge
|| (single_pred_p (false_edge->dest)
&& (tem->src == false_edge->dest
|| dominated_by_p (CDI_DOMINATORS,
tem->src, false_edge->dest))))
e1 = tem;
else
return false;
tem = EDGE_PRED (bb, 1);
if (tem == true_edge
|| (single_pred_p (true_edge->dest)
&& (tem->src == true_edge->dest
|| dominated_by_p (CDI_DOMINATORS,
tem->src, true_edge->dest))))
e0 = tem;
else if (tem == false_edge
|| (single_pred_p (false_edge->dest)
&& (tem->src == false_edge->dest
|| dominated_by_p (CDI_DOMINATORS,
tem->src, false_edge->dest))))
e1 = tem;
else
return false;
if (!e0 || !e1)
return false;

if (true_controlled_edge)
*true_controlled_edge = e0;
if (false_controlled_edge)
*false_controlled_edge = e1;

return true;
}
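
/* Illustrative sketch (the SSA names are made up): if DOM ends in
   "if (x_1 > 0)" and PHIBLOCK contains "y_4 = PHI <a_2 (e0), b_3 (e1)>",
   then after a successful call

     edge et, ef;
     if (extract_true_false_controlled_edges (dom, phiblock, &et, &ef))
       ...

   the PHI argument on edge ET is the value y_4 takes when the predicate is
   true, and the argument on EF the value it takes when it is false.  */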

/* Generate a range test LHS <= RHS such that it holds iff INDEX is in the
range [LOW, HIGH].  Insert the statements computing LHS and RHS before the
last statement of BB. */

void
generate_range_test (basic_block bb, tree index, tree low, tree high,
tree *lhs, tree *rhs)
{
tree type = TREE_TYPE (index);
tree utype = range_check_type (type);

low = fold_convert (utype, low);
high = fold_convert (utype, high);

gimple_seq seq = NULL;
index = gimple_convert (&seq, utype, index);
*lhs = gimple_build (&seq, MINUS_EXPR, utype, index, low);
*rhs = const_binop (MINUS_EXPR, utype, high, low);

gimple_stmt_iterator gsi = gsi_last_bb (bb);
gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
}
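
/* A hedged sketch of a caller (hypothetical): to branch on INDEX being in
   [LOW, HIGH], generate the test and then replace BB's current last
   statement with the comparison:

     tree lhs, rhs;
     generate_range_test (bb, index, low, high, &lhs, &rhs);
     gimple_stmt_iterator gsi = gsi_last_bb (bb);
     gsi_replace (&gsi, gimple_build_cond (LE_EXPR, lhs, rhs,
                                           NULL_TREE, NULL_TREE), true);

   Because the subtraction is done in an unsigned type, the single LE_EXPR
   test is equivalent to LOW <= INDEX && INDEX <= HIGH.  */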

/* Return the basic block that belongs to label numbered INDEX
of a switch statement. */

basic_block
gimple_switch_label_bb (function *ifun, gswitch *gs, unsigned index)
{
return label_to_block (ifun, CASE_LABEL (gimple_switch_label (gs, index)));
}

/* Return the default basic block of a switch statement. */

basic_block
gimple_switch_default_bb (function *ifun, gswitch *gs)
{
return gimple_switch_label_bb (ifun, gs, 0);
}

/* Return the edge that belongs to label numbered INDEX
of a switch statement. */

edge
gimple_switch_edge (function *ifun, gswitch *gs, unsigned index)
{
return find_edge (gimple_bb (gs), gimple_switch_label_bb (ifun, gs, index));
}

/* Return the default edge of a switch statement. */

edge
gimple_switch_default_edge (function *ifun, gswitch *gs)
{
return gimple_switch_edge (ifun, gs, 0);
}
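
/* Usage sketch (illustrative): the accessors above make it easy to walk
   the outgoing edges of a switch in case order, e.g.

     for (unsigned i = 1; i < gimple_switch_num_labels (gs); ++i)
       {
         edge e = gimple_switch_edge (cfun, gs, i);
         ...
       }

   Index 0 is reserved for the default label, which is why the loop starts
   at 1 and the default case has accessors of its own.  */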


/* Emit return warnings. */

namespace {

const pass_data pass_data_warn_function_return =
{
GIMPLE_PASS, /* type */
"*warn_function_return", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_NONE, /* tv_id */
PROP_cfg, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};

class pass_warn_function_return : public gimple_opt_pass
{
public:
pass_warn_function_return (gcc::context *ctxt)
: gimple_opt_pass (pass_data_warn_function_return, ctxt)
{}

/* opt_pass methods: */
virtual unsigned int execute (function *);

}; // class pass_warn_function_return

unsigned int
pass_warn_function_return::execute (function *fun)
{
location_t location;
gimple *last;
edge e;
edge_iterator ei;

if (!targetm.warn_func_return (fun->decl))
return 0;

/* If we have a path to EXIT, then we do return. */
if (TREE_THIS_VOLATILE (fun->decl)
&& EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (fun)->preds) > 0)
{
location = UNKNOWN_LOCATION;
for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (fun)->preds);
(e = ei_safe_edge (ei)); )
{
last = last_stmt (e->src);
if ((gimple_code (last) == GIMPLE_RETURN
|| gimple_call_builtin_p (last, BUILT_IN_RETURN))
&& location == UNKNOWN_LOCATION
&& ((location = LOCATION_LOCUS (gimple_location (last)))
!= UNKNOWN_LOCATION)
&& !optimize)
break;
/* When optimizing, replace return stmts in noreturn functions
with __builtin_unreachable () call. */
if (optimize && gimple_code (last) == GIMPLE_RETURN)
{
tree fndecl = builtin_decl_implicit (BUILT_IN_UNREACHABLE);
gimple *new_stmt = gimple_build_call (fndecl, 0);
gimple_set_location (new_stmt, gimple_location (last));
gimple_stmt_iterator gsi = gsi_for_stmt (last);
gsi_replace (&gsi, new_stmt, true);
remove_edge (e);
}
else
ei_next (&ei);
}
if (location == UNKNOWN_LOCATION)
location = cfun->function_end_locus;
warning_at (location, 0, "%<noreturn%> function does return");
}

/* If we see "return;" in some basic block, then we do reach the end
without returning a value. */
else if (warn_return_type > 0
&& !TREE_NO_WARNING (fun->decl)
&& !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fun->decl))))
{
FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (fun)->preds)
{
gimple *last = last_stmt (e->src);
greturn *return_stmt = dyn_cast <greturn *> (last);
if (return_stmt
&& gimple_return_retval (return_stmt) == NULL
&& !gimple_no_warning_p (last))
{
location = gimple_location (last);
if (LOCATION_LOCUS (location) == UNKNOWN_LOCATION)
location = fun->function_end_locus;
if (warning_at (location, OPT_Wreturn_type,
"control reaches end of non-void function"))
TREE_NO_WARNING (fun->decl) = 1;
break;
}
}
/* The C++ FE turns fallthrough from the end of non-void function
into __builtin_unreachable () call with BUILTINS_LOCATION.
Recognize those too. */
basic_block bb;
if (!TREE_NO_WARNING (fun->decl))
FOR_EACH_BB_FN (bb, fun)
if (EDGE_COUNT (bb->succs) == 0)
{
gimple *last = last_stmt (bb);
const enum built_in_function ubsan_missing_ret
= BUILT_IN_UBSAN_HANDLE_MISSING_RETURN;
if (last
&& ((LOCATION_LOCUS (gimple_location (last))
== BUILTINS_LOCATION
&& gimple_call_builtin_p (last, BUILT_IN_UNREACHABLE))
|| gimple_call_builtin_p (last, ubsan_missing_ret)))
{
gimple_stmt_iterator gsi = gsi_for_stmt (last);
gsi_prev_nondebug (&gsi);
gimple *prev = gsi_stmt (gsi);
if (prev == NULL)
location = UNKNOWN_LOCATION;
else
location = gimple_location (prev);
if (LOCATION_LOCUS (location) == UNKNOWN_LOCATION)
location = fun->function_end_locus;
if (warning_at (location, OPT_Wreturn_type,
"control reaches end of non-void function"))
TREE_NO_WARNING (fun->decl) = 1;
break;
}
}
return 0;
}
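
/* For reference (illustrative, not part of this file): a noreturn function
   with a reachable "return;" statement triggers the first warning above,
   e.g.

     __attribute__ ((noreturn)) void f (int x) { if (x) return; for (;;); }

   while a non-void function that can fall off its end triggers the
   -Wreturn-type warning, e.g.

     int g (int x) { if (x) return 1; }  */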

} // anon namespace

gimple_opt_pass *
make_pass_warn_function_return (gcc::context *ctxt)
{
return new pass_warn_function_return (ctxt);
}

/* Walk a gimplified function and warn about calls whose return value is
ignored when the called function's type carries
attribute ((warn_unused_result)).  This is done before inlining, so we
don't have to worry about that. */

static void
do_warn_unused_result (gimple_seq seq)
{
tree fdecl, ftype;
gimple_stmt_iterator i;

for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
{
gimple *g = gsi_stmt (i);

switch (gimple_code (g))
{
case GIMPLE_BIND:
do_warn_unused_result (gimple_bind_body (as_a <gbind *>(g)));
break;
case GIMPLE_TRY:
do_warn_unused_result (gimple_try_eval (g));
do_warn_unused_result (gimple_try_cleanup (g));
break;
case GIMPLE_CATCH:
do_warn_unused_result (gimple_catch_handler (
as_a <gcatch *> (g)));
break;
case GIMPLE_EH_FILTER:
do_warn_unused_result (gimple_eh_filter_failure (g));
break;

case GIMPLE_CALL:
if (gimple_call_lhs (g))
break;
if (gimple_call_internal_p (g))
break;

/* This is a naked call, as opposed to a GIMPLE_CALL with an
LHS.  All calls whose value is ignored should be
represented like this.  Look for the attribute. */
fdecl = gimple_call_fndecl (g);
ftype = gimple_call_fntype (g);

if (lookup_attribute ("warn_unused_result", TYPE_ATTRIBUTES (ftype)))
{
location_t loc = gimple_location (g);

if (fdecl)
warning_at (loc, OPT_Wunused_result,
"ignoring return value of %qD "
"declared with attribute %<warn_unused_result%>",
fdecl);
else
warning_at (loc, OPT_Wunused_result,
"ignoring return value of function "
"declared with attribute %<warn_unused_result%>");
}
break;

default:
/* Not a container, not a call, or a call whose value is used. */
break;
}
}
}
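
/* For reference (illustrative, not part of this file), the kind of call the
   walk above warns about:

     __attribute__ ((warn_unused_result)) int must_check (void);
     ...
     must_check ();   warning: ignoring return value of 'must_check' ...

   Note that, unlike many warnings, this one is not silenced by casting the
   call to void; actually using the return value is the way to avoid it.  */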

namespace {

const pass_data pass_data_warn_unused_result =
{
GIMPLE_PASS, /* type */
"*warn_unused_result", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_NONE, /* tv_id */
PROP_gimple_any, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};

class pass_warn_unused_result : public gimple_opt_pass
{
public:
pass_warn_unused_result (gcc::context *ctxt)
: gimple_opt_pass (pass_data_warn_unused_result, ctxt)
{}

/* opt_pass methods: */
virtual bool gate (function *) { return flag_warn_unused_result; }
virtual unsigned int execute (function *)
{
do_warn_unused_result (gimple_body (current_function_decl));
return 0;
}

}; // class pass_warn_unused_result

} // anon namespace

gimple_opt_pass *
make_pass_warn_unused_result (gcc::context *ctxt)
{
return new pass_warn_unused_result (ctxt);
}

/* IPA passes, compilation of earlier functions or inlining
might have changed some properties, such as functions being marked
nothrow, pure, const or noreturn.
Remove redundant edges and basic blocks, and create new ones if necessary.

This pass can't be executed as a standalone pass from the pass manager,
because in between inlining and this fixup verify_flow_info would fail. */

unsigned int
execute_fixup_cfg (void)
{
basic_block bb;
gimple_stmt_iterator gsi;
int todo = 0;
cgraph_node *node = cgraph_node::get (current_function_decl);
/* Same scaling is also done by ipa_merge_profiles. */
profile_count num = node->count;
profile_count den = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
bool scale = num.initialized_p () && !(num == den);

if (scale)
{
profile_count::adjust_for_ipa_scaling (&num, &den);
ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = node->count;
EXIT_BLOCK_PTR_FOR_FN (cfun)->count
= EXIT_BLOCK_PTR_FOR_FN (cfun)->count.apply_scale (num, den);
}
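
/* Illustrative numbers: if the IPA-determined NODE->count is 50 while the
   local ENTRY block count is 100, then NUM/DEN is 50/100 and every block
   count below is multiplied by that ratio, i.e. halved, so the local
   profile matches what IPA decided for this node.  */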

FOR_EACH_BB_FN (bb, cfun)
{
if (scale)
bb->count = bb->count.apply_scale (num, den);
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
{
gimple *stmt = gsi_stmt (gsi);
tree decl = is_gimple_call (stmt)
? gimple_call_fndecl (stmt)
: NULL;
if (decl)
{
int flags = gimple_call_flags (stmt);
if (flags & (ECF_CONST | ECF_PURE | ECF_LOOPING_CONST_OR_PURE))
{
if (gimple_purge_dead_abnormal_call_edges (bb))
todo |= TODO_cleanup_cfg;

if (gimple_in_ssa_p (cfun))
{
todo |= TODO_update_ssa | TODO_cleanup_cfg;
update_stmt (stmt);
}
}

if (flags & ECF_NORETURN
&& fixup_noreturn_call (stmt))
todo |= TODO_cleanup_cfg;
}

/* Remove stores to variables we marked write-only.
Keep the access if the store has a side effect, i.e. when the source
is volatile. */
if (gimple_store_p (stmt)
&& !gimple_has_side_effects (stmt)
&& !optimize_debug)
{
tree lhs = get_base_address (gimple_get_lhs (stmt));

if (VAR_P (lhs)
&& (TREE_STATIC (lhs) || DECL_EXTERNAL (lhs))
&& varpool_node::get (lhs)->writeonly)
{
unlink_stmt_vdef (stmt);
gsi_remove (&gsi, true);
release_defs (stmt);
todo |= TODO_update_ssa | TODO_cleanup_cfg;
continue;
}
}
/* For calls we can simply remove LHS when it is known
to be write-only. */
if (is_gimple_call (stmt)
&& gimple_get_lhs (stmt))
{
tree lhs = get_base_address (gimple_get_lhs (stmt));

if (VAR_P (lhs)
&& (TREE_STATIC (lhs) || DECL_EXTERNAL (lhs))
&& varpool_node::get (lhs)->writeonly)
{
gimple_call_set_lhs (stmt, NULL);
update_stmt (stmt);
todo |= TODO_update_ssa | TODO_cleanup_cfg;
}
}

if (maybe_clean_eh_stmt (stmt)
&& gimple_purge_dead_eh_edges (bb))
todo |= TODO_cleanup_cfg;
gsi_next (&gsi);
}

/* If we have a basic block with no successors that does not
end with a control statement or a noreturn call, end it with
a call to __builtin_unreachable.  This situation can occur
when inlining a noreturn call that does in fact return. */
if (EDGE_COUNT (bb->succs) == 0)
{
gimple *stmt = last_stmt (bb);
if (!stmt
|| (!is_ctrl_stmt (stmt)
&& (!is_gimple_call (stmt)
|| !gimple_call_noreturn_p (stmt))))
{
if (stmt && is_gimple_call (stmt))
gimple_call_set_ctrl_altering (stmt, false);
tree fndecl = builtin_decl_implicit (BUILT_IN_UNREACHABLE);
stmt = gimple_build_call (fndecl, 0);
gimple_stmt_iterator gsi = gsi_last_bb (bb);
gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
if (!cfun->after_inlining)
{
gcall *call_stmt = dyn_cast <gcall *> (stmt);
node->create_edge (cgraph_node::get_create (fndecl),
call_stmt, bb->count);
}
}
}
}
if (scale)
{
update_max_bb_count ();
compute_function_frequency ();
}

if (current_loops
&& (todo & TODO_cleanup_cfg))
loops_state_set (LOOPS_NEED_FIXUP);

return todo;
}

namespace {

const pass_data pass_data_fixup_cfg =
{
GIMPLE_PASS, /* type */
"fixup_cfg", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_NONE, /* tv_id */
PROP_cfg, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};

class pass_fixup_cfg : public gimple_opt_pass
{
public:
pass_fixup_cfg (gcc::context *ctxt)
: gimple_opt_pass (pass_data_fixup_cfg, ctxt)
{}

/* opt_pass methods: */
opt_pass * clone () { return new pass_fixup_cfg (m_ctxt); }
virtual unsigned int execute (function *) { return execute_fixup_cfg (); }

}; // class pass_fixup_cfg

} // anon namespace

gimple_opt_pass *
make_pass_fixup_cfg (gcc::context *ctxt)
{
return new pass_fixup_cfg (ctxt);
}

/* Garbage collection support for edge_def. */

extern void gt_ggc_mx (tree&);
extern void gt_ggc_mx (gimple *&);
extern void gt_ggc_mx (rtx&);
extern void gt_ggc_mx (basic_block&);

static void
gt_ggc_mx (rtx_insn *& x)
{
if (x)
gt_ggc_mx_rtx_def ((void *) x);
}

void
gt_ggc_mx (edge_def *e)
{
tree block = LOCATION_BLOCK (e->goto_locus);
gt_ggc_mx (e->src);
gt_ggc_mx (e->dest);
if (current_ir_type () == IR_GIMPLE)
gt_ggc_mx (e->insns.g);
else
gt_ggc_mx (e->insns.r);
gt_ggc_mx (block);
}

/* PCH support for edge_def. */

extern void gt_pch_nx (tree&);
extern void gt_pch_nx (gimple *&);
extern void gt_pch_nx (rtx&);
extern void gt_pch_nx (basic_block&);

static void
gt_pch_nx (rtx_insn *& x)
{
if (x)
gt_pch_nx_rtx_def ((void *) x);
}

void
gt_pch_nx (edge_def *e)
{
tree block = LOCATION_BLOCK (e->goto_locus);
gt_pch_nx (e->src);
gt_pch_nx (e->dest);
if (current_ir_type () == IR_GIMPLE)
gt_pch_nx (e->insns.g);
else
gt_pch_nx (e->insns.r);
gt_pch_nx (block);
}

void
gt_pch_nx (edge_def *e, gt_pointer_operator op, void *cookie)
{
tree block = LOCATION_BLOCK (e->goto_locus);
op (&(e->src), cookie);
op (&(e->dest), cookie);
if (current_ir_type () == IR_GIMPLE)
op (&(e->insns.g), cookie);
else
op (&(e->insns.r), cookie);
op (&(block), cookie);
}

#if CHECKING_P

namespace selftest {

/* Helper function for CFG selftests: create a dummy function decl
and push it as cfun. */

static tree
push_fndecl (const char *name)
{
tree fn_type = build_function_type_array (integer_type_node, 0, NULL);
/* FIXME: this uses input_location: */
tree fndecl = build_fn_decl (name, fn_type);
tree retval = build_decl (UNKNOWN_LOCATION, RESULT_DECL,
NULL_TREE, integer_type_node);
DECL_RESULT (fndecl) = retval;
push_struct_function (fndecl);
function *fun = DECL_STRUCT_FUNCTION (fndecl);
ASSERT_TRUE (fun != NULL);
init_empty_tree_cfg_for_function (fun);
ASSERT_EQ (2, n_basic_blocks_for_fn (fun));
ASSERT_EQ (0, n_edges_for_fn (fun));
return fndecl;
}

/* These tests directly create CFGs.
Compare with the static fns within tree-cfg.c:
- build_gimple_cfg
- make_blocks: calls create_basic_block (seq, bb);
- make_edges. */

/* Verify a simple cfg of the form:
ENTRY -> A -> B -> C -> EXIT. */

static void
test_linear_chain ()
{
gimple_register_cfg_hooks ();

tree fndecl = push_fndecl ("cfg_test_linear_chain");
function *fun = DECL_STRUCT_FUNCTION (fndecl);

/* Create some empty blocks. */
basic_block bb_a = create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun));
basic_block bb_b = create_empty_bb (bb_a);
basic_block bb_c = create_empty_bb (bb_b);

ASSERT_EQ (5, n_basic_blocks_for_fn (fun));
ASSERT_EQ (0, n_edges_for_fn (fun));

/* Create some edges: a simple linear chain of BBs. */
make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), bb_a, EDGE_FALLTHRU);
make_edge (bb_a, bb_b, 0);
make_edge (bb_b, bb_c, 0);
make_edge (bb_c, EXIT_BLOCK_PTR_FOR_FN (fun), 0);

/* Verify the edges. */
ASSERT_EQ (4, n_edges_for_fn (fun));
ASSERT_EQ (NULL, ENTRY_BLOCK_PTR_FOR_FN (fun)->preds);
ASSERT_EQ (1, ENTRY_BLOCK_PTR_FOR_FN (fun)->succs->length ());
ASSERT_EQ (1, bb_a->preds->length ());
ASSERT_EQ (1, bb_a->succs->length ());
ASSERT_EQ (1, bb_b->preds->length ());
ASSERT_EQ (1, bb_b->succs->length ());
ASSERT_EQ (1, bb_c->preds->length ());
ASSERT_EQ (1, bb_c->succs->length ());
ASSERT_EQ (1, EXIT_BLOCK_PTR_FOR_FN (fun)->preds->length ());
ASSERT_EQ (NULL, EXIT_BLOCK_PTR_FOR_FN (fun)->succs);

/* Verify the dominance information
Each BB in our simple chain should be dominated by the one before
it. */
calculate_dominance_info (CDI_DOMINATORS);
ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_b));
ASSERT_EQ (bb_b, get_immediate_dominator (CDI_DOMINATORS, bb_c));
vec<basic_block> dom_by_b = get_dominated_by (CDI_DOMINATORS, bb_b);
ASSERT_EQ (1, dom_by_b.length ());
ASSERT_EQ (bb_c, dom_by_b[0]);
free_dominance_info (CDI_DOMINATORS);
dom_by_b.release ();

/* Similarly for post-dominance: each BB in our chain is post-dominated
by the one after it. */
calculate_dominance_info (CDI_POST_DOMINATORS);
ASSERT_EQ (bb_b, get_immediate_dominator (CDI_POST_DOMINATORS, bb_a));
ASSERT_EQ (bb_c, get_immediate_dominator (CDI_POST_DOMINATORS, bb_b));
vec<basic_block> postdom_by_b = get_dominated_by (CDI_POST_DOMINATORS, bb_b);
ASSERT_EQ (1, postdom_by_b.length ());
ASSERT_EQ (bb_a, postdom_by_b[0]);
free_dominance_info (CDI_POST_DOMINATORS);
postdom_by_b.release ();

pop_cfun ();
}

/* Verify a simple CFG of the form:
     ENTRY
       |
       A
      / \
     /t  \f
    B     C
     \   /
      \ /
       D
       |
     EXIT.  */

static void
test_diamond ()
{
gimple_register_cfg_hooks ();

tree fndecl = push_fndecl ("cfg_test_diamond");
function *fun = DECL_STRUCT_FUNCTION (fndecl);

/* Create some empty blocks. */
basic_block bb_a = create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun));
basic_block bb_b = create_empty_bb (bb_a);
basic_block bb_c = create_empty_bb (bb_a);
basic_block bb_d = create_empty_bb (bb_b);

ASSERT_EQ (6, n_basic_blocks_for_fn (fun));
ASSERT_EQ (0, n_edges_for_fn (fun));

/* Create the edges. */
make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), bb_a, EDGE_FALLTHRU);
make_edge (bb_a, bb_b, EDGE_TRUE_VALUE);
make_edge (bb_a, bb_c, EDGE_FALSE_VALUE);
make_edge (bb_b, bb_d, 0);
make_edge (bb_c, bb_d, 0);
make_edge (bb_d, EXIT_BLOCK_PTR_FOR_FN (fun), 0);

/* Verify the edges. */
ASSERT_EQ (6, n_edges_for_fn (fun));
ASSERT_EQ (1, bb_a->preds->length ());
ASSERT_EQ (2, bb_a->succs->length ());
ASSERT_EQ (1, bb_b->preds->length ());
ASSERT_EQ (1, bb_b->succs->length ());
ASSERT_EQ (1, bb_c->preds->length ());
ASSERT_EQ (1, bb_c->succs->length ());
ASSERT_EQ (2, bb_d->preds->length ());
ASSERT_EQ (1, bb_d->succs->length ());

/* Verify the dominance information. */
calculate_dominance_info (CDI_DOMINATORS);
ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_b));
ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_c));
ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_d));
vec<basic_block> dom_by_a = get_dominated_by (CDI_DOMINATORS, bb_a);
ASSERT_EQ (3, dom_by_a.length ()); /* B, C, D, in some order. */
dom_by_a.release ();
vec<basic_block> dom_by_b = get_dominated_by (CDI_DOMINATORS, bb_b);
ASSERT_EQ (0, dom_by_b.length ());
dom_by_b.release ();
free_dominance_info (CDI_DOMINATORS);

/* Similarly for post-dominance. */
calculate_dominance_info (CDI_POST_DOMINATORS);
ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_a));
ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_b));
ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_c));
vec<basic_block> postdom_by_d = get_dominated_by (CDI_POST_DOMINATORS, bb_d);
ASSERT_EQ (3, postdom_by_d.length ()); /* A, B, C in some order. */
postdom_by_d.release ();
vec<basic_block> postdom_by_b = get_dominated_by (CDI_POST_DOMINATORS, bb_b);
ASSERT_EQ (0, postdom_by_b.length ());
postdom_by_b.release ();
free_dominance_info (CDI_POST_DOMINATORS);

pop_cfun ();
}

/* Verify that we can handle a CFG containing a "complete" aka
fully-connected subgraph (where each of A, B, C and D below has an
edge to every node of the subgraph, including itself), e.g.:

    ENTRY  EXIT
      |    ^
      |   /
      |  /
      | /
      V/
      A<--->B
      ^^   ^^
      | \ / |
      |  X  |
      | / \ |
      VV   VV
      C<--->D
*/

static void
test_fully_connected ()
{
gimple_register_cfg_hooks ();

tree fndecl = push_fndecl ("cfg_fully_connected");
function *fun = DECL_STRUCT_FUNCTION (fndecl);

const int n = 4;

/* Create some empty blocks. */
auto_vec <basic_block> subgraph_nodes;
for (int i = 0; i < n; i++)
subgraph_nodes.safe_push (create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun)));

ASSERT_EQ (n + 2, n_basic_blocks_for_fn (fun));
ASSERT_EQ (0, n_edges_for_fn (fun));

/* Create the edges. */
make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), subgraph_nodes[0], EDGE_FALLTHRU);
make_edge (subgraph_nodes[0], EXIT_BLOCK_PTR_FOR_FN (fun), 0);
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
make_edge (subgraph_nodes[i], subgraph_nodes[j], 0);

/* Verify the edges. */
ASSERT_EQ (2 + (n * n), n_edges_for_fn (fun));
/* The first one is linked to ENTRY/EXIT as well as itself and
everything else. */
ASSERT_EQ (n + 1, subgraph_nodes[0]->preds->length ());
ASSERT_EQ (n + 1, subgraph_nodes[0]->succs->length ());
/* The other ones in the subgraph are linked to everything in
the subgraph (including themselves). */
for (int i = 1; i < n; i++)
{
ASSERT_EQ (n, subgraph_nodes[i]->preds->length ());
ASSERT_EQ (n, subgraph_nodes[i]->succs->length ());
}

/* Verify the dominance information. */
calculate_dominance_info (CDI_DOMINATORS);
/* The initial block in the subgraph should be dominated by ENTRY. */
ASSERT_EQ (ENTRY_BLOCK_PTR_FOR_FN (fun),
get_immediate_dominator (CDI_DOMINATORS,
subgraph_nodes[0]));
/* Every other block in the subgraph should be dominated by the
initial block. */
for (int i = 1; i < n; i++)
ASSERT_EQ (subgraph_nodes[0],
get_immediate_dominator (CDI_DOMINATORS,
subgraph_nodes[i]));
free_dominance_info (CDI_DOMINATORS);

/* Similarly for post-dominance. */
calculate_dominance_info (CDI_POST_DOMINATORS);
/* The initial block in the subgraph should be postdominated by EXIT. */
ASSERT_EQ (EXIT_BLOCK_PTR_FOR_FN (fun),
get_immediate_dominator (CDI_POST_DOMINATORS,
subgraph_nodes[0]));
/* Every other block in the subgraph should be postdominated by the
initial block, since that leads to EXIT. */
for (int i = 1; i < n; i++)
ASSERT_EQ (subgraph_nodes[0],
get_immediate_dominator (CDI_POST_DOMINATORS,
subgraph_nodes[i]));
free_dominance_info (CDI_POST_DOMINATORS);

pop_cfun ();
}

/* Run all of the selftests within this file. */

void
tree_cfg_c_tests ()
{
test_linear_chain ();
test_diamond ();
test_fully_connected ();
}

} // namespace selftest

/* TODO: test the dominator/postdominator logic with various graphs/nodes:
- loop
- nested loops
- switch statement (a block with many out-edges)
- something that jumps to itself
- etc */

#endif /* CHECKING_P */
