1 /* Generic SSA value propagation engine.
2 Copyright (C) 2004-2016 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "ssa.h"
28 #include "gimple-pretty-print.h"
29 #include "dumpfile.h"
30 #include "gimple-fold.h"
31 #include "tree-eh.h"
32 #include "gimplify.h"
33 #include "gimple-iterator.h"
34 #include "tree-cfg.h"
35 #include "tree-ssa.h"
36 #include "tree-ssa-propagate.h"
37 #include "domwalk.h"
38 #include "cfgloop.h"
39 #include "tree-cfgcleanup.h"
40 #include "cfganal.h"
41
42 /* This file implements a generic value propagation engine based on
43 the same propagation used by the SSA-CCP algorithm [1].
44
45 Propagation is performed by simulating the execution of every
46 statement that produces the value being propagated. Simulation
47 proceeds as follows:
48
49 1- Initially, all edges of the CFG are marked not executable and
50 the CFG worklist is seeded with all the statements in the entry
51 basic block (block 0).
52
53 2- Every statement S is simulated with a call to the call-back
54 function SSA_PROP_VISIT_STMT. This evaluation may produce 3
55 results:
56
57 SSA_PROP_NOT_INTERESTING: Statement S produces nothing of
58 interest and does not affect any of the work lists.
59 The statement may be simulated again if any of its input
60 operands change in future iterations of the simulator.
61
62 SSA_PROP_VARYING: The value produced by S cannot be determined
63 at compile time. Further simulation of S is not required.
64 If S is a conditional jump, all the outgoing edges for the
65 block are considered executable and added to the work
66 list.
67
68 SSA_PROP_INTERESTING: S produces a value that can be computed
69 at compile time. Its result can be propagated into the
70 statements that feed from S. Furthermore, if S is a
71 conditional jump, only the edge known to be taken is added
72 to the work list. Edges that are known not to execute are
73 never simulated.
74
75 3- PHI nodes are simulated with a call to SSA_PROP_VISIT_PHI. The
76 return value from SSA_PROP_VISIT_PHI has the same semantics as
77 described in #2.
78
   4- Two work lists are kept.  Statements are only added to these
      lists if they produce one of SSA_PROP_INTERESTING or
      SSA_PROP_VARYING.

	CFG_BLOCKS contains the list of blocks to be simulated.
	Blocks are added to this list if their incoming edges are
	found executable.

	SSA_EDGE_WORKLIST contains the list of statements that we
	need to revisit.

   5- Simulation terminates when both work lists are drained.
91
92 Before calling ssa_propagate, it is important to clear
93 prop_simulate_again_p for all the statements in the program that
94 should be simulated. This initialization allows an implementation
95 to specify which statements should never be simulated.
96
97 It is also important to compute def-use information before calling
98 ssa_propagate.
99
100 References:
101
102 [1] Constant propagation with conditional branches,
103 Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
104
105 [2] Building an Optimizing Compiler,
106 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
107
108 [3] Advanced Compiler Design and Implementation,
109 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */
110
/* Function pointers used to parameterize the propagation engine.
   Set by ssa_propagate from its callback arguments.  */
static ssa_prop_visit_stmt_fn ssa_prop_visit_stmt;
static ssa_prop_visit_phi_fn ssa_prop_visit_phi;

/* Worklist of control flow edge destinations.  This contains
   the CFG order number of the blocks so we can iterate in CFG
   order by visiting in bit-order.  */
static bitmap cfg_blocks;
/* Mappings between basic block index and its CFG (reverse post)
   order number, computed in ssa_prop_init.  */
static int *bb_to_cfg_order;
static int *cfg_order_to_bb;

/* Worklist of SSA edges which will need reexamination as their
   definition has changed.  SSA edges are def-use edges in the SSA
   web.  For each D-U edge, we store the target statement or PHI node
   UID in a bitmap.  UIDs order stmts in execution order.  */
static bitmap ssa_edge_worklist;
/* Map from statement UID back to the statement, so a set bit in
   SSA_EDGE_WORKLIST can be turned into the statement to simulate.  */
static vec<gimple *> uid_to_stmt;
128
/* Return true if the block worklist is empty.  */

static inline bool
cfg_blocks_empty_p (void)
{
  return bitmap_empty_p (cfg_blocks);
}
136
137
138 /* Add a basic block to the worklist. The block must not be the ENTRY
139 or EXIT block. */
140
141 static void
cfg_blocks_add(basic_block bb)142 cfg_blocks_add (basic_block bb)
143 {
144 gcc_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
145 && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
146 bitmap_set_bit (cfg_blocks, bb_to_cfg_order[bb->index]);
147 }
148
149
150 /* Remove a block from the worklist. */
151
152 static basic_block
cfg_blocks_get(void)153 cfg_blocks_get (void)
154 {
155 gcc_assert (!cfg_blocks_empty_p ());
156 int order_index = bitmap_first_set_bit (cfg_blocks);
157 bitmap_clear_bit (cfg_blocks, order_index);
158 return BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb [order_index]);
159 }
160
161
/* We have just defined a new value for VAR.  Add all immediate uses
   of VAR to the SSA edge worklist so their statements get simulated
   again, skipping uses in blocks that have not been simulated yet
   and statements the propagator no longer wants simulated.  */

static void
add_ssa_edge (tree var)
{
  imm_use_iterator iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
    {
      gimple *use_stmt = USE_STMT (use_p);

      /* If we did not yet simulate the block wait for this to happen
         and do not add the stmt to the SSA edge worklist.  */
      if (! (gimple_bb (use_stmt)->flags & BB_VISITED))
	continue;

      /* bitmap_set_bit returns true only when the bit was newly set,
	 so each statement is queued and dumped at most once.  */
      if (prop_simulate_again_p (use_stmt)
	  && bitmap_set_bit (ssa_edge_worklist, gimple_uid (use_stmt)))
	{
	  /* Record the UID -> stmt mapping used when draining the
	     worklist.  */
	  uid_to_stmt[gimple_uid (use_stmt)] = use_stmt;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "ssa_edge_worklist: adding SSA use in ");
	      print_gimple_stmt (dump_file, use_stmt, 0, TDF_SLIM);
	    }
	}
    }
}
193
194
195 /* Add edge E to the control flow worklist. */
196
197 static void
add_control_edge(edge e)198 add_control_edge (edge e)
199 {
200 basic_block bb = e->dest;
201 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
202 return;
203
204 /* If the edge had already been executed, skip it. */
205 if (e->flags & EDGE_EXECUTABLE)
206 return;
207
208 e->flags |= EDGE_EXECUTABLE;
209
210 cfg_blocks_add (bb);
211
212 if (dump_file && (dump_flags & TDF_DETAILS))
213 fprintf (dump_file, "Adding destination of edge (%d -> %d) to worklist\n",
214 e->src->index, e->dest->index);
215 }
216
217
/* Simulate the execution of STMT and update the work lists accordingly.
   STMT is evaluated via the SSA_PROP_VISIT_STMT / SSA_PROP_VISIT_PHI
   callbacks; the result drives which SSA and control edges are added
   to the worklists (see the file-level comment).  */

static void
simulate_stmt (gimple *stmt)
{
  enum ssa_prop_result val = SSA_PROP_NOT_INTERESTING;
  edge taken_edge = NULL;
  tree output_name = NULL_TREE;

  /* Pull the stmt off the SSA edge worklist.  */
  bitmap_clear_bit (ssa_edge_worklist, gimple_uid (stmt));

  /* Don't bother visiting statements that are already
     considered varying by the propagator.  */
  if (!prop_simulate_again_p (stmt))
    return;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      val = ssa_prop_visit_phi (as_a <gphi *> (stmt));
      output_name = gimple_phi_result (stmt);
    }
  else
    val = ssa_prop_visit_stmt (stmt, &taken_edge, &output_name);

  if (val == SSA_PROP_VARYING)
    {
      /* Once varying, a statement's value can never change back, so
	 it never needs to be simulated again.  */
      prop_set_simulate_again (stmt, false);

      /* If the statement produced a new varying value, add the SSA
	 edges coming out of OUTPUT_NAME.  */
      if (output_name)
	add_ssa_edge (output_name);

      /* If STMT transfers control out of its basic block, add
	 all outgoing edges to the work list.  */
      if (stmt_ends_bb_p (stmt))
	{
	  edge e;
	  edge_iterator ei;
	  basic_block bb = gimple_bb (stmt);
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    add_control_edge (e);
	}
      return;
    }
  else if (val == SSA_PROP_INTERESTING)
    {
      /* If the statement produced new value, add the SSA edges coming
	 out of OUTPUT_NAME.  */
      if (output_name)
	add_ssa_edge (output_name);

      /* If we know which edge is going to be taken out of this block,
	 add it to the CFG work list.  */
      if (taken_edge)
	add_control_edge (taken_edge);
    }

  /* If there are no SSA uses on the stmt whose defs are simulated
     again then this stmt will be never visited again.  */
  bool has_simulate_again_uses = false;
  use_operand_p use_p;
  ssa_op_iter iter;
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      /* A PHI can still change if an incoming edge later becomes
	 executable or if an argument's defining statement may be
	 simulated again.  */
      edge_iterator ei;
      edge e;
      tree arg;
      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->preds)
	if (!(e->flags & EDGE_EXECUTABLE)
	    || ((arg = PHI_ARG_DEF_FROM_EDGE (stmt, e))
		&& TREE_CODE (arg) == SSA_NAME
		&& !SSA_NAME_IS_DEFAULT_DEF (arg)
		&& prop_simulate_again_p (SSA_NAME_DEF_STMT (arg))))
	  {
	    has_simulate_again_uses = true;
	    break;
	  }
    }
  else
    FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
      {
	gimple *def_stmt = SSA_NAME_DEF_STMT (USE_FROM_PTR (use_p));
	if (!gimple_nop_p (def_stmt)
	    && prop_simulate_again_p (def_stmt))
	  {
	    has_simulate_again_uses = true;
	    break;
	  }
      }
  if (!has_simulate_again_uses)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "marking stmt to be not simulated again\n");
      prop_set_simulate_again (stmt, false);
    }
}
316
/* Process one entry of the SSA edge worklist: pop the statement with
   the lowest UID (UIDs are assigned in execution order, so this is
   the earliest pending statement) and simulate it.  The worklist
   must not be empty when this is called.  */

static void
process_ssa_edge_worklist ()
{
  /* Process the next entry from the worklist.  */
  unsigned stmt_uid = bitmap_first_set_bit (ssa_edge_worklist);
  bitmap_clear_bit (ssa_edge_worklist, stmt_uid);
  gimple *stmt = uid_to_stmt[stmt_uid];

  /* We should not have stmts in not yet simulated BBs on the worklist.  */
  gcc_assert (gimple_bb (stmt)->flags & BB_VISITED);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSimulating statement: ");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
    }

  simulate_stmt (stmt);
}
343
344
/* Simulate the execution of BLOCK.  Evaluate the statement associated
   with each variable reference inside the block.  PHI nodes are always
   re-simulated (their value may change whenever a new incoming edge
   becomes executable); the block's other statements are simulated only
   the first time the block is reached.  */

static void
simulate_block (basic_block block)
{
  gimple_stmt_iterator gsi;

  /* There is nothing to do for the exit block.  */
  if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nSimulating block %d\n", block->index);

  /* Always simulate PHI nodes, even if we have simulated this block
     before.  */
  for (gsi = gsi_start_phis (block); !gsi_end_p (gsi); gsi_next (&gsi))
    simulate_stmt (gsi_stmt (gsi));

  /* If this is the first time we've simulated this block, then we
     must simulate each of its statements.  */
  if (! (block->flags & BB_VISITED))
    {
      gimple_stmt_iterator j;
      unsigned int normal_edge_count;
      edge e, normal_edge;
      edge_iterator ei;

      for (j = gsi_start_bb (block); !gsi_end_p (j); gsi_next (&j))
	simulate_stmt (gsi_stmt (j));

      /* Note that we have simulated this block.  */
      block->flags |= BB_VISITED;

      /* We can not predict when abnormal and EH edges will be executed, so
	 once a block is considered executable, we consider any
	 outgoing abnormal edges as executable.

	 TODO: This is not exactly true.  Simplifying statement might
	 prove it non-throwing and also computed goto can be handled
	 when destination is known.

	 At the same time, if this block has only one successor that is
	 reached by non-abnormal edges, then add that successor to the
	 worklist.  */
      normal_edge_count = 0;
      normal_edge = NULL;
      FOR_EACH_EDGE (e, ei, block->succs)
	{
	  if (e->flags & (EDGE_ABNORMAL | EDGE_EH))
	    add_control_edge (e);
	  else
	    {
	      normal_edge_count++;
	      normal_edge = e;
	    }
	}

      /* With a unique normal successor, control definitely flows
	 there; with more than one, simulating the terminating stmt
	 decides which edges become executable.  */
      if (normal_edge_count == 1)
	add_control_edge (normal_edge);
    }
}
408
409
/* Initialize local data structures and work lists.  Computes a
   reverse post-order over the CFG, assigns every PHI and statement
   a UID in that order (so worklist bitmaps iterate roughly in
   execution order), clears the BB_VISITED and EDGE_EXECUTABLE
   flags, and seeds the CFG worklist with the entry block's
   successors.  */

static void
ssa_prop_init (void)
{
  edge e;
  edge_iterator ei;
  basic_block bb;

  /* Worklists of SSA edges.  */
  ssa_edge_worklist = BITMAP_ALLOC (NULL);

  /* Worklist of basic-blocks.  */
  bb_to_cfg_order = XNEWVEC (int, last_basic_block_for_fn (cfun) + 1);
  cfg_order_to_bb = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  int n = pre_and_rev_post_order_compute_fn (cfun, NULL,
					     cfg_order_to_bb, false);
  for (int i = 0; i < n; ++i)
    bb_to_cfg_order[cfg_order_to_bb[i]] = i;
  cfg_blocks = BITMAP_ALLOC (NULL);

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_immediate_uses (dump_file);

  /* Initially assume that every edge in the CFG is not executable.
     (including the edges coming out of the entry block).  Mark blocks
     as not visited, blocks not yet visited will have all their statements
     simulated once an incoming edge gets executable.  */
  set_gimple_stmt_max_uid (cfun, 0);
  for (int i = 0; i < n; ++i)
    {
      gimple_stmt_iterator si;
      bb = BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb[i]);

      /* UIDs are handed out in RPO: first the block's PHIs, then its
	 statements in order.  */
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
	}

      bb->flags &= ~BB_VISITED;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->flags &= ~EDGE_EXECUTABLE;
    }
  /* Size the UID -> stmt map; entries are filled lazily by
     add_ssa_edge.  */
  uid_to_stmt.safe_grow (gimple_stmt_max_uid (cfun));

  /* Seed the algorithm by adding the successors of the entry block to the
     edge worklist.  */
  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
    {
      e->flags &= ~EDGE_EXECUTABLE;
      add_control_edge (e);
    }
}
470
471
472 /* Free allocated storage. */
473
474 static void
ssa_prop_fini(void)475 ssa_prop_fini (void)
476 {
477 BITMAP_FREE (cfg_blocks);
478 free (bb_to_cfg_order);
479 free (cfg_order_to_bb);
480 BITMAP_FREE (ssa_edge_worklist);
481 uid_to_stmt.release ();
482 basic_block bb;
483 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
484 bb->flags &= ~BB_VISITED;
485 }
486
487
/* Return true if EXPR is an acceptable right-hand-side for a
   GIMPLE assignment.  We validate the entire tree, not just
   the root node, thus catching expressions that embed complex
   operands that are not permitted in GIMPLE.  This function
   is needed because the folding routines in fold-const.c
   may return such expressions in some cases, e.g., an array
   access with an embedded index addition.  It may make more
   sense to have folding routines that are sensitive to the
   constraints on GIMPLE operands, rather than abandoning
   any attempt to fold if the usual folding turns out to be too
   aggressive.  */

bool
valid_gimple_rhs_p (tree expr)
{
  enum tree_code code = TREE_CODE (expr);

  switch (TREE_CODE_CLASS (code))
    {
    case tcc_declaration:
      if (!is_gimple_variable (expr))
	return false;
      break;

    case tcc_constant:
      /* All constants are ok.  */
      break;

    case tcc_comparison:
      /* GENERIC allows comparisons with non-boolean types, reject
         those for GIMPLE.  Let vector-typed comparisons pass - rules
	 for GENERIC and GIMPLE are the same here.  */
      if (!(INTEGRAL_TYPE_P (TREE_TYPE (expr))
	    && (TREE_CODE (TREE_TYPE (expr)) == BOOLEAN_TYPE
		|| TYPE_PRECISION (TREE_TYPE (expr)) == 1))
	  && ! VECTOR_TYPE_P (TREE_TYPE (expr)))
	return false;

      /* Fallthru.  */
    case tcc_binary:
      /* Both operands of a binary or comparison RHS must already be
	 GIMPLE values.  */
      if (!is_gimple_val (TREE_OPERAND (expr, 0))
	  || !is_gimple_val (TREE_OPERAND (expr, 1)))
	return false;
      break;

    case tcc_unary:
      if (!is_gimple_val (TREE_OPERAND (expr, 0)))
	return false;
      break;

    case tcc_expression:
      switch (code)
	{
	case ADDR_EXPR:
	  {
	    tree t;
	    if (is_gimple_min_invariant (expr))
	      return true;
	    /* Walk down the reference chain checking that any array
	       indices are GIMPLE values and that the base is a
	       declaration or SSA name.  */
	    t = TREE_OPERAND (expr, 0);
	    while (handled_component_p (t))
	      {
		/* ??? More checks needed, see the GIMPLE verifier.  */
		if ((TREE_CODE (t) == ARRAY_REF
		     || TREE_CODE (t) == ARRAY_RANGE_REF)
		    && !is_gimple_val (TREE_OPERAND (t, 1)))
		  return false;
		t = TREE_OPERAND (t, 0);
	      }
	    if (!is_gimple_id (t))
	      return false;
	  }
	  break;

	default:
	  if (get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS)
	    {
	      /* The first operand of a COND_EXPR/VEC_COND_EXPR may be
		 a condition expression; all other ternary operands
		 must be GIMPLE values.  */
	      if (((code == VEC_COND_EXPR || code == COND_EXPR)
		   ? !is_gimple_condexpr (TREE_OPERAND (expr, 0))
		   : !is_gimple_val (TREE_OPERAND (expr, 0)))
		  || !is_gimple_val (TREE_OPERAND (expr, 1))
		  || !is_gimple_val (TREE_OPERAND (expr, 2)))
		return false;
	      break;
	    }
	  return false;
	}
      break;

    case tcc_vl_exp:
      return false;

    case tcc_exceptional:
      if (code == CONSTRUCTOR)
	{
	  /* A CONSTRUCTOR is valid if all its elements are GIMPLE
	     values.  */
	  unsigned i;
	  tree elt;
	  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expr), i, elt)
	    if (!is_gimple_val (elt))
	      return false;
	  return true;
	}
      if (code != SSA_NAME)
	return false;
      break;

    case tcc_reference:
      if (code == BIT_FIELD_REF)
	return is_gimple_val (TREE_OPERAND (expr, 0));
      return false;

    default:
      return false;
    }

  return true;
}
604
605
606 /* Return true if EXPR is a CALL_EXPR suitable for representation
607 as a single GIMPLE_CALL statement. If the arguments require
608 further gimplification, return false. */
609
610 static bool
valid_gimple_call_p(tree expr)611 valid_gimple_call_p (tree expr)
612 {
613 unsigned i, nargs;
614
615 if (TREE_CODE (expr) != CALL_EXPR)
616 return false;
617
618 nargs = call_expr_nargs (expr);
619 for (i = 0; i < nargs; i++)
620 {
621 tree arg = CALL_EXPR_ARG (expr, i);
622 if (is_gimple_reg_type (TREE_TYPE (arg)))
623 {
624 if (!is_gimple_val (arg))
625 return false;
626 }
627 else
628 if (!is_gimple_lvalue (arg))
629 return false;
630 }
631
632 return true;
633 }
634
635
636 /* Make SSA names defined by OLD_STMT point to NEW_STMT
637 as their defining statement. */
638
639 void
move_ssa_defining_stmt_for_defs(gimple * new_stmt,gimple * old_stmt)640 move_ssa_defining_stmt_for_defs (gimple *new_stmt, gimple *old_stmt)
641 {
642 tree var;
643 ssa_op_iter iter;
644
645 if (gimple_in_ssa_p (cfun))
646 {
647 /* Make defined SSA_NAMEs point to the new
648 statement as their definition. */
649 FOR_EACH_SSA_TREE_OPERAND (var, old_stmt, iter, SSA_OP_ALL_DEFS)
650 {
651 if (TREE_CODE (var) == SSA_NAME)
652 SSA_NAME_DEF_STMT (var) = new_stmt;
653 }
654 }
655 }
656
/* Helper function for update_gimple_call and update_call_from_tree.
   A GIMPLE_CALL STMT is being replaced with GIMPLE_CALL NEW_STMT:
   carry the LHS, SSA definitions, virtual operands, location and
   (if NEW_STMT has none) block over from STMT, then swap the
   statements at *SI_P.  gsi_replace is called with update_eh_info
   false, so EH information is left for callers to clean up.  */

static void
finish_update_gimple_call (gimple_stmt_iterator *si_p, gimple *new_stmt,
			   gimple *stmt)
{
  gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
  gimple_set_location (new_stmt, gimple_location (stmt));
  if (gimple_block (new_stmt) == NULL_TREE)
    gimple_set_block (new_stmt, gimple_block (stmt));
  gsi_replace (si_p, new_stmt, false);
}
673
/* Update a GIMPLE_CALL statement at iterator *SI_P to call to FN
   with number of arguments NARGS, where the arguments in GIMPLE form
   follow NARGS argument.  Always returns true.  */

bool
update_gimple_call (gimple_stmt_iterator *si_p, tree fn, int nargs, ...)
{
  va_list ap;
  gcall *new_stmt, *stmt = as_a <gcall *> (gsi_stmt (*si_p));

  gcc_assert (is_gimple_call (stmt));
  va_start (ap, nargs);
  /* Build the replacement call from the variadic arguments and carry
     over LHS, virtual operands, location and block from STMT.  */
  new_stmt = gimple_build_call_valist (fn, nargs, ap);
  finish_update_gimple_call (si_p, new_stmt, stmt);
  va_end (ap);
  return true;
}
691
/* Update a GIMPLE_CALL statement at iterator *SI_P to reflect the
   value of EXPR, which is expected to be the result of folding the
   call.  This can only be done if EXPR is a CALL_EXPR with valid
   GIMPLE operands as arguments, or if it is a suitable RHS expression
   for a GIMPLE_ASSIGN.  More complex expressions will require
   gimplification, which will introduce additional statements.  In this
   event, no update is performed, and the function returns false.
   Note that we cannot mutate a GIMPLE_CALL in-place, so we always
   replace the statement at *SI_P with an entirely new statement.
   The new statement need not be a call, e.g., if the original call
   folded to a constant.  */

bool
update_call_from_tree (gimple_stmt_iterator *si_p, tree expr)
{
  gimple *stmt = gsi_stmt (*si_p);

  if (valid_gimple_call_p (expr))
    {
      /* The call has simplified to another call.  */
      tree fn = CALL_EXPR_FN (expr);
      unsigned i;
      unsigned nargs = call_expr_nargs (expr);
      vec<tree> args = vNULL;
      gcall *new_stmt;

      if (nargs > 0)
	{
	  args.create (nargs);
	  args.safe_grow_cleared (nargs);

	  for (i = 0; i < nargs; i++)
	    args[i] = CALL_EXPR_ARG (expr, i);
	}

      new_stmt = gimple_build_call_vec (fn, args);
      finish_update_gimple_call (si_p, new_stmt, stmt);
      args.release ();

      return true;
    }
  else if (valid_gimple_rhs_p (expr))
    {
      tree lhs = gimple_call_lhs (stmt);
      gimple *new_stmt;

      /* The call has simplified to an expression
	 that cannot be represented as a GIMPLE_CALL.  */
      if (lhs)
	{
	  /* A value is expected.
	     Introduce a new GIMPLE_ASSIGN statement.  */
	  STRIP_USELESS_TYPE_CONVERSION (expr);
	  new_stmt = gimple_build_assign (lhs, expr);
	  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
	  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
	}
      else if (!TREE_SIDE_EFFECTS (expr))
	{
	  /* No value is expected, and EXPR has no effect.
	     Replace it with an empty statement.  */
	  new_stmt = gimple_build_nop ();
	  if (gimple_in_ssa_p (cfun))
	    {
	      unlink_stmt_vdef (stmt);
	      release_defs (stmt);
	    }
	}
      else
	{
	  /* No value is expected, but EXPR has an effect,
	     e.g., it could be a reference to a volatile
	     variable.  Create an assignment statement
	     with a dummy (unused) lhs variable.  */
	  STRIP_USELESS_TYPE_CONVERSION (expr);
	  if (gimple_in_ssa_p (cfun))
	    lhs = make_ssa_name (TREE_TYPE (expr));
	  else
	    lhs = create_tmp_var (TREE_TYPE (expr));
	  new_stmt = gimple_build_assign (lhs, expr);
	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
	  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
	  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
	}
      gimple_set_location (new_stmt, gimple_location (stmt));
      gsi_replace (si_p, new_stmt, false);
      return true;
    }
  else
    /* The call simplified to an expression that is
       not a valid GIMPLE RHS.  */
    return false;
}
786
787
788 /* Entry point to the propagation engine.
789
790 VISIT_STMT is called for every statement visited.
791 VISIT_PHI is called for every PHI node visited. */
792
793 void
ssa_propagate(ssa_prop_visit_stmt_fn visit_stmt,ssa_prop_visit_phi_fn visit_phi)794 ssa_propagate (ssa_prop_visit_stmt_fn visit_stmt,
795 ssa_prop_visit_phi_fn visit_phi)
796 {
797 ssa_prop_visit_stmt = visit_stmt;
798 ssa_prop_visit_phi = visit_phi;
799
800 ssa_prop_init ();
801
802 /* Iterate until the worklists are empty. */
803 while (! cfg_blocks_empty_p ()
804 || ! bitmap_empty_p (ssa_edge_worklist))
805 {
806 /* First simulate whole blocks. */
807 if (! cfg_blocks_empty_p ())
808 {
809 /* Pull the next block to simulate off the worklist. */
810 basic_block dest_block = cfg_blocks_get ();
811 simulate_block (dest_block);
812 continue;
813 }
814
815 /* Then simulate from the SSA edge worklist. */
816 process_ssa_edge_worklist ();
817 }
818
819 ssa_prop_fini ();
820 }
821
822
823 /* Return true if STMT is of the form 'mem_ref = RHS', where 'mem_ref'
824 is a non-volatile pointer dereference, a structure reference or a
825 reference to a single _DECL. Ignore volatile memory references
826 because they are not interesting for the optimizers. */
827
828 bool
stmt_makes_single_store(gimple * stmt)829 stmt_makes_single_store (gimple *stmt)
830 {
831 tree lhs;
832
833 if (gimple_code (stmt) != GIMPLE_ASSIGN
834 && gimple_code (stmt) != GIMPLE_CALL)
835 return false;
836
837 if (!gimple_vdef (stmt))
838 return false;
839
840 lhs = gimple_get_lhs (stmt);
841
842 /* A call statement may have a null LHS. */
843 if (!lhs)
844 return false;
845
846 return (!TREE_THIS_VOLATILE (lhs)
847 && (DECL_P (lhs)
848 || REFERENCE_CLASS_P (lhs)));
849 }
850
851
/* Propagation statistics, accumulated by the substitute-and-fold
   machinery below.  */
struct prop_stats_d
{
  long num_const_prop;		/* Constants propagated into uses.  */
  long num_copy_prop;		/* SSA name copies propagated into uses.  */
  long num_stmts_folded;	/* Statements simplified by the fold
				   callback.  */
  long num_dce;			/* Statements removed as dead.  */
};

static struct prop_stats_d prop_stats;
862
863 /* Replace USE references in statement STMT with the values stored in
864 PROP_VALUE. Return true if at least one reference was replaced. */
865
866 static bool
replace_uses_in(gimple * stmt,ssa_prop_get_value_fn get_value)867 replace_uses_in (gimple *stmt, ssa_prop_get_value_fn get_value)
868 {
869 bool replaced = false;
870 use_operand_p use;
871 ssa_op_iter iter;
872
873 FOR_EACH_SSA_USE_OPERAND (use, stmt, iter, SSA_OP_USE)
874 {
875 tree tuse = USE_FROM_PTR (use);
876 tree val = (*get_value) (tuse);
877
878 if (val == tuse || val == NULL_TREE)
879 continue;
880
881 if (gimple_code (stmt) == GIMPLE_ASM
882 && !may_propagate_copy_into_asm (tuse))
883 continue;
884
885 if (!may_propagate_copy (tuse, val))
886 continue;
887
888 if (TREE_CODE (val) != SSA_NAME)
889 prop_stats.num_const_prop++;
890 else
891 prop_stats.num_copy_prop++;
892
893 propagate_value (use, val);
894
895 replaced = true;
896 }
897
898 return replaced;
899 }
900
901
/* Replace the arguments of PHI with the values returned by GET_VALUE
   where propagation is possible.  Return true if any argument was
   replaced.  */

static bool
replace_phi_args_in (gphi *phi, ssa_prop_get_value_fn get_value)
{
  size_t i;
  bool replaced = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Folding PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
    }

  basic_block bb = gimple_bb (phi);
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);

      if (TREE_CODE (arg) == SSA_NAME)
	{
	  tree val = (*get_value) (arg);

	  if (val && val != arg && may_propagate_copy (arg, val))
	    {
	      edge e = gimple_phi_arg_edge (phi, i);

	      /* Avoid propagating constants into loop latch edge
		 PHI arguments as this makes coalescing the copy
		 across this edge impossible.  Only do so when the
		 argument is defined by an ASSERT_EXPR - that stmt
		 would otherwise get removed without replacing its
		 uses.  */
	      if (TREE_CODE (val) != SSA_NAME
		  && bb->loop_father->header == bb
		  && dominated_by_p (CDI_DOMINATORS, e->src, bb)
		  && is_gimple_assign (SSA_NAME_DEF_STMT (arg))
		  && (gimple_assign_rhs_code (SSA_NAME_DEF_STMT (arg))
		      == ASSERT_EXPR))
		continue;

	      if (TREE_CODE (val) != SSA_NAME)
		prop_stats.num_const_prop++;
	      else
		prop_stats.num_copy_prop++;

	      propagate_value (PHI_ARG_DEF_PTR (phi, i), val);
	      replaced = true;

	      /* If we propagated a copy and this argument flows
		 through an abnormal edge, update the replacement
		 accordingly.  */
	      if (TREE_CODE (val) == SSA_NAME
		  && e->flags & EDGE_ABNORMAL
		  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
		{
		  /* This can only occur for virtual operands, since
		     for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
		     would prevent replacement.  */
		  gcc_checking_assert (virtual_operand_p (val));
		  SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1;
		}
	    }
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (!replaced)
	fprintf (dump_file, "No folding possible\n");
      else
	{
	  fprintf (dump_file, "Folded into: ");
	  print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
	  fprintf (dump_file, "\n");
	}
    }

  return replaced;
}
982
983
/* Dominator walker that substitutes known values into statements and
   PHIs and folds the result, collecting statements that become dead
   or need fixup along the way.  */

class substitute_and_fold_dom_walker : public dom_walker
{
public:
    substitute_and_fold_dom_walker (cdi_direction direction,
				    ssa_prop_get_value_fn get_value_fn_,
				    ssa_prop_fold_stmt_fn fold_fn_,
				    bool do_dce_)
	: dom_walker (direction), get_value_fn (get_value_fn_),
      fold_fn (fold_fn_), do_dce (do_dce_), something_changed (false)
    {
      stmts_to_remove.create (0);
      stmts_to_fixup.create (0);
      need_eh_cleanup = BITMAP_ALLOC (NULL);
    }
    ~substitute_and_fold_dom_walker ()
    {
      stmts_to_remove.release ();
      stmts_to_fixup.release ();
      BITMAP_FREE (need_eh_cleanup);
    }

    virtual edge before_dom_children (basic_block);
    virtual void after_dom_children (basic_block) {}

    /* Callback returning the known value of an SSA name, or NULL.  */
    ssa_prop_get_value_fn get_value_fn;
    /* Optional pass-specific folding callback, may be NULL.  */
    ssa_prop_fold_stmt_fn fold_fn;
    /* Whether statements whose value is fully known may be queued
       for removal.  */
    bool do_dce;
    /* Set when any substitution or fold changed the IL.  */
    bool something_changed;
    /* Statements found dead during the walk, removed afterwards.  */
    vec<gimple *> stmts_to_remove;
    /* Statements needing post-walk fixup.  */
    vec<gimple *> stmts_to_fixup;
    /* Blocks whose EH information may need cleaning up.  */
    bitmap need_eh_cleanup;
};
1016
/* Dominator-walk callback for BB: propagate known lattice values into
   BB's PHI nodes and statements, fold statements that changed, and
   queue statements that became trivially dead or turned into noreturn
   calls.  Always returns NULL (walk all outgoing edges).  */
1017 edge
before_dom_children(basic_block bb)1018 substitute_and_fold_dom_walker::before_dom_children (basic_block bb)
1019 {
1020 /* Propagate known values into PHI nodes. */
1021 for (gphi_iterator i = gsi_start_phis (bb);
1022 !gsi_end_p (i);
1023 gsi_next (&i))
1024 {
1025 gphi *phi = i.phi ();
1026 tree res = gimple_phi_result (phi);
1027 if (virtual_operand_p (res))
1028 continue;
/* If the PHI result itself has a known replacement usable at all its
   uses, queue the whole PHI for removal instead of propagating into
   its arguments.  */
1029 if (do_dce
1030 && res && TREE_CODE (res) == SSA_NAME)
1031 {
1032 tree sprime = get_value_fn (res);
1033 if (sprime
1034 && sprime != res
1035 && may_propagate_copy (res, sprime))
1036 {
1037 stmts_to_remove.safe_push (phi);
1038 continue;
1039 }
1040 }
1041 something_changed |= replace_phi_args_in (phi, get_value_fn);
1042 }
1043
1044 /* Propagate known values into stmts. In some case it exposes
1045 more trivially deletable stmts to walk backward. */
1046 for (gimple_stmt_iterator i = gsi_start_bb (bb);
1047 !gsi_end_p (i);
1048 gsi_next (&i))
1049 {
1050 bool did_replace;
1051 gimple *stmt = gsi_stmt (i);
1052 enum gimple_code code = gimple_code (stmt);
1053
1054 /* Ignore ASSERT_EXPRs. They are used by VRP to generate
1055 range information for names and they are discarded
1056 afterwards. */
1057
1058 if (code == GIMPLE_ASSIGN
1059 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
1060 continue;
1061
1062 /* No point propagating into a stmt we have a value for we
1063 can propagate into all uses. Mark it for removal instead. */
1064 tree lhs = gimple_get_lhs (stmt);
1065 if (do_dce
1066 && lhs && TREE_CODE (lhs) == SSA_NAME)
1067 {
1068 tree sprime = get_value_fn (lhs);
1069 if (sprime
1070 && sprime != lhs
1071 && may_propagate_copy (lhs, sprime)
1072 && !stmt_could_throw_p (stmt)
1073 && !gimple_has_side_effects (stmt))
1074 {
1075 stmts_to_remove.safe_push (stmt);
1076 continue;
1077 }
1078 }
1079
1080 /* Replace the statement with its folded version and mark it
1081 folded. */
1082 did_replace = false;
1083 if (dump_file && (dump_flags & TDF_DETAILS))
1084 {
1085 fprintf (dump_file, "Folding statement: ");
1086 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1087 }
1088
/* Remember the pre-fold statement so EH info can be compared below.  */
1089 gimple *old_stmt = stmt;
1090 bool was_noreturn = (is_gimple_call (stmt)
1091 && gimple_call_noreturn_p (stmt));
1092
1093 /* Some statements may be simplified using propagator
1094 specific information. Do this before propagating
1095 into the stmt to not disturb pass specific information. */
1096 if (fold_fn
1097 && (*fold_fn)(&i))
1098 {
1099 did_replace = true;
1100 prop_stats.num_stmts_folded++;
/* The fold callback may have replaced the statement; re-fetch it.  */
1101 stmt = gsi_stmt (i);
1102 update_stmt (stmt);
1103 }
1104
1105 /* Replace real uses in the statement. */
1106 did_replace |= replace_uses_in (stmt, get_value_fn);
1107
1108 /* If we made a replacement, fold the statement. */
1109 if (did_replace)
1110 {
1111 fold_stmt (&i, follow_single_use_edges);
1112 stmt = gsi_stmt (i);
1113 }
1114
1115 /* If this is a control statement the propagator left edges
1116 unexecuted on force the condition in a way consistent with
1117 that. See PR66945 for cases where the propagator can end
1118 up with a different idea of a taken edge than folding
1119 (once undefined behavior is involved). */
1120 if (gimple_code (stmt) == GIMPLE_COND)
1121 {
/* XOR: exactly one of the two successor edges is marked executable.  */
1122 if ((EDGE_SUCC (bb, 0)->flags & EDGE_EXECUTABLE)
1123 ^ (EDGE_SUCC (bb, 1)->flags & EDGE_EXECUTABLE))
1124 {
1125 if (((EDGE_SUCC (bb, 0)->flags & EDGE_TRUE_VALUE) != 0)
1126 == ((EDGE_SUCC (bb, 0)->flags & EDGE_EXECUTABLE) != 0))
1127 gimple_cond_make_true (as_a <gcond *> (stmt));
1128 else
1129 gimple_cond_make_false (as_a <gcond *> (stmt));
1130 did_replace = true;
1131 }
1132 }
1133
1134 /* Now cleanup. */
1135 if (did_replace)
1136 {
1137 /* If we cleaned up EH information from the statement,
1138 remove EH edges. */
1139 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
1140 bitmap_set_bit (need_eh_cleanup, bb->index);
1141
1142 /* If we turned a not noreturn call into a noreturn one
1143 schedule it for fixup. */
1144 if (!was_noreturn
1145 && is_gimple_call (stmt)
1146 && gimple_call_noreturn_p (stmt))
1147 stmts_to_fixup.safe_push (stmt);
1148
1149 if (gimple_assign_single_p (stmt))
1150 {
1151 tree rhs = gimple_assign_rhs1 (stmt);
1152
/* The invariant-ness of an ADDR_EXPR may change after substitution.  */
1153 if (TREE_CODE (rhs) == ADDR_EXPR)
1154 recompute_tree_invariant_for_addr_expr (rhs);
1155 }
1156
1157 /* Determine what needs to be done to update the SSA form. */
1158 update_stmt (stmt);
1159 if (!is_gimple_debug (stmt))
1160 something_changed = true;
1161 }
1162
1163 if (dump_file && (dump_flags & TDF_DETAILS))
1164 {
1165 if (did_replace)
1166 {
1167 fprintf (dump_file, "Folded into: ");
1168 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1169 fprintf (dump_file, "\n");
1170 }
1171 else
1172 fprintf (dump_file, "Not folded\n");
1173 }
1174 }
1175 return NULL;
1176 }
1177
1178
1179
1180 /* Perform final substitution and folding of propagated values.
1181
1182 PROP_VALUE[I] contains the single value that should be substituted
1183 at every use of SSA name N_I. If PROP_VALUE is NULL, no values are
1184 substituted.
1185
1186 If FOLD_FN is non-NULL the function will be invoked on all statements
1187 before propagating values for pass specific simplification.
1188
1189 DO_DCE is true if trivially dead stmts can be removed.
1190
1191 If DO_DCE is true, the statements within a BB are walked from
1192 last to first element. Otherwise we scan from first to last element.
1193
1194 Return TRUE when something changed. */
1195
1196 bool
substitute_and_fold(ssa_prop_get_value_fn get_value_fn,ssa_prop_fold_stmt_fn fold_fn,bool do_dce)1197 substitute_and_fold (ssa_prop_get_value_fn get_value_fn,
1198 ssa_prop_fold_stmt_fn fold_fn,
1199 bool do_dce)
1200 {
1201 gcc_assert (get_value_fn);
1202
1203 if (dump_file && (dump_flags & TDF_DETAILS))
1204 fprintf (dump_file, "\nSubstituting values and folding statements\n\n");
1205
1206 memset (&prop_stats, 0, sizeof (prop_stats));
1207
1208 calculate_dominance_info (CDI_DOMINATORS);
1209 substitute_and_fold_dom_walker walker(CDI_DOMINATORS,
1210 get_value_fn, fold_fn, do_dce);
1211 walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
1212
1213 /* We cannot remove stmts during the BB walk, especially not release
1214 SSA names there as that destroys the lattice of our callers.
1215 Remove stmts in reverse order to make debug stmt creation possible. */
1216 while (!walker.stmts_to_remove.is_empty ())
1217 {
1218 gimple *stmt = walker.stmts_to_remove.pop ();
1219 if (dump_file && dump_flags & TDF_DETAILS)
1220 {
1221 fprintf (dump_file, "Removing dead stmt ");
1222 print_gimple_stmt (dump_file, stmt, 0, 0);
1223 fprintf (dump_file, "\n");
1224 }
1225 prop_stats.num_dce++;
1226 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1227 if (gimple_code (stmt) == GIMPLE_PHI)
1228 remove_phi_node (&gsi, true);
1229 else
1230 {
1231 unlink_stmt_vdef (stmt);
1232 gsi_remove (&gsi, true);
1233 release_defs (stmt);
1234 }
1235 }
1236
1237 if (!bitmap_empty_p (walker.need_eh_cleanup))
1238 gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);
1239
1240 /* Fixup stmts that became noreturn calls. This may require splitting
1241 blocks and thus isn't possible during the dominator walk. Do this
1242 in reverse order so we don't inadvertedly remove a stmt we want to
1243 fixup by visiting a dominating now noreturn call first. */
1244 while (!walker.stmts_to_fixup.is_empty ())
1245 {
1246 gimple *stmt = walker.stmts_to_fixup.pop ();
1247 if (dump_file && dump_flags & TDF_DETAILS)
1248 {
1249 fprintf (dump_file, "Fixing up noreturn call ");
1250 print_gimple_stmt (dump_file, stmt, 0, 0);
1251 fprintf (dump_file, "\n");
1252 }
1253 fixup_noreturn_call (stmt);
1254 }
1255
1256 statistics_counter_event (cfun, "Constants propagated",
1257 prop_stats.num_const_prop);
1258 statistics_counter_event (cfun, "Copies propagated",
1259 prop_stats.num_copy_prop);
1260 statistics_counter_event (cfun, "Statements folded",
1261 prop_stats.num_stmts_folded);
1262 statistics_counter_event (cfun, "Statements deleted",
1263 prop_stats.num_dce);
1264
1265 return walker.something_changed;
1266 }
1267
1268
1269 /* Return true if we may propagate ORIG into DEST, false otherwise. */
1270
1271 bool
may_propagate_copy(tree dest,tree orig)1272 may_propagate_copy (tree dest, tree orig)
1273 {
1274 tree type_d = TREE_TYPE (dest);
1275 tree type_o = TREE_TYPE (orig);
1276
1277 /* If ORIG is a default definition which flows in from an abnormal edge
1278 then the copy can be propagated. It is important that we do so to avoid
1279 uninitialized copies. */
1280 if (TREE_CODE (orig) == SSA_NAME
1281 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig)
1282 && SSA_NAME_IS_DEFAULT_DEF (orig)
1283 && (SSA_NAME_VAR (orig) == NULL_TREE
1284 || TREE_CODE (SSA_NAME_VAR (orig)) == VAR_DECL))
1285 ;
1286 /* Otherwise if ORIG just flows in from an abnormal edge then the copy cannot
1287 be propagated. */
1288 else if (TREE_CODE (orig) == SSA_NAME
1289 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
1290 return false;
1291 /* Similarly if DEST flows in from an abnormal edge then the copy cannot be
1292 propagated. */
1293 else if (TREE_CODE (dest) == SSA_NAME
1294 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (dest))
1295 return false;
1296
1297 /* Do not copy between types for which we *do* need a conversion. */
1298 if (!useless_type_conversion_p (type_d, type_o))
1299 return false;
1300
1301 /* Generally propagating virtual operands is not ok as that may
1302 create overlapping life-ranges. */
1303 if (TREE_CODE (dest) == SSA_NAME && virtual_operand_p (dest))
1304 return false;
1305
1306 /* Anything else is OK. */
1307 return true;
1308 }
1309
1310 /* Like may_propagate_copy, but use as the destination expression
1311 the principal expression (typically, the RHS) contained in
1312 statement DEST. This is more efficient when working with the
1313 gimple tuples representation. */
1314
1315 bool
may_propagate_copy_into_stmt(gimple * dest,tree orig)1316 may_propagate_copy_into_stmt (gimple *dest, tree orig)
1317 {
1318 tree type_d;
1319 tree type_o;
1320
1321 /* If the statement is a switch or a single-rhs assignment,
1322 then the expression to be replaced by the propagation may
1323 be an SSA_NAME. Fortunately, there is an explicit tree
1324 for the expression, so we delegate to may_propagate_copy. */
1325
1326 if (gimple_assign_single_p (dest))
1327 return may_propagate_copy (gimple_assign_rhs1 (dest), orig);
1328 else if (gswitch *dest_swtch = dyn_cast <gswitch *> (dest))
1329 return may_propagate_copy (gimple_switch_index (dest_swtch), orig);
1330
1331 /* In other cases, the expression is not materialized, so there
1332 is no destination to pass to may_propagate_copy. On the other
1333 hand, the expression cannot be an SSA_NAME, so the analysis
1334 is much simpler. */
1335
1336 if (TREE_CODE (orig) == SSA_NAME
1337 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
1338 return false;
1339
1340 if (is_gimple_assign (dest))
1341 type_d = TREE_TYPE (gimple_assign_lhs (dest));
1342 else if (gimple_code (dest) == GIMPLE_COND)
1343 type_d = boolean_type_node;
1344 else if (is_gimple_call (dest)
1345 && gimple_call_lhs (dest) != NULL_TREE)
1346 type_d = TREE_TYPE (gimple_call_lhs (dest));
1347 else
1348 gcc_unreachable ();
1349
1350 type_o = TREE_TYPE (orig);
1351
1352 if (!useless_type_conversion_p (type_d, type_o))
1353 return false;
1354
1355 return true;
1356 }
1357
1358 /* Similarly, but we know that we're propagating into an ASM_EXPR. */
1359
1360 bool
may_propagate_copy_into_asm(tree dest ATTRIBUTE_UNUSED)1361 may_propagate_copy_into_asm (tree dest ATTRIBUTE_UNUSED)
1362 {
/* No additional restrictions apply when propagating into an ASM_EXPR
   operand.  */
1363 return true;
1364 }
1365
1366
1367 /* Common code for propagate_value and replace_exp.
1368
1369 Replace use operand OP_P with VAL. FOR_PROPAGATION indicates if the
1370 replacement is done to propagate a value or not. */
1371
1372 static void
replace_exp_1(use_operand_p op_p,tree val,bool for_propagation ATTRIBUTE_UNUSED)1373 replace_exp_1 (use_operand_p op_p, tree val,
1374 bool for_propagation ATTRIBUTE_UNUSED)
1375 {
1376 if (flag_checking)
1377 {
1378 tree op = USE_FROM_PTR (op_p);
1379 gcc_assert (!(for_propagation
1380 && TREE_CODE (op) == SSA_NAME
1381 && TREE_CODE (val) == SSA_NAME
1382 && !may_propagate_copy (op, val)));
1383 }
1384
1385 if (TREE_CODE (val) == SSA_NAME)
1386 SET_USE (op_p, val);
1387 else
1388 SET_USE (op_p, unshare_expr (val));
1389 }
1390
1391
1392 /* Propagate the value VAL (assumed to be a constant or another SSA_NAME)
1393 into the operand pointed to by OP_P.
1394
1395 Use this version for const/copy propagation as it will perform additional
1396 checks to ensure validity of the const/copy propagation. */
1397
1398 void
propagate_value(use_operand_p op_p,tree val)1399 propagate_value (use_operand_p op_p, tree val)
1400 {
/* FOR_PROPAGATION is true so the const/copy validity checks run
   (under flag_checking).  */
1401 replace_exp_1 (op_p, val, true);
1402 }
1403
1404 /* Replace *OP_P with value VAL (assumed to be a constant or another SSA_NAME).
1405
1406 Use this version when not const/copy propagating values. For example,
1407 PRE uses this version when building expressions as they would appear
1408 in specific blocks taking into account actions of PHI nodes.
1409
1410 The statement in which an expression has been replaced should be
1411 folded using fold_stmt_inplace. */
1412
1413 void
replace_exp(use_operand_p op_p,tree val)1414 replace_exp (use_operand_p op_p, tree val)
1415 {
/* FOR_PROPAGATION is false: no const/copy propagation checks.  */
1416 replace_exp_1 (op_p, val, false);
1417 }
1418
1419
1420 /* Propagate the value VAL (assumed to be a constant or another SSA_NAME)
1421 into the tree pointed to by OP_P.
1422
1423 Use this version for const/copy propagation when SSA operands are not
1424 available. It will perform the additional checks to ensure validity of
1425 the const/copy propagation, but will not update any operand information.
1426 Be sure to mark the stmt as modified. */
1427
1428 void
propagate_tree_value(tree * op_p,tree val)1429 propagate_tree_value (tree *op_p, tree val)
1430 {
1431 if (TREE_CODE (val) == SSA_NAME)
1432 *op_p = val;
1433 else
1434 *op_p = unshare_expr (val);
1435 }
1436
1437
1438 /* Like propagate_tree_value, but use as the operand to replace
1439 the principal expression (typically, the RHS) contained in the
1440 statement referenced by iterator GSI. Note that it is not
1441 always possible to update the statement in-place, so a new
1442 statement may be created to replace the original. */
1443
1444 void
propagate_tree_value_into_stmt(gimple_stmt_iterator * gsi,tree val)1445 propagate_tree_value_into_stmt (gimple_stmt_iterator *gsi, tree val)
1446 {
1447 gimple *stmt = gsi_stmt (*gsi);
1448
1449 if (is_gimple_assign (stmt))
1450 {
1451 tree expr = NULL_TREE;
1452 if (gimple_assign_single_p (stmt))
1453 expr = gimple_assign_rhs1 (stmt);
1454 propagate_tree_value (&expr, val);
1455 gimple_assign_set_rhs_from_tree (gsi, expr);
1456 }
1457 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
1458 {
1459 tree lhs = NULL_TREE;
1460 tree rhs = build_zero_cst (TREE_TYPE (val));
1461 propagate_tree_value (&lhs, val);
1462 gimple_cond_set_code (cond_stmt, NE_EXPR);
1463 gimple_cond_set_lhs (cond_stmt, lhs);
1464 gimple_cond_set_rhs (cond_stmt, rhs);
1465 }
1466 else if (is_gimple_call (stmt)
1467 && gimple_call_lhs (stmt) != NULL_TREE)
1468 {
1469 tree expr = NULL_TREE;
1470 bool res;
1471 propagate_tree_value (&expr, val);
1472 res = update_call_from_tree (gsi, expr);
1473 gcc_assert (res);
1474 }
1475 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1476 propagate_tree_value (gimple_switch_index_ptr (swtch_stmt), val);
1477 else
1478 gcc_unreachable ();
1479 }
1480