/* Generic SSA value propagation engine.
   Copyright (C) 2004-2020 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "dumpfile.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa.h"
#include "tree-ssa-propagate.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-cfgcleanup.h"
#include "cfganal.h"

/* This file implements a generic value propagation engine based on
   the same propagation used by the SSA-CCP algorithm [1].

   Propagation is performed by simulating the execution of every
   statement that produces the value being propagated.  Simulation
   proceeds as follows:

   1- Initially, all edges of the CFG are marked not executable and
      the CFG worklist is seeded with all the statements in the entry
      basic block (block 0).

   2- Every statement S is simulated with a call to the call-back
      function SSA_PROP_VISIT_STMT.  This evaluation may produce 3
      results:

	SSA_PROP_NOT_INTERESTING: Statement S produces nothing of
	    interest and does not affect any of the work lists.
	    The statement may be simulated again if any of its input
	    operands change in future iterations of the simulator.

	SSA_PROP_VARYING: The value produced by S cannot be determined
	    at compile time.  Further simulation of S is not required.
	    If S is a conditional jump, all the outgoing edges for the
	    block are considered executable and added to the work
	    list.

	SSA_PROP_INTERESTING: S produces a value that can be computed
	    at compile time.  Its result can be propagated into the
	    statements that feed from S.  Furthermore, if S is a
	    conditional jump, only the edge known to be taken is added
	    to the work list.  Edges that are known not to execute are
	    never simulated.

   3- PHI nodes are simulated with a call to SSA_PROP_VISIT_PHI.  The
      return value from SSA_PROP_VISIT_PHI has the same semantics as
      described in #2.

   4- Two work lists are kept.  Statements are only added to these
      lists if they produce one of SSA_PROP_INTERESTING or
      SSA_PROP_VARYING.

	CFG_BLOCKS contains the list of blocks to be simulated.
	    Blocks are added to this list if their incoming edges are
	    found executable.

	SSA_EDGE_WORKLIST contains the list of statements that we
	    need to revisit.

   5- Simulation terminates when both work lists are drained.

   Before calling ssa_propagate, it is important to set
   prop_simulate_again_p for all the statements in the program that
   should be simulated.  This initialization allows an implementation
   to specify which statements should never be simulated.

   It is also important to compute def-use information before calling
   ssa_propagate.

   References:

     [1] Constant propagation with conditional branches,
         Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     [2] Building an Optimizing Compiler,
	 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     [3] Advanced Compiler Design and Implementation,
	 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
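
/* As an illustrative sketch (not part of this engine), a client pass
   subclasses the engine and implements the two visit hooks; the class
   name and lattice details below are hypothetical:

     class sketch_prop_engine : public ssa_propagation_engine
     {
     public:
       enum ssa_prop_result visit_stmt (gimple *stmt, edge *taken_edge_p,
					tree *output_p) override
       {
	 // Evaluate STMT in the pass's lattice; on success set *OUTPUT_P
	 // (and *TAKEN_EDGE_P for conditionals) and return
	 // SSA_PROP_INTERESTING.
	 return SSA_PROP_VARYING;
       }
       enum ssa_prop_result visit_phi (gphi *phi) override
       {
	 // Meet the lattice values of the arguments on executable edges.
	 return SSA_PROP_NOT_INTERESTING;
       }
     };

   The pass marks the statements it cares about with
   prop_set_simulate_again and then calls
   sketch_prop_engine ().ssa_propagate ().  */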

/* Worklists of control flow edge destinations.  This contains
   the CFG order number of the blocks so we can iterate in CFG
   order by visiting in bit-order.  We use two worklists to
   first make forward progress before iterating.  */
static bitmap cfg_blocks;
static bitmap cfg_blocks_back;
static int *bb_to_cfg_order;
static int *cfg_order_to_bb;

/* Worklists of SSA edges which will need reexamination as their
   definition has changed.  SSA edges are def-use edges in the SSA
   web.  For each D-U edge, we store the target statement or PHI node
   UID in a bitmap.  UIDs order stmts in execution order.  We use
   two worklists to first make forward progress before iterating.  */
static bitmap ssa_edge_worklist;
static bitmap ssa_edge_worklist_back;
static vec<gimple *> uid_to_stmt;

/* Current RPO index in the iteration.  */
static int curr_order;


/* We have just defined a new value for VAR.  Add all immediate uses
   of VAR to the appropriate SSA edge worklist (the backedge worklist
   for uses that come before the current RPO position).  */

static void
add_ssa_edge (tree var)
{
  imm_use_iterator iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (!prop_simulate_again_p (use_stmt))
	continue;

      /* If we did not yet simulate the block, wait for this to happen
	 and do not add the stmt to the SSA edge worklist.  */
      basic_block use_bb = gimple_bb (use_stmt);
      if (! (use_bb->flags & BB_VISITED))
	continue;

      /* If this is a use on a not yet executable edge do not bother to
	 queue it.  */
      if (gimple_code (use_stmt) == GIMPLE_PHI
	  && !(EDGE_PRED (use_bb, PHI_ARG_INDEX_FROM_USE (use_p))->flags
	       & EDGE_EXECUTABLE))
	continue;

      bitmap worklist;
      if (bb_to_cfg_order[gimple_bb (use_stmt)->index] < curr_order)
	worklist = ssa_edge_worklist_back;
      else
	worklist = ssa_edge_worklist;
      if (bitmap_set_bit (worklist, gimple_uid (use_stmt)))
	{
	  uid_to_stmt[gimple_uid (use_stmt)] = use_stmt;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "ssa_edge_worklist: adding SSA use in ");
	      print_gimple_stmt (dump_file, use_stmt, 0, TDF_SLIM);
	    }
	}
    }
}


/* Add edge E to the control flow worklist.  */

static void
add_control_edge (edge e)
{
  basic_block bb = e->dest;
  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;

  /* If the edge had already been marked executable, skip it.  */
  if (e->flags & EDGE_EXECUTABLE)
    return;

  e->flags |= EDGE_EXECUTABLE;

  int bb_order = bb_to_cfg_order[bb->index];
  if (bb_order < curr_order)
    bitmap_set_bit (cfg_blocks_back, bb_order);
  else
    bitmap_set_bit (cfg_blocks, bb_order);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Adding destination of edge (%d -> %d) to worklist\n",
	e->src->index, e->dest->index);
}


/* Simulate the execution of STMT and update the work lists accordingly.  */

void
ssa_propagation_engine::simulate_stmt (gimple *stmt)
{
  enum ssa_prop_result val = SSA_PROP_NOT_INTERESTING;
  edge taken_edge = NULL;
  tree output_name = NULL_TREE;

  /* Pull the stmt off the SSA edge worklist.  */
  bitmap_clear_bit (ssa_edge_worklist, gimple_uid (stmt));

  /* Don't bother visiting statements that are already
     considered varying by the propagator.  */
  if (!prop_simulate_again_p (stmt))
    return;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      val = visit_phi (as_a <gphi *> (stmt));
      output_name = gimple_phi_result (stmt);
    }
  else
    val = visit_stmt (stmt, &taken_edge, &output_name);

  if (val == SSA_PROP_VARYING)
    {
      prop_set_simulate_again (stmt, false);

      /* If the statement produced a new varying value, add the SSA
	 edges coming out of OUTPUT_NAME.  */
      if (output_name)
	add_ssa_edge (output_name);

      /* If STMT transfers control out of its basic block, add
	 all outgoing edges to the work list.  */
      if (stmt_ends_bb_p (stmt))
	{
	  edge e;
	  edge_iterator ei;
	  basic_block bb = gimple_bb (stmt);
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    add_control_edge (e);
	}
      return;
    }
  else if (val == SSA_PROP_INTERESTING)
    {
      /* If the statement produced a new value, add the SSA edges coming
	 out of OUTPUT_NAME.  */
      if (output_name)
	add_ssa_edge (output_name);

      /* If we know which edge is going to be taken out of this block,
	 add it to the CFG work list.  */
      if (taken_edge)
	add_control_edge (taken_edge);
    }

  /* If the stmt has no SSA uses whose defs may be simulated again,
     then this stmt will never be visited again.  */
  bool has_simulate_again_uses = false;
  use_operand_p use_p;
  ssa_op_iter iter;
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      edge_iterator ei;
      edge e;
      tree arg;
      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->preds)
	if (!(e->flags & EDGE_EXECUTABLE)
	    || ((arg = PHI_ARG_DEF_FROM_EDGE (stmt, e))
		&& TREE_CODE (arg) == SSA_NAME
		&& !SSA_NAME_IS_DEFAULT_DEF (arg)
		&& prop_simulate_again_p (SSA_NAME_DEF_STMT (arg))))
	  {
	    has_simulate_again_uses = true;
	    break;
	  }
    }
  else
    FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
      {
	gimple *def_stmt = SSA_NAME_DEF_STMT (USE_FROM_PTR (use_p));
	if (!gimple_nop_p (def_stmt)
	    && prop_simulate_again_p (def_stmt))
	  {
	    has_simulate_again_uses = true;
	    break;
	  }
      }
  if (!has_simulate_again_uses)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "marking stmt to be not simulated again\n");
      prop_set_simulate_again (stmt, false);
    }
}


/* Simulate the execution of BLOCK.  Evaluate the statement associated
   with each variable reference inside the block.  */

void
ssa_propagation_engine::simulate_block (basic_block block)
{
  gimple_stmt_iterator gsi;

  /* There is nothing to do for the exit block.  */
  if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nSimulating block %d\n", block->index);

  /* Always simulate PHI nodes, even if we have simulated this block
     before.  */
  for (gsi = gsi_start_phis (block); !gsi_end_p (gsi); gsi_next (&gsi))
    simulate_stmt (gsi_stmt (gsi));

  /* If this is the first time we've simulated this block, then we
     must simulate each of its statements.  */
  if (! (block->flags & BB_VISITED))
    {
      gimple_stmt_iterator j;
      unsigned int normal_edge_count;
      edge e, normal_edge;
      edge_iterator ei;

      for (j = gsi_start_bb (block); !gsi_end_p (j); gsi_next (&j))
	simulate_stmt (gsi_stmt (j));

      /* Note that we have simulated this block.  */
      block->flags |= BB_VISITED;

      /* We cannot predict when abnormal and EH edges will be executed, so
	 once a block is considered executable, we consider any
	 outgoing abnormal edges as executable.

	 TODO: This is not exactly true.  Simplifying a statement might
	 prove it non-throwing, and a computed goto can be handled
	 when its destination is known.

	 At the same time, if this block has only one successor that is
	 reached by non-abnormal edges, then add that successor to the
	 worklist.  */
      normal_edge_count = 0;
      normal_edge = NULL;
      FOR_EACH_EDGE (e, ei, block->succs)
	{
	  if (e->flags & (EDGE_ABNORMAL | EDGE_EH))
	    add_control_edge (e);
	  else
	    {
	      normal_edge_count++;
	      normal_edge = e;
	    }
	}

      if (normal_edge_count == 1)
	add_control_edge (normal_edge);
    }
}


/* Initialize local data structures and work lists.  */

static void
ssa_prop_init (void)
{
  edge e;
  edge_iterator ei;
  basic_block bb;

  /* Worklists of SSA edges.  */
  ssa_edge_worklist = BITMAP_ALLOC (NULL);
  ssa_edge_worklist_back = BITMAP_ALLOC (NULL);
  bitmap_tree_view (ssa_edge_worklist);
  bitmap_tree_view (ssa_edge_worklist_back);

  /* Worklist of basic-blocks.  */
  bb_to_cfg_order = XNEWVEC (int, last_basic_block_for_fn (cfun) + 1);
  cfg_order_to_bb = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  int n = pre_and_rev_post_order_compute_fn (cfun, NULL,
					     cfg_order_to_bb, false);
  for (int i = 0; i < n; ++i)
    bb_to_cfg_order[cfg_order_to_bb[i]] = i;
  cfg_blocks = BITMAP_ALLOC (NULL);
  cfg_blocks_back = BITMAP_ALLOC (NULL);

  /* Initially assume that every edge in the CFG is not executable
     (including the edges coming out of the entry block).  Mark blocks
     as not visited; blocks not yet visited will have all their statements
     simulated once an incoming edge becomes executable.  */
  set_gimple_stmt_max_uid (cfun, 0);
  for (int i = 0; i < n; ++i)
    {
      gimple_stmt_iterator si;
      bb = BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb[i]);

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
	}

      bb->flags &= ~BB_VISITED;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->flags &= ~EDGE_EXECUTABLE;
    }
  uid_to_stmt.safe_grow (gimple_stmt_max_uid (cfun));
}


/* Free allocated storage.  */

static void
ssa_prop_fini (void)
{
  BITMAP_FREE (cfg_blocks);
  BITMAP_FREE (cfg_blocks_back);
  free (bb_to_cfg_order);
  free (cfg_order_to_bb);
  BITMAP_FREE (ssa_edge_worklist);
  BITMAP_FREE (ssa_edge_worklist_back);
  uid_to_stmt.release ();
}


/* Return true if EXPR is an acceptable right-hand-side for a
   GIMPLE assignment.  We validate the entire tree, not just
   the root node, thus catching expressions that embed complex
   operands that are not permitted in GIMPLE.  This function
   is needed because the folding routines in fold-const.c
   may return such expressions in some cases, e.g., an array
   access with an embedded index addition.  It may make more
   sense to have folding routines that are sensitive to the
   constraints on GIMPLE operands, rather than abandoning any
   attempt to fold if the usual folding turns out to be too
   aggressive.  */

bool
valid_gimple_rhs_p (tree expr)
{
  enum tree_code code = TREE_CODE (expr);

  switch (TREE_CODE_CLASS (code))
    {
    case tcc_declaration:
      if (!is_gimple_variable (expr))
	return false;
      break;

    case tcc_constant:
      /* All constants are ok.  */
      break;

    case tcc_comparison:
      /* GENERIC allows comparisons with non-boolean types, reject
	 those for GIMPLE.  Let vector-typed comparisons pass - rules
	 for GENERIC and GIMPLE are the same here.  */
      if (!(INTEGRAL_TYPE_P (TREE_TYPE (expr))
	    && (TREE_CODE (TREE_TYPE (expr)) == BOOLEAN_TYPE
		|| TYPE_PRECISION (TREE_TYPE (expr)) == 1))
	  && ! VECTOR_TYPE_P (TREE_TYPE (expr)))
	return false;

      /* Fallthru.  */
    case tcc_binary:
      if (!is_gimple_val (TREE_OPERAND (expr, 0))
	  || !is_gimple_val (TREE_OPERAND (expr, 1)))
	return false;
      break;

    case tcc_unary:
      if (!is_gimple_val (TREE_OPERAND (expr, 0)))
	return false;
      break;

    case tcc_expression:
      switch (code)
	{
	case ADDR_EXPR:
	  {
	    tree t;
	    if (is_gimple_min_invariant (expr))
	      return true;
	    t = TREE_OPERAND (expr, 0);
	    while (handled_component_p (t))
	      {
		/* ??? More checks needed, see the GIMPLE verifier.  */
		if ((TREE_CODE (t) == ARRAY_REF
		     || TREE_CODE (t) == ARRAY_RANGE_REF)
		    && !is_gimple_val (TREE_OPERAND (t, 1)))
		  return false;
		t = TREE_OPERAND (t, 0);
	      }
	    if (!is_gimple_id (t))
	      return false;
	  }
	  break;

	default:
	  if (get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS)
	    {
	      if (((code == VEC_COND_EXPR || code == COND_EXPR)
		   ? !is_gimple_condexpr (TREE_OPERAND (expr, 0))
		   : !is_gimple_val (TREE_OPERAND (expr, 0)))
		  || !is_gimple_val (TREE_OPERAND (expr, 1))
		  || !is_gimple_val (TREE_OPERAND (expr, 2)))
		return false;
	      break;
	    }
	  return false;
	}
      break;

    case tcc_vl_exp:
      return false;

    case tcc_exceptional:
      if (code == CONSTRUCTOR)
	{
	  unsigned i;
	  tree elt;
	  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expr), i, elt)
	    if (!is_gimple_val (elt))
	      return false;
	  return true;
	}
      if (code != SSA_NAME)
	return false;
      break;

    case tcc_reference:
      if (code == BIT_FIELD_REF)
	return is_gimple_val (TREE_OPERAND (expr, 0));
      return false;

    default:
      return false;
    }

  return true;
}
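
/* For illustration (hypothetical trees): a_1 + b_2, x_3 < y_4, &var and
   plain SSA names are all acceptable RHS forms, whereas a_1 + (b_2 * c_3)
   is rejected because its nested operand is not a GIMPLE value and would
   need further gimplification.  */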


/* Return true if EXPR is a CALL_EXPR suitable for representation
   as a single GIMPLE_CALL statement.  If the arguments require
   further gimplification, return false.  */

static bool
valid_gimple_call_p (tree expr)
{
  unsigned i, nargs;

  if (TREE_CODE (expr) != CALL_EXPR)
    return false;

  nargs = call_expr_nargs (expr);
  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (expr, i);
      if (is_gimple_reg_type (TREE_TYPE (arg)))
	{
	  if (!is_gimple_val (arg))
	    return false;
	}
      else
	if (!is_gimple_lvalue (arg))
	  return false;
    }

  return true;
}
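
/* For illustration (hypothetical trees): foo (a_1, 4) is acceptable,
   while foo (a_1 + 1) is not, since the register-typed argument a_1 + 1
   is not a GIMPLE value; an aggregate argument such as foo (s.f) passes
   as long as s.f is a valid lvalue.  */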


/* Make SSA names defined by OLD_STMT point to NEW_STMT
   as their defining statement.  */

void
move_ssa_defining_stmt_for_defs (gimple *new_stmt, gimple *old_stmt)
{
  tree var;
  ssa_op_iter iter;

  if (gimple_in_ssa_p (cfun))
    {
      /* Make defined SSA_NAMEs point to the new
	 statement as their definition.  */
      FOR_EACH_SSA_TREE_OPERAND (var, old_stmt, iter, SSA_OP_ALL_DEFS)
	{
	  if (TREE_CODE (var) == SSA_NAME)
	    SSA_NAME_DEF_STMT (var) = new_stmt;
	}
    }
}

/* Helper function for update_gimple_call and update_call_from_tree.
   A GIMPLE_CALL STMT is being replaced with GIMPLE_CALL NEW_STMT.  */

static void
finish_update_gimple_call (gimple_stmt_iterator *si_p, gimple *new_stmt,
			   gimple *stmt)
{
  gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
  gimple_move_vops (new_stmt, stmt);
  gimple_set_location (new_stmt, gimple_location (stmt));
  if (gimple_block (new_stmt) == NULL_TREE)
    gimple_set_block (new_stmt, gimple_block (stmt));
  gsi_replace (si_p, new_stmt, false);
}

/* Update the GIMPLE_CALL statement at iterator *SI_P to call FN with
   NARGS arguments; the arguments, in GIMPLE form, follow NARGS in the
   variadic argument list.  */

bool
update_gimple_call (gimple_stmt_iterator *si_p, tree fn, int nargs, ...)
{
  va_list ap;
  gcall *new_stmt, *stmt = as_a <gcall *> (gsi_stmt (*si_p));

  gcc_assert (is_gimple_call (stmt));
  va_start (ap, nargs);
  new_stmt = gimple_build_call_valist (fn, nargs, ap);
  finish_update_gimple_call (si_p, new_stmt, stmt);
  va_end (ap);
  return true;
}
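
/* A usage sketch (the folding decision and the DEST/SRC/LEN operands
   are hypothetical): a caller that wants to redirect a statement to a
   three-argument builtin could write

     update_gimple_call (&gsi, builtin_decl_implicit (BUILT_IN_MEMCPY),
			 3, dest, src, len);

   the helper builds the new call and carries over the LHS, virtual
   operands and location from the old statement.  */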

/* Update a GIMPLE_CALL statement at iterator *SI_P to reflect the
   value of EXPR, which is expected to be the result of folding the
   call.  This can only be done if EXPR is a CALL_EXPR with valid
   GIMPLE operands as arguments, or if it is a suitable RHS expression
   for a GIMPLE_ASSIGN.  More complex expressions will require
   gimplification, which will introduce additional statements.  In this
   event, no update is performed, and the function returns false.
   Note that we cannot mutate a GIMPLE_CALL in-place, so we always
   replace the statement at *SI_P with an entirely new statement.
   The new statement need not be a call, e.g., if the original call
   folded to a constant.  */

bool
update_call_from_tree (gimple_stmt_iterator *si_p, tree expr)
{
  gimple *stmt = gsi_stmt (*si_p);

  if (valid_gimple_call_p (expr))
    {
      /* The call has simplified to another call.  */
      tree fn = CALL_EXPR_FN (expr);
      unsigned i;
      unsigned nargs = call_expr_nargs (expr);
      vec<tree> args = vNULL;
      gcall *new_stmt;

      if (nargs > 0)
	{
	  args.create (nargs);
	  args.safe_grow_cleared (nargs);

	  for (i = 0; i < nargs; i++)
	    args[i] = CALL_EXPR_ARG (expr, i);
	}

      new_stmt = gimple_build_call_vec (fn, args);
      finish_update_gimple_call (si_p, new_stmt, stmt);
      args.release ();

      return true;
    }
  else if (valid_gimple_rhs_p (expr))
    {
      tree lhs = gimple_call_lhs (stmt);
      gimple *new_stmt;

      /* The call has simplified to an expression
	 that cannot be represented as a GIMPLE_CALL.  */
      if (lhs)
	{
	  /* A value is expected.
	     Introduce a new GIMPLE_ASSIGN statement.  */
	  STRIP_USELESS_TYPE_CONVERSION (expr);
	  new_stmt = gimple_build_assign (lhs, expr);
	  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
	  gimple_move_vops (new_stmt, stmt);
	}
      else if (!TREE_SIDE_EFFECTS (expr))
	{
	  /* No value is expected, and EXPR has no effect.
	     Replace it with an empty statement.  */
	  new_stmt = gimple_build_nop ();
	  if (gimple_in_ssa_p (cfun))
	    {
	      unlink_stmt_vdef (stmt);
	      release_defs (stmt);
	    }
	}
      else
	{
	  /* No value is expected, but EXPR has an effect,
	     e.g., it could be a reference to a volatile
	     variable.  Create an assignment statement
	     with a dummy (unused) lhs variable.  */
	  STRIP_USELESS_TYPE_CONVERSION (expr);
	  if (gimple_in_ssa_p (cfun))
	    lhs = make_ssa_name (TREE_TYPE (expr));
	  else
	    lhs = create_tmp_var (TREE_TYPE (expr));
	  new_stmt = gimple_build_assign (lhs, expr);
	  gimple_move_vops (new_stmt, stmt);
	  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
	}
      gimple_set_location (new_stmt, gimple_location (stmt));
      gsi_replace (si_p, new_stmt, false);
      return true;
    }
  else
    /* The call simplified to an expression that is
       not a valid GIMPLE RHS.  */
    return false;
}
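
/* An illustrative pattern (FOLDED_EXPR is a hypothetical local holding
   the folded form of the call):

     if (!update_call_from_tree (&gsi, folded_expr))
       gimplify_and_update_call_from_tree (&gsi, folded_expr);

   i.e. try the cheap in-place update first and fall back to full
   gimplification (which may insert extra statements) otherwise.  */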

/* Entry point to the propagation engine.

   The VISIT_STMT virtual function is called for every statement
   visited and the VISIT_PHI virtual function is called for every PHI
   node visited.  */

void
ssa_propagation_engine::ssa_propagate (void)
{
  ssa_prop_init ();

  curr_order = 0;

  /* Iterate until the worklists are empty.  We iterate both blocks
     and stmts in RPO order, using sets of two worklists to first
     complete the current iteration before iterating over backedges.
     Seed the algorithm by adding the successors of the entry block to the
     edge worklist.  */
  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
    {
      e->flags &= ~EDGE_EXECUTABLE;
      add_control_edge (e);
    }
  while (1)
    {
      int next_block_order = (bitmap_empty_p (cfg_blocks)
			      ? -1 : bitmap_first_set_bit (cfg_blocks));
      int next_stmt_uid = (bitmap_empty_p (ssa_edge_worklist)
			   ? -1 : bitmap_first_set_bit (ssa_edge_worklist));
      if (next_block_order == -1 && next_stmt_uid == -1)
	{
	  if (bitmap_empty_p (cfg_blocks_back)
	      && bitmap_empty_p (ssa_edge_worklist_back))
	    break;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Regular worklists empty, now processing "
		     "backedge destinations\n");
	  std::swap (cfg_blocks, cfg_blocks_back);
	  std::swap (ssa_edge_worklist, ssa_edge_worklist_back);
	  continue;
	}

      int next_stmt_bb_order = -1;
      gimple *next_stmt = NULL;
      if (next_stmt_uid != -1)
	{
	  next_stmt = uid_to_stmt[next_stmt_uid];
	  next_stmt_bb_order = bb_to_cfg_order[gimple_bb (next_stmt)->index];
	}

      /* Pull the next block to simulate off the worklist if it comes first.  */
      if (next_block_order != -1
	  && (next_stmt_bb_order == -1
	      || next_block_order <= next_stmt_bb_order))
	{
	  curr_order = next_block_order;
	  bitmap_clear_bit (cfg_blocks, next_block_order);
	  basic_block bb
	    = BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb[next_block_order]);
	  simulate_block (bb);
	}
      /* Else simulate from the SSA edge worklist.  */
      else
	{
	  curr_order = next_stmt_bb_order;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\nSimulating statement: ");
	      print_gimple_stmt (dump_file, next_stmt, 0, dump_flags);
	    }
	  simulate_stmt (next_stmt);
	}
    }

  ssa_prop_fini ();
}

/* Return true if STMT is of the form 'mem_ref = RHS', where 'mem_ref'
   is a non-volatile pointer dereference, a structure reference or a
   reference to a single _DECL.  Ignore volatile memory references
   because they are not interesting for the optimizers.  */

bool
stmt_makes_single_store (gimple *stmt)
{
  tree lhs;

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_CALL)
    return false;

  if (!gimple_vdef (stmt))
    return false;

  lhs = gimple_get_lhs (stmt);

  /* A call statement may have a null LHS.  */
  if (!lhs)
    return false;

  return (!TREE_THIS_VOLATILE (lhs)
	  && (DECL_P (lhs)
	      || REFERENCE_CLASS_P (lhs)));
}
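
/* For example (hypothetical GIMPLE): '*p_1 = x_2', 's.f = y_3' and
   'global = z_4' are single stores, while a register copy 'x_5 = y_6'
   (which has no virtual definition) and volatile accesses are not.  */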


/* Propagation statistics.  */
struct prop_stats_d
{
  long num_const_prop;
  long num_copy_prop;
  long num_stmts_folded;
  long num_dce;
};

static struct prop_stats_d prop_stats;

/* Replace USE references in statement STMT with the values returned
   by the get_value virtual function.  Return true if at least one
   reference was replaced.  */

bool
substitute_and_fold_engine::replace_uses_in (gimple *stmt)
{
  bool replaced = false;
  use_operand_p use;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      tree tuse = USE_FROM_PTR (use);
      tree val = get_value (tuse);

      if (val == tuse || val == NULL_TREE)
	continue;

      if (gimple_code (stmt) == GIMPLE_ASM
	  && !may_propagate_copy_into_asm (tuse))
	continue;

      if (!may_propagate_copy (tuse, val))
	continue;

      if (TREE_CODE (val) != SSA_NAME)
	prop_stats.num_const_prop++;
      else
	prop_stats.num_copy_prop++;

      propagate_value (use, val);

      replaced = true;
    }

  return replaced;
}


/* Replace the arguments of PHI with the values returned by the
   get_value virtual function.  Return true if at least one argument
   was replaced.  */

bool
substitute_and_fold_engine::replace_phi_args_in (gphi *phi)
{
  size_t i;
  bool replaced = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Folding PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);

      if (TREE_CODE (arg) == SSA_NAME)
	{
	  tree val = get_value (arg);

	  if (val && val != arg && may_propagate_copy (arg, val))
	    {
	      edge e = gimple_phi_arg_edge (phi, i);

	      if (TREE_CODE (val) != SSA_NAME)
		prop_stats.num_const_prop++;
	      else
		prop_stats.num_copy_prop++;

	      propagate_value (PHI_ARG_DEF_PTR (phi, i), val);
	      replaced = true;

	      /* If we propagated a copy and this argument flows
		 through an abnormal edge, update the replacement
		 accordingly.  */
	      if (TREE_CODE (val) == SSA_NAME
		  && e->flags & EDGE_ABNORMAL
		  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
		{
		  /* This can only occur for virtual operands, since
		     for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val)
		     would prevent replacement.  */
		  gcc_checking_assert (virtual_operand_p (val));
		  SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1;
		}
	    }
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (!replaced)
	fprintf (dump_file, "No folding possible\n");
      else
	{
	  fprintf (dump_file, "Folded into: ");
	  print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
	  fprintf (dump_file, "\n");
	}
    }

  return replaced;
}


class substitute_and_fold_dom_walker : public dom_walker
{
public:
    substitute_and_fold_dom_walker (cdi_direction direction,
				    class substitute_and_fold_engine *engine)
	: dom_walker (direction),
	  something_changed (false),
	  substitute_and_fold_engine (engine)
    {
      stmts_to_remove.create (0);
      stmts_to_fixup.create (0);
      need_eh_cleanup = BITMAP_ALLOC (NULL);
    }
    ~substitute_and_fold_dom_walker ()
    {
      stmts_to_remove.release ();
      stmts_to_fixup.release ();
      BITMAP_FREE (need_eh_cleanup);
    }

    virtual edge before_dom_children (basic_block);
    virtual void after_dom_children (basic_block) {}

    bool something_changed;
    vec<gimple *> stmts_to_remove;
    vec<gimple *> stmts_to_fixup;
    bitmap need_eh_cleanup;

    class substitute_and_fold_engine *substitute_and_fold_engine;
};

edge
substitute_and_fold_dom_walker::before_dom_children (basic_block bb)
{
  /* Propagate known values into PHI nodes.  */
  for (gphi_iterator i = gsi_start_phis (bb);
       !gsi_end_p (i);
       gsi_next (&i))
    {
      gphi *phi = i.phi ();
      tree res = gimple_phi_result (phi);
      if (virtual_operand_p (res))
	continue;
      if (res && TREE_CODE (res) == SSA_NAME)
	{
	  tree sprime = substitute_and_fold_engine->get_value (res);
	  if (sprime
	      && sprime != res
	      && may_propagate_copy (res, sprime))
	    {
	      stmts_to_remove.safe_push (phi);
	      continue;
	    }
	}
      something_changed |= substitute_and_fold_engine->replace_phi_args_in (phi);
    }

  /* Propagate known values into stmts.  In some cases this exposes
     more trivially deletable stmts to walk backward.  */
  for (gimple_stmt_iterator i = gsi_start_bb (bb);
       !gsi_end_p (i);
       gsi_next (&i))
    {
      bool did_replace;
      gimple *stmt = gsi_stmt (i);

      /* There is no point propagating into a stmt for whose LHS we
	 have a value we can propagate into all uses.  Mark it for
	 removal instead.  */
      tree lhs = gimple_get_lhs (stmt);
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	{
	  tree sprime = substitute_and_fold_engine->get_value (lhs);
	  if (sprime
	      && sprime != lhs
	      && may_propagate_copy (lhs, sprime)
	      && !stmt_could_throw_p (cfun, stmt)
	      && !gimple_has_side_effects (stmt)
	      /* We have to leave ASSERT_EXPRs around for jump-threading.  */
	      && (!is_gimple_assign (stmt)
		  || gimple_assign_rhs_code (stmt) != ASSERT_EXPR))
	    {
	      stmts_to_remove.safe_push (stmt);
	      continue;
	    }
	}

      /* Replace the statement with its folded version and mark it
	 folded.  */
      did_replace = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Folding statement: ");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}

      gimple *old_stmt = stmt;
      bool was_noreturn = (is_gimple_call (stmt)
			   && gimple_call_noreturn_p (stmt));

      /* Replace real uses in the statement.  */
      did_replace |= substitute_and_fold_engine->replace_uses_in (stmt);

      /* If we made a replacement, fold the statement.  */
      if (did_replace)
	{
	  fold_stmt (&i, follow_single_use_edges);
	  stmt = gsi_stmt (i);
	  gimple_set_modified (stmt, true);
	}
      /* Also fold if we want to fold all statements.  */
      else if (substitute_and_fold_engine->fold_all_stmts
	       && fold_stmt (&i, follow_single_use_edges))
	{
	  did_replace = true;
	  stmt = gsi_stmt (i);
	  gimple_set_modified (stmt, true);
	}

      /* Some statements may be simplified using propagator
	 specific information.  Do this before propagating
	 into the stmt to not disturb pass specific information.  */
      update_stmt_if_modified (stmt);
      if (substitute_and_fold_engine->fold_stmt (&i))
	{
	  did_replace = true;
	  prop_stats.num_stmts_folded++;
	  stmt = gsi_stmt (i);
	  gimple_set_modified (stmt, true);
	}

      /* If this is a control statement on which the propagator left
	 some edges unexecuted, force the condition in a way consistent
	 with that.  See PR66945 for cases where the propagator can end
	 up with a different idea of a taken edge than folding
	 (once undefined behavior is involved).  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  if ((EDGE_SUCC (bb, 0)->flags & EDGE_EXECUTABLE)
	      ^ (EDGE_SUCC (bb, 1)->flags & EDGE_EXECUTABLE))
	    {
	      if (((EDGE_SUCC (bb, 0)->flags & EDGE_TRUE_VALUE) != 0)
		  == ((EDGE_SUCC (bb, 0)->flags & EDGE_EXECUTABLE) != 0))
		gimple_cond_make_true (as_a <gcond *> (stmt));
	      else
		gimple_cond_make_false (as_a <gcond *> (stmt));
	      gimple_set_modified (stmt, true);
	      did_replace = true;
	    }
	}

      /* Now cleanup.  */
      if (did_replace)
	{
	  /* If we cleaned up EH information from the statement,
	     remove EH edges.  */
	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
	    bitmap_set_bit (need_eh_cleanup, bb->index);

	  /* If we turned a call that was not noreturn into one that is,
	     schedule it for fixup.  */
	  if (!was_noreturn
	      && is_gimple_call (stmt)
	      && gimple_call_noreturn_p (stmt))
	    stmts_to_fixup.safe_push (stmt);

	  if (gimple_assign_single_p (stmt))
	    {
	      tree rhs = gimple_assign_rhs1 (stmt);

	      if (TREE_CODE (rhs) == ADDR_EXPR)
		recompute_tree_invariant_for_addr_expr (rhs);
	    }

	  /* Determine what needs to be done to update the SSA form.  */
	  update_stmt_if_modified (stmt);
	  if (!is_gimple_debug (stmt))
	    something_changed = true;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (did_replace)
	    {
	      fprintf (dump_file, "Folded into: ");
	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }
	  else
	    fprintf (dump_file, "Not folded\n");
	}
    }
  return NULL;
}


/* Perform final substitution and folding of propagated values.
   Process the whole function if BLOCK is null, otherwise only
   process the blocks that BLOCK dominates.  In the latter case,
   it is the caller's responsibility to ensure that dominator
   information is available and up-to-date.

   Values are looked up via the get_value virtual function and the
   single value returned for an SSA name is substituted at every use.
   Statements may additionally be simplified via the fold_stmt virtual
   function for pass specific simplification, and statements that
   become trivially dead because their result is fully propagated are
   removed.

   Return TRUE when something changed.  */

bool
substitute_and_fold_engine::substitute_and_fold (basic_block block)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nSubstituting values and folding statements\n\n");

  memset (&prop_stats, 0, sizeof (prop_stats));

  /* Don't call calculate_dominance_info when iterating over a subgraph.
     Callers that are using the interface this way are likely to want to
     iterate over several disjoint subgraphs, and it would be expensive
     in enable-checking builds to revalidate the whole dominance tree
     each time.  */
  if (block)
    gcc_assert (dom_info_state (CDI_DOMINATORS));
  else
    calculate_dominance_info (CDI_DOMINATORS);
  substitute_and_fold_dom_walker walker (CDI_DOMINATORS, this);
  walker.walk (block ? block : ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* We cannot remove stmts during the BB walk, especially not release
     SSA names there as that destroys the lattice of our callers.
     Remove stmts in reverse order to make debug stmt creation possible.  */
  while (!walker.stmts_to_remove.is_empty ())
    {
      gimple *stmt = walker.stmts_to_remove.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
	{
	  fprintf (dump_file, "Removing dead stmt ");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "\n");
	}
      prop_stats.num_dce++;
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      if (gimple_code (stmt) == GIMPLE_PHI)
	remove_phi_node (&gsi, true);
      else
	{
	  unlink_stmt_vdef (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	}
    }

  if (!bitmap_empty_p (walker.need_eh_cleanup))
    gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk.  Do this
     in reverse order so we don't inadvertently remove a stmt we want to
     fixup by visiting a dominating now noreturn call first.  */
  while (!walker.stmts_to_fixup.is_empty ())
    {
      gimple *stmt = walker.stmts_to_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
	{
	  fprintf (dump_file, "Fixing up noreturn call ");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "\n");
	}
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (cfun, "Constants propagated",
			    prop_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
			    prop_stats.num_copy_prop);
  statistics_counter_event (cfun, "Statements folded",
			    prop_stats.num_stmts_folded);
  statistics_counter_event (cfun, "Statements deleted",
			    prop_stats.num_dce);

  return walker.something_changed;
}
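
/* An illustrative sketch of a client (all names here are hypothetical):

     class sketch_folder : public substitute_and_fold_engine
     {
     public:
       tree get_value (tree name) override
       {
	 return sketch_lattice_lookup (name);
       }
     };

   after its analysis phase the pass would run

     sketch_folder ().substitute_and_fold ();

   to substitute the lattice values and fold the affected statements.  */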


/* Return true if we may propagate ORIG into DEST, false otherwise.  */

bool
may_propagate_copy (tree dest, tree orig)
{
  tree type_d = TREE_TYPE (dest);
  tree type_o = TREE_TYPE (orig);

  /* If ORIG is a default definition which flows in from an abnormal edge
     then the copy can be propagated.  It is important that we do so to avoid
     uninitialized copies.  */
  if (TREE_CODE (orig) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig)
      && SSA_NAME_IS_DEFAULT_DEF (orig)
      && (SSA_NAME_VAR (orig) == NULL_TREE
	  || TREE_CODE (SSA_NAME_VAR (orig)) == VAR_DECL))
    ;
  /* Otherwise if ORIG just flows in from an abnormal edge then the copy cannot
     be propagated.  */
  else if (TREE_CODE (orig) == SSA_NAME
	   && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
    return false;
  /* Similarly if DEST flows in from an abnormal edge then the copy cannot be
     propagated.  */
  else if (TREE_CODE (dest) == SSA_NAME
	   && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (dest))
    return false;

  /* Do not copy between types for which we *do* need a conversion.  */
  if (!useless_type_conversion_p (type_d, type_o))
    return false;

  /* Generally propagating virtual operands is not ok as that may
     create overlapping life-ranges.  */
  if (TREE_CODE (dest) == SSA_NAME && virtual_operand_p (dest))
    return false;

  /* Anything else is OK.  */
  return true;
}
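
/* For instance (hypothetical SSA names): given the copy 'b_2 = a_1',
   uses of b_2 may be rewritten to a_1 when the conversion between
   their types is useless and neither name occurs in an abnormal PHI;
   the checks above refuse the replacement if, say, b_2 receives its
   value on an abnormal edge.  */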

/* Like may_propagate_copy, but use as the destination expression
   the principal expression (typically, the RHS) contained in
   statement DEST.  This is more efficient when working with the
   gimple tuples representation.  */

bool
may_propagate_copy_into_stmt (gimple *dest, tree orig)
{
  tree type_d;
  tree type_o;

  /* If the statement is a switch or a single-rhs assignment,
     then the expression to be replaced by the propagation may
     be an SSA_NAME.  Fortunately, there is an explicit tree
     for the expression, so we delegate to may_propagate_copy.  */

  if (gimple_assign_single_p (dest))
    return may_propagate_copy (gimple_assign_rhs1 (dest), orig);
  else if (gswitch *dest_swtch = dyn_cast <gswitch *> (dest))
    return may_propagate_copy (gimple_switch_index (dest_swtch), orig);

  /* In other cases, the expression is not materialized, so there
     is no destination to pass to may_propagate_copy.  On the other
     hand, the expression cannot be an SSA_NAME, so the analysis
     is much simpler.  */

  if (TREE_CODE (orig) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
    return false;

  if (is_gimple_assign (dest))
    type_d = TREE_TYPE (gimple_assign_lhs (dest));
  else if (gimple_code (dest) == GIMPLE_COND)
    type_d = boolean_type_node;
  else if (is_gimple_call (dest)
	   && gimple_call_lhs (dest) != NULL_TREE)
    type_d = TREE_TYPE (gimple_call_lhs (dest));
  else
    gcc_unreachable ();

  type_o = TREE_TYPE (orig);

  if (!useless_type_conversion_p (type_d, type_o))
    return false;

  return true;
}

/* Similarly, but we know that we're propagating into an ASM_EXPR.  */

bool
may_propagate_copy_into_asm (tree dest ATTRIBUTE_UNUSED)
{
  return true;
}


/* Common code for propagate_value and replace_exp.

   Replace use operand OP_P with VAL.  FOR_PROPAGATION indicates if the
   replacement is done to propagate a value or not.  */

static void
replace_exp_1 (use_operand_p op_p, tree val,
	       bool for_propagation ATTRIBUTE_UNUSED)
{
  if (flag_checking)
    {
      tree op = USE_FROM_PTR (op_p);
      gcc_assert (!(for_propagation
		    && TREE_CODE (op) == SSA_NAME
		    && TREE_CODE (val) == SSA_NAME
		    && !may_propagate_copy (op, val)));
    }

  if (TREE_CODE (val) == SSA_NAME)
    SET_USE (op_p, val);
  else
    SET_USE (op_p, unshare_expr (val));
}


/* Propagate the value VAL (assumed to be a constant or another SSA_NAME)
   into the operand pointed to by OP_P.

   Use this version for const/copy propagation as it will perform additional
   checks to ensure validity of the const/copy propagation.  */

void
propagate_value (use_operand_p op_p, tree val)
{
  replace_exp_1 (op_p, val, true);
}

/* Replace *OP_P with value VAL (assumed to be a constant or another SSA_NAME).

   Use this version when not const/copy propagating values.  For example,
   PRE uses this version when building expressions as they would appear
   in specific blocks taking into account actions of PHI nodes.

   The statement in which an expression has been replaced should be
   folded using fold_stmt_inplace.  */

void
replace_exp (use_operand_p op_p, tree val)
{
  replace_exp_1 (op_p, val, false);
}


/* Propagate the value VAL (assumed to be a constant or another SSA_NAME)
   into the tree pointed to by OP_P.

   Use this version for const/copy propagation when SSA operands are not
   available.  It will perform the additional checks to ensure validity of
   the const/copy propagation, but will not update any operand information.
   Be sure to mark the stmt as modified.  */

void
propagate_tree_value (tree *op_p, tree val)
{
  if (TREE_CODE (val) == SSA_NAME)
    *op_p = val;
  else
    *op_p = unshare_expr (val);
}


/* Like propagate_tree_value, but use as the operand to replace
   the principal expression (typically, the RHS) contained in the
   statement referenced by iterator GSI.  Note that it is not
   always possible to update the statement in-place, so a new
   statement may be created to replace the original.  */

void
propagate_tree_value_into_stmt (gimple_stmt_iterator *gsi, tree val)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (is_gimple_assign (stmt))
    {
      tree expr = NULL_TREE;
      if (gimple_assign_single_p (stmt))
	expr = gimple_assign_rhs1 (stmt);
      propagate_tree_value (&expr, val);
      gimple_assign_set_rhs_from_tree (gsi, expr);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      tree lhs = NULL_TREE;
      tree rhs = build_zero_cst (TREE_TYPE (val));
      propagate_tree_value (&lhs, val);
      gimple_cond_set_code (cond_stmt, NE_EXPR);
      gimple_cond_set_lhs (cond_stmt, lhs);
      gimple_cond_set_rhs (cond_stmt, rhs);
    }
  else if (is_gimple_call (stmt)
	   && gimple_call_lhs (stmt) != NULL_TREE)
    {
      tree expr = NULL_TREE;
      bool res;
      propagate_tree_value (&expr, val);
      res = update_call_from_tree (gsi, expr);
      gcc_assert (res);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    propagate_tree_value (gimple_switch_index_ptr (swtch_stmt), val);
  else
    gcc_unreachable ();
}