/* Generic SSA value propagation engine.
   Copyright (C) 2004-2018 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "dumpfile.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa.h"
#include "tree-ssa-propagate.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-cfgcleanup.h"
#include "cfganal.h"

/* This file implements a generic value propagation engine based on
   the same propagation used by the SSA-CCP algorithm [1].

   Propagation is performed by simulating the execution of every
   statement that produces the value being propagated.  Simulation
   proceeds as follows:

   1- Initially, all edges of the CFG are marked not executable and
      the CFG worklist is seeded with all the statements in the entry
      basic block (block 0).

   2- Every statement S is simulated with a call to the virtual
      function VISIT_STMT.  This evaluation may produce 3 results:

      	SSA_PROP_NOT_INTERESTING: Statement S produces nothing of
	    interest and does not affect any of the work lists.
	    The statement may be simulated again if any of its input
	    operands change in future iterations of the simulator.

	SSA_PROP_VARYING: The value produced by S cannot be determined
	    at compile time.  Further simulation of S is not required.
	    If S is a conditional jump, all the outgoing edges for the
	    block are considered executable and added to the work
	    list.

	SSA_PROP_INTERESTING: S produces a value that can be computed
	    at compile time.  Its result can be propagated into the
	    statements that feed from S.  Furthermore, if S is a
	    conditional jump, only the edge known to be taken is added
	    to the work list.  Edges that are known not to execute are
	    never simulated.

   3- PHI nodes are simulated with a call to VISIT_PHI.  The
      return value from VISIT_PHI has the same semantics as
      described in #2.

   4- Two work lists are kept.  Statements are only added to these
      lists if they produce one of SSA_PROP_INTERESTING or
      SSA_PROP_VARYING.

   	CFG_BLOCKS contains the list of blocks to be simulated.
	    Blocks are added to this list if their incoming edges are
	    found executable.

	SSA_EDGE_WORKLIST contains the list of statements that we
	    need to revisit.

   5- Simulation terminates when both work lists are drained.

   Before calling ssa_propagate, it is important to call
   prop_set_simulate_again on every statement in the program, marking
   the statements that should be simulated.  This initialization
   allows an implementation to specify which statements should never
   be simulated.

   It is also important to compute def-use information before calling
   ssa_propagate.

   References:

     [1] Constant propagation with conditional branches,
         Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     [2] Building an Optimizing Compiler,
	 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     [3] Advanced Compiler Design and Implementation,
	 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
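
/* A minimal usage sketch (hypothetical client pass; the engine and
   pass names below are invented for illustration).  A propagator
   derives from ssa_propagation_engine, implements the two virtual
   functions, marks the statements it wants simulated and invokes
   the engine:

     class my_prop_engine : public ssa_propagation_engine
     {
     public:
       enum ssa_prop_result visit_stmt (gimple *, edge *, tree *)
	 FINAL OVERRIDE;
       enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
     };

     static void
     my_pass_body (void)
     {
       basic_block bb;
       FOR_EACH_BB_FN (bb, cfun)
	 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
	      !gsi_end_p (gsi); gsi_next (&gsi))
	   prop_set_simulate_again (gsi_stmt (gsi), true);
       my_prop_engine engine;
       engine.ssa_propagate ();
     }

   PHI nodes likewise need prop_set_simulate_again set on them for the
   engine to evaluate them via visit_phi.  */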

/* Worklists of control flow edge destinations.  This contains
   the CFG order number of the blocks so we can iterate in CFG
   order by visiting in bit-order.  We use two worklists to
   first make forward progress before iterating.  */
static bitmap cfg_blocks;
static bitmap cfg_blocks_back;
static int *bb_to_cfg_order;
static int *cfg_order_to_bb;

/* Worklists of SSA edges which will need reexamination as their
   definition has changed.  SSA edges are def-use edges in the SSA
   web.  For each D-U edge, we store the target statement or PHI node
   UID in a bitmap.  UIDs order stmts in execution order.  We use
   two worklists to first make forward progress before iterating.  */
static bitmap ssa_edge_worklist;
static bitmap ssa_edge_worklist_back;
static vec<gimple *> uid_to_stmt;

/* Current RPO index in the iteration.  */
static int curr_order;


/* We have just defined a new value for VAR.  Add all immediate uses
   of VAR to the SSA edge worklists, queueing uses in blocks already
   simulated in this iteration on the back worklist.  */

static void
add_ssa_edge (tree var)
{
  imm_use_iterator iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (!prop_simulate_again_p (use_stmt))
	continue;

      /* If we have not yet simulated the block, wait for that to
	 happen and do not add the stmt to the SSA edge worklist.  */
      basic_block use_bb = gimple_bb (use_stmt);
      if (! (use_bb->flags & BB_VISITED))
	continue;

      /* If this is a use on a not yet executable edge, do not bother
	 to queue it.  */
      if (gimple_code (use_stmt) == GIMPLE_PHI
	  && !(EDGE_PRED (use_bb, PHI_ARG_INDEX_FROM_USE (use_p))->flags
	       & EDGE_EXECUTABLE))
	continue;

      bitmap worklist;
      if (bb_to_cfg_order[gimple_bb (use_stmt)->index] < curr_order)
	worklist = ssa_edge_worklist_back;
      else
	worklist = ssa_edge_worklist;
      if (bitmap_set_bit (worklist, gimple_uid (use_stmt)))
	{
	  uid_to_stmt[gimple_uid (use_stmt)] = use_stmt;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "ssa_edge_worklist: adding SSA use in ");
	      print_gimple_stmt (dump_file, use_stmt, 0, TDF_SLIM);
	    }
	}
    }
}


/* Add edge E to the control flow worklist.  */

static void
add_control_edge (edge e)
{
  basic_block bb = e->dest;
  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;

  /* If the edge is already marked executable, skip it.  */
  if (e->flags & EDGE_EXECUTABLE)
    return;

  e->flags |= EDGE_EXECUTABLE;

  int bb_order = bb_to_cfg_order[bb->index];
  if (bb_order < curr_order)
    bitmap_set_bit (cfg_blocks_back, bb_order);
  else
    bitmap_set_bit (cfg_blocks, bb_order);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Adding destination of edge (%d -> %d) to worklist\n",
	e->src->index, e->dest->index);
}


/* Simulate the execution of STMT and update the work lists accordingly.  */

void
ssa_propagation_engine::simulate_stmt (gimple *stmt)
{
  enum ssa_prop_result val = SSA_PROP_NOT_INTERESTING;
  edge taken_edge = NULL;
  tree output_name = NULL_TREE;

  /* Pull the stmt off the SSA edge worklist.  */
  bitmap_clear_bit (ssa_edge_worklist, gimple_uid (stmt));

  /* Don't bother visiting statements that are already
     considered varying by the propagator.  */
  if (!prop_simulate_again_p (stmt))
    return;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      val = visit_phi (as_a <gphi *> (stmt));
      output_name = gimple_phi_result (stmt);
    }
  else
    val = visit_stmt (stmt, &taken_edge, &output_name);

  if (val == SSA_PROP_VARYING)
    {
      prop_set_simulate_again (stmt, false);

      /* If the statement produced a new varying value, add the SSA
	 edges coming out of OUTPUT_NAME.  */
      if (output_name)
	add_ssa_edge (output_name);

      /* If STMT transfers control out of its basic block, add
	 all outgoing edges to the work list.  */
      if (stmt_ends_bb_p (stmt))
	{
	  edge e;
	  edge_iterator ei;
	  basic_block bb = gimple_bb (stmt);
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    add_control_edge (e);
	}
      return;
    }
  else if (val == SSA_PROP_INTERESTING)
    {
      /* If the statement produced a new value, add the SSA edges
	 coming out of OUTPUT_NAME.  */
      if (output_name)
	add_ssa_edge (output_name);

      /* If we know which edge is going to be taken out of this block,
	 add it to the CFG work list.  */
      if (taken_edge)
	add_control_edge (taken_edge);
    }

  /* If none of the SSA uses on the stmt have defs that may be
     simulated again, then this stmt will never be visited again.  */
  bool has_simulate_again_uses = false;
  use_operand_p use_p;
  ssa_op_iter iter;
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      edge_iterator ei;
      edge e;
      tree arg;
      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->preds)
	if (!(e->flags & EDGE_EXECUTABLE)
	    || ((arg = PHI_ARG_DEF_FROM_EDGE (stmt, e))
		&& TREE_CODE (arg) == SSA_NAME
		&& !SSA_NAME_IS_DEFAULT_DEF (arg)
		&& prop_simulate_again_p (SSA_NAME_DEF_STMT (arg))))
	  {
	    has_simulate_again_uses = true;
	    break;
	  }
    }
  else
    FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
      {
	gimple *def_stmt = SSA_NAME_DEF_STMT (USE_FROM_PTR (use_p));
	if (!gimple_nop_p (def_stmt)
	    && prop_simulate_again_p (def_stmt))
	  {
	    has_simulate_again_uses = true;
	    break;
	  }
      }
  if (!has_simulate_again_uses)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "marking stmt to be not simulated again\n");
      prop_set_simulate_again (stmt, false);
    }
}


/* Simulate the execution of BLOCK.  Evaluate the statement associated
   with each variable reference inside the block.  */

void
ssa_propagation_engine::simulate_block (basic_block block)
{
  gimple_stmt_iterator gsi;

  /* There is nothing to do for the exit block.  */
  if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nSimulating block %d\n", block->index);

  /* Always simulate PHI nodes, even if we have simulated this block
     before.  */
  for (gsi = gsi_start_phis (block); !gsi_end_p (gsi); gsi_next (&gsi))
    simulate_stmt (gsi_stmt (gsi));

  /* If this is the first time we've simulated this block, then we
     must simulate each of its statements.  */
  if (! (block->flags & BB_VISITED))
    {
      gimple_stmt_iterator j;
      unsigned int normal_edge_count;
      edge e, normal_edge;
      edge_iterator ei;

      for (j = gsi_start_bb (block); !gsi_end_p (j); gsi_next (&j))
	simulate_stmt (gsi_stmt (j));

      /* Note that we have simulated this block.  */
      block->flags |= BB_VISITED;

      /* We cannot predict when abnormal and EH edges will be executed, so
	 once a block is considered executable, we consider any
	 outgoing abnormal edges as executable.

	 TODO: This is not exactly true.  Simplifying a statement might
	 prove it non-throwing, and a computed goto can be handled when
	 its destination is known.

	 At the same time, if this block has only one successor that is
	 reached by non-abnormal edges, then add that successor to the
	 worklist.  */
      normal_edge_count = 0;
      normal_edge = NULL;
      FOR_EACH_EDGE (e, ei, block->succs)
	{
	  if (e->flags & (EDGE_ABNORMAL | EDGE_EH))
	    add_control_edge (e);
	  else
	    {
	      normal_edge_count++;
	      normal_edge = e;
	    }
	}

      if (normal_edge_count == 1)
	add_control_edge (normal_edge);
    }
}


/* Initialize local data structures and work lists.  */

static void
ssa_prop_init (void)
{
  edge e;
  edge_iterator ei;
  basic_block bb;

  /* Worklists of SSA edges.  */
  ssa_edge_worklist = BITMAP_ALLOC (NULL);
  ssa_edge_worklist_back = BITMAP_ALLOC (NULL);

  /* Worklist of basic-blocks.  */
  bb_to_cfg_order = XNEWVEC (int, last_basic_block_for_fn (cfun) + 1);
  cfg_order_to_bb = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  int n = pre_and_rev_post_order_compute_fn (cfun, NULL,
					     cfg_order_to_bb, false);
  for (int i = 0; i < n; ++i)
    bb_to_cfg_order[cfg_order_to_bb[i]] = i;
  cfg_blocks = BITMAP_ALLOC (NULL);
  cfg_blocks_back = BITMAP_ALLOC (NULL);

  /* Initially assume that every edge in the CFG is not executable
     (including the edges coming out of the entry block).  Mark blocks
     as not visited; blocks not yet visited will have all their statements
     simulated once an incoming edge becomes executable.  */
  set_gimple_stmt_max_uid (cfun, 0);
  for (int i = 0; i < n; ++i)
    {
      gimple_stmt_iterator si;
      bb = BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb[i]);

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
	}

      bb->flags &= ~BB_VISITED;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->flags &= ~EDGE_EXECUTABLE;
    }
  uid_to_stmt.safe_grow (gimple_stmt_max_uid (cfun));

  /* Seed the algorithm by adding the successors of the entry block to the
     edge worklist.  */
  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
    {
      e->flags &= ~EDGE_EXECUTABLE;
      add_control_edge (e);
    }
}


/* Free allocated storage.  */

static void
ssa_prop_fini (void)
{
  BITMAP_FREE (cfg_blocks);
  BITMAP_FREE (cfg_blocks_back);
  free (bb_to_cfg_order);
  free (cfg_order_to_bb);
  BITMAP_FREE (ssa_edge_worklist);
  BITMAP_FREE (ssa_edge_worklist_back);
  uid_to_stmt.release ();
}


/* Return true if EXPR is an acceptable right-hand-side for a
   GIMPLE assignment.  We validate the entire tree, not just
   the root node, thus catching expressions that embed complex
   operands that are not permitted in GIMPLE.  This function
   is needed because the folding routines in fold-const.c
   may return such expressions in some cases, e.g., an array
   access with an embedded index addition.  It may make more
   sense to have folding routines that are sensitive to the
   constraints on GIMPLE operands, rather than abandoning any
   attempt to fold if the usual folding turns out to be too
   aggressive.  */

bool
valid_gimple_rhs_p (tree expr)
{
  enum tree_code code = TREE_CODE (expr);

  switch (TREE_CODE_CLASS (code))
    {
    case tcc_declaration:
      if (!is_gimple_variable (expr))
	return false;
      break;

    case tcc_constant:
      /* All constants are ok.  */
      break;

    case tcc_comparison:
      /* GENERIC allows comparisons with non-boolean types; reject
	 those for GIMPLE.  Let vector-typed comparisons pass - rules
	 for GENERIC and GIMPLE are the same here.  */
      if (!(INTEGRAL_TYPE_P (TREE_TYPE (expr))
	    && (TREE_CODE (TREE_TYPE (expr)) == BOOLEAN_TYPE
		|| TYPE_PRECISION (TREE_TYPE (expr)) == 1))
	  && ! VECTOR_TYPE_P (TREE_TYPE (expr)))
	return false;

      /* Fallthru.  */
    case tcc_binary:
      if (!is_gimple_val (TREE_OPERAND (expr, 0))
	  || !is_gimple_val (TREE_OPERAND (expr, 1)))
	return false;
      break;

    case tcc_unary:
      if (!is_gimple_val (TREE_OPERAND (expr, 0)))
	return false;
      break;

    case tcc_expression:
      switch (code)
	{
	case ADDR_EXPR:
	  {
	    tree t;
	    if (is_gimple_min_invariant (expr))
	      return true;
	    t = TREE_OPERAND (expr, 0);
	    while (handled_component_p (t))
	      {
		/* ??? More checks needed, see the GIMPLE verifier.  */
		if ((TREE_CODE (t) == ARRAY_REF
		     || TREE_CODE (t) == ARRAY_RANGE_REF)
		    && !is_gimple_val (TREE_OPERAND (t, 1)))
		  return false;
		t = TREE_OPERAND (t, 0);
	      }
	    if (!is_gimple_id (t))
	      return false;
	  }
	  break;

	default:
	  if (get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS)
	    {
	      if (((code == VEC_COND_EXPR || code == COND_EXPR)
		   ? !is_gimple_condexpr (TREE_OPERAND (expr, 0))
		   : !is_gimple_val (TREE_OPERAND (expr, 0)))
		  || !is_gimple_val (TREE_OPERAND (expr, 1))
		  || !is_gimple_val (TREE_OPERAND (expr, 2)))
		return false;
	      break;
	    }
	  return false;
	}
      break;

    case tcc_vl_exp:
      return false;

    case tcc_exceptional:
      if (code == CONSTRUCTOR)
	{
	  unsigned i;
	  tree elt;
	  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expr), i, elt)
	    if (!is_gimple_val (elt))
	      return false;
	  return true;
	}
      if (code != SSA_NAME)
	return false;
      break;

    case tcc_reference:
      if (code == BIT_FIELD_REF)
	return is_gimple_val (TREE_OPERAND (expr, 0));
      return false;

    default:
      return false;
    }

  return true;
}
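
/* For illustration (a sketch, not an exhaustive list): flat
   expressions such as "a_1 + b_2" or "x_3 < y_4" whose operands are
   GIMPLE values are accepted, while nested forms such as
   "a_1 + (b_2 * c_3)", or an ADDR_EXPR like "&a[i_5 + 1]" whose
   embedded array index is not a GIMPLE value, are rejected and
   would have to be gimplified first.  */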


/* Return true if EXPR is a CALL_EXPR suitable for representation
   as a single GIMPLE_CALL statement.  If the arguments require
   further gimplification, return false.  */

static bool
valid_gimple_call_p (tree expr)
{
  unsigned i, nargs;

  if (TREE_CODE (expr) != CALL_EXPR)
    return false;

  nargs = call_expr_nargs (expr);
  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (expr, i);
      if (is_gimple_reg_type (TREE_TYPE (arg)))
	{
	  if (!is_gimple_val (arg))
	    return false;
	}
      else
	if (!is_gimple_lvalue (arg))
	  return false;
    }

  return true;
}


/* Make SSA names defined by OLD_STMT point to NEW_STMT
   as their defining statement.  */

void
move_ssa_defining_stmt_for_defs (gimple *new_stmt, gimple *old_stmt)
{
  tree var;
  ssa_op_iter iter;

  if (gimple_in_ssa_p (cfun))
    {
      /* Make defined SSA_NAMEs point to the new
	 statement as their definition.  */
      FOR_EACH_SSA_TREE_OPERAND (var, old_stmt, iter, SSA_OP_ALL_DEFS)
	{
	  if (TREE_CODE (var) == SSA_NAME)
	    SSA_NAME_DEF_STMT (var) = new_stmt;
	}
    }
}

/* Helper function for update_gimple_call and update_call_from_tree.
   A GIMPLE_CALL STMT is being replaced with GIMPLE_CALL NEW_STMT.  */

static void
finish_update_gimple_call (gimple_stmt_iterator *si_p, gimple *new_stmt,
			   gimple *stmt)
{
  gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
  gimple_set_location (new_stmt, gimple_location (stmt));
  if (gimple_block (new_stmt) == NULL_TREE)
    gimple_set_block (new_stmt, gimple_block (stmt));
  gsi_replace (si_p, new_stmt, false);
}

/* Update a GIMPLE_CALL statement at iterator *SI_P to call FN with
   NARGS arguments, where the arguments (already in GIMPLE form)
   follow the NARGS argument.  */

bool
update_gimple_call (gimple_stmt_iterator *si_p, tree fn, int nargs, ...)
{
  va_list ap;
  gcall *new_stmt, *stmt = as_a <gcall *> (gsi_stmt (*si_p));

  gcc_assert (is_gimple_call (stmt));
  va_start (ap, nargs);
  new_stmt = gimple_build_call_valist (fn, nargs, ap);
  finish_update_gimple_call (si_p, new_stmt, stmt);
  va_end (ap);
  return true;
}
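
/* As an illustration (hypothetical caller; DEST, SRC and LEN are
   assumed to already be valid GIMPLE values), a folder could rewrite
   the call at GSI into a memcpy:

     update_gimple_call (&gsi, builtin_decl_explicit (BUILT_IN_MEMCPY),
			 3, dest, src, len);

   The LHS, virtual operands and location of the old call are carried
   over to the new one by finish_update_gimple_call.  */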

/* Update a GIMPLE_CALL statement at iterator *SI_P to reflect the
   value of EXPR, which is expected to be the result of folding the
   call.  This can only be done if EXPR is a CALL_EXPR with valid
   GIMPLE operands as arguments, or if it is a suitable RHS expression
   for a GIMPLE_ASSIGN.  More complex expressions will require
   gimplification, which will introduce additional statements.  In this
   event, no update is performed, and the function returns false.
   Note that we cannot mutate a GIMPLE_CALL in-place, so we always
   replace the statement at *SI_P with an entirely new statement.
   The new statement need not be a call, e.g., if the original call
   folded to a constant.  */

bool
update_call_from_tree (gimple_stmt_iterator *si_p, tree expr)
{
  gimple *stmt = gsi_stmt (*si_p);

  if (valid_gimple_call_p (expr))
    {
      /* The call has simplified to another call.  */
      tree fn = CALL_EXPR_FN (expr);
      unsigned i;
      unsigned nargs = call_expr_nargs (expr);
      vec<tree> args = vNULL;
      gcall *new_stmt;

      if (nargs > 0)
	{
	  args.create (nargs);
	  args.safe_grow_cleared (nargs);

	  for (i = 0; i < nargs; i++)
	    args[i] = CALL_EXPR_ARG (expr, i);
	}

      new_stmt = gimple_build_call_vec (fn, args);
      finish_update_gimple_call (si_p, new_stmt, stmt);
      args.release ();

      return true;
    }
  else if (valid_gimple_rhs_p (expr))
    {
      tree lhs = gimple_call_lhs (stmt);
      gimple *new_stmt;

      /* The call has simplified to an expression
	 that cannot be represented as a GIMPLE_CALL.  */
      if (lhs)
	{
	  /* A value is expected.
	     Introduce a new GIMPLE_ASSIGN statement.  */
	  STRIP_USELESS_TYPE_CONVERSION (expr);
	  new_stmt = gimple_build_assign (lhs, expr);
	  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
	  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
	}
      else if (!TREE_SIDE_EFFECTS (expr))
	{
	  /* No value is expected, and EXPR has no effect.
	     Replace it with an empty statement.  */
	  new_stmt = gimple_build_nop ();
	  if (gimple_in_ssa_p (cfun))
	    {
	      unlink_stmt_vdef (stmt);
	      release_defs (stmt);
	    }
	}
      else
	{
	  /* No value is expected, but EXPR has an effect,
	     e.g., it could be a reference to a volatile
	     variable.  Create an assignment statement
	     with a dummy (unused) lhs variable.  */
	  STRIP_USELESS_TYPE_CONVERSION (expr);
	  if (gimple_in_ssa_p (cfun))
	    lhs = make_ssa_name (TREE_TYPE (expr));
	  else
	    lhs = create_tmp_var (TREE_TYPE (expr));
	  new_stmt = gimple_build_assign (lhs, expr);
	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
	  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
	  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
	}
      gimple_set_location (new_stmt, gimple_location (stmt));
      gsi_replace (si_p, new_stmt, false);
      return true;
    }
  else
    /* The call simplified to an expression that is
       not a valid GIMPLE RHS.  */
    return false;
}
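
/* For example (a sketch with invented SSA names): if a propagator
   proves that the call "x_1 = __builtin_sqrt (4.0)" computes 2.0,
   passing EXPR == 2.0 here replaces the call with the assignment
   "x_1 = 2.0".  Passing a tree that still needs gimplification,
   e.g. "a_2 + b_3 * c_4", leaves the call untouched and yields
   false.  */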

/* Entry point to the propagation engine.

   The VISIT_STMT virtual function is called for every statement
   visited and the VISIT_PHI virtual function is called for every PHI
   node visited.  */

void
ssa_propagation_engine::ssa_propagate (void)
{
  ssa_prop_init ();

  curr_order = 0;

  /* Iterate until the worklists are empty.  We iterate both blocks
     and stmts in RPO order, using sets of two worklists to first
     complete the current iteration before iterating over backedges.  */
  while (1)
    {
      int next_block_order = (bitmap_empty_p (cfg_blocks)
			      ? -1 : bitmap_first_set_bit (cfg_blocks));
      int next_stmt_uid = (bitmap_empty_p (ssa_edge_worklist)
			   ? -1 : bitmap_first_set_bit (ssa_edge_worklist));
      if (next_block_order == -1 && next_stmt_uid == -1)
	{
	  if (bitmap_empty_p (cfg_blocks_back)
	      && bitmap_empty_p (ssa_edge_worklist_back))
	    break;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Regular worklists empty, now processing "
		     "backedge destinations\n");
	  std::swap (cfg_blocks, cfg_blocks_back);
	  std::swap (ssa_edge_worklist, ssa_edge_worklist_back);
	  continue;
	}

      int next_stmt_bb_order = -1;
      gimple *next_stmt = NULL;
      if (next_stmt_uid != -1)
	{
	  next_stmt = uid_to_stmt[next_stmt_uid];
	  next_stmt_bb_order = bb_to_cfg_order[gimple_bb (next_stmt)->index];
	}

      /* Pull the next block to simulate off the worklist if it comes first.  */
      if (next_block_order != -1
	  && (next_stmt_bb_order == -1
	      || next_block_order <= next_stmt_bb_order))
	{
	  curr_order = next_block_order;
	  bitmap_clear_bit (cfg_blocks, next_block_order);
	  basic_block bb
	    = BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb[next_block_order]);
	  simulate_block (bb);
	}
      /* Else simulate from the SSA edge worklist.  */
      else
	{
	  curr_order = next_stmt_bb_order;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\nSimulating statement: ");
	      print_gimple_stmt (dump_file, next_stmt, 0, dump_flags);
	    }
	  simulate_stmt (next_stmt);
	}
    }

  ssa_prop_fini ();
}


/* Return true if STMT is of the form 'mem_ref = RHS', where 'mem_ref'
   is a non-volatile pointer dereference, a structure reference or a
   reference to a single _DECL.  Ignore volatile memory references
   because they are not interesting for the optimizers.  */

bool
stmt_makes_single_store (gimple *stmt)
{
  tree lhs;

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_CALL)
    return false;

  if (!gimple_vdef (stmt))
    return false;

  lhs = gimple_get_lhs (stmt);

  /* A call statement may have a null LHS.  */
  if (!lhs)
    return false;

  return (!TREE_THIS_VOLATILE (lhs)
	  && (DECL_P (lhs)
	      || REFERENCE_CLASS_P (lhs)));
}
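
/* Illustrative cases (invented names): "*p_1 = x_2", "a.f = y_3" and
   a store "g = z_4" to a global all make a single store, while a
   register copy "x_5 = y_6" (which has no virtual definition) or a
   volatile store do not.  */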


/* Propagation statistics.  */
struct prop_stats_d
{
  long num_const_prop;
  long num_copy_prop;
  long num_stmts_folded;
  long num_dce;
};

static struct prop_stats_d prop_stats;

/* Replace USE references in statement STMT with the values stored in
   the propagator's lattice (as exposed by the get_value method).
   Return true if at least one reference was replaced.  */

bool
substitute_and_fold_engine::replace_uses_in (gimple *stmt)
{
  bool replaced = false;
  use_operand_p use;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      tree tuse = USE_FROM_PTR (use);
      tree val = get_value (tuse);

      if (val == tuse || val == NULL_TREE)
	continue;

      if (gimple_code (stmt) == GIMPLE_ASM
	  && !may_propagate_copy_into_asm (tuse))
	continue;

      if (!may_propagate_copy (tuse, val))
	continue;

      if (TREE_CODE (val) != SSA_NAME)
	prop_stats.num_const_prop++;
      else
	prop_stats.num_copy_prop++;

      propagate_value (use, val);

      replaced = true;
    }

  return replaced;
}


/* Replace propagated values into all the arguments for PHI using the
   values from the propagator's lattice.  Return true if at least one
   argument was replaced.  */

bool
substitute_and_fold_engine::replace_phi_args_in (gphi *phi)
{
  size_t i;
  bool replaced = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Folding PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);

      if (TREE_CODE (arg) == SSA_NAME)
	{
	  tree val = get_value (arg);

	  if (val && val != arg && may_propagate_copy (arg, val))
	    {
	      edge e = gimple_phi_arg_edge (phi, i);

	      if (TREE_CODE (val) != SSA_NAME)
		prop_stats.num_const_prop++;
	      else
		prop_stats.num_copy_prop++;

	      propagate_value (PHI_ARG_DEF_PTR (phi, i), val);
	      replaced = true;

	      /* If we propagated a copy and this argument flows
		 through an abnormal edge, update the replacement
		 accordingly.  */
	      if (TREE_CODE (val) == SSA_NAME
		  && e->flags & EDGE_ABNORMAL
		  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
		{
		  /* This can only occur for virtual operands, since
		     for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val)
		     would prevent replacement.  */
		  gcc_checking_assert (virtual_operand_p (val));
		  SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1;
		}
	    }
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (!replaced)
	fprintf (dump_file, "No folding possible\n");
      else
	{
	  fprintf (dump_file, "Folded into: ");
	  print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
	  fprintf (dump_file, "\n");
	}
    }

  return replaced;
}


class substitute_and_fold_dom_walker : public dom_walker
{
public:
    substitute_and_fold_dom_walker (cdi_direction direction,
				    class substitute_and_fold_engine *engine)
	: dom_walker (direction),
	  something_changed (false),
	  substitute_and_fold_engine (engine)
    {
      stmts_to_remove.create (0);
      stmts_to_fixup.create (0);
      need_eh_cleanup = BITMAP_ALLOC (NULL);
    }
    ~substitute_and_fold_dom_walker ()
    {
      stmts_to_remove.release ();
      stmts_to_fixup.release ();
      BITMAP_FREE (need_eh_cleanup);
    }

    virtual edge before_dom_children (basic_block);
    virtual void after_dom_children (basic_block) {}

    bool something_changed;
    vec<gimple *> stmts_to_remove;
    vec<gimple *> stmts_to_fixup;
    bitmap need_eh_cleanup;

    class substitute_and_fold_engine *substitute_and_fold_engine;
};

edge
substitute_and_fold_dom_walker::before_dom_children (basic_block bb)
{
  /* Propagate known values into PHI nodes.  */
  for (gphi_iterator i = gsi_start_phis (bb);
       !gsi_end_p (i);
       gsi_next (&i))
    {
      gphi *phi = i.phi ();
      tree res = gimple_phi_result (phi);
      if (virtual_operand_p (res))
	continue;
      if (res && TREE_CODE (res) == SSA_NAME)
	{
	  tree sprime = substitute_and_fold_engine->get_value (res);
	  if (sprime
	      && sprime != res
	      && may_propagate_copy (res, sprime))
	    {
	      stmts_to_remove.safe_push (phi);
	      continue;
	    }
	}
      something_changed |= substitute_and_fold_engine->replace_phi_args_in (phi);
    }

  /* Propagate known values into stmts.  In some cases this exposes
     more trivially deletable stmts to walk backward.  */
  for (gimple_stmt_iterator i = gsi_start_bb (bb);
       !gsi_end_p (i);
       gsi_next (&i))
    {
      bool did_replace;
      gimple *stmt = gsi_stmt (i);

      /* There is no point propagating into a stmt whose LHS we have
	 a value for that we can propagate into all of its uses.
	 Mark it for removal instead.  */
      tree lhs = gimple_get_lhs (stmt);
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	{
	  tree sprime = substitute_and_fold_engine->get_value (lhs);
	  if (sprime
	      && sprime != lhs
	      && may_propagate_copy (lhs, sprime)
	      && !stmt_could_throw_p (stmt)
	      && !gimple_has_side_effects (stmt)
	      /* We have to leave ASSERT_EXPRs around for jump-threading.  */
	      && (!is_gimple_assign (stmt)
		  || gimple_assign_rhs_code (stmt) != ASSERT_EXPR))
	    {
	      stmts_to_remove.safe_push (stmt);
	      continue;
	    }
	}

      /* Replace the statement with its folded version and mark it
	 folded.  */
      did_replace = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Folding statement: ");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}

      gimple *old_stmt = stmt;
      bool was_noreturn = (is_gimple_call (stmt)
			   && gimple_call_noreturn_p (stmt));

      /* Replace real uses in the statement.  */
      did_replace |= substitute_and_fold_engine->replace_uses_in (stmt);

      /* If we made a replacement, fold the statement.  */
      if (did_replace)
	{
	  fold_stmt (&i, follow_single_use_edges);
	  stmt = gsi_stmt (i);
	  gimple_set_modified (stmt, true);
	}

      /* Some statements may be simplified using propagator
	 specific information.  Do this before propagating
	 into the stmt to not disturb pass specific information.  */
      update_stmt_if_modified (stmt);
      if (substitute_and_fold_engine->fold_stmt (&i))
	{
	  did_replace = true;
	  prop_stats.num_stmts_folded++;
	  stmt = gsi_stmt (i);
	  gimple_set_modified (stmt, true);
	}

      /* If this is a control statement the propagator left edges
	 unexecuted on, force the condition in a way consistent with
	 that.  See PR66945 for cases where the propagator can end
	 up with a different idea of a taken edge than folding
	 (once undefined behavior is involved).  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  if ((EDGE_SUCC (bb, 0)->flags & EDGE_EXECUTABLE)
	      ^ (EDGE_SUCC (bb, 1)->flags & EDGE_EXECUTABLE))
	    {
	      if (((EDGE_SUCC (bb, 0)->flags & EDGE_TRUE_VALUE) != 0)
		  == ((EDGE_SUCC (bb, 0)->flags & EDGE_EXECUTABLE) != 0))
		gimple_cond_make_true (as_a <gcond *> (stmt));
	      else
		gimple_cond_make_false (as_a <gcond *> (stmt));
	      gimple_set_modified (stmt, true);
	      did_replace = true;
	    }
	}

      /* Now cleanup.  */
      if (did_replace)
	{
	  /* If we cleaned up EH information from the statement,
	     remove EH edges.  */
	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
	    bitmap_set_bit (need_eh_cleanup, bb->index);

	  /* If we turned a call that was not noreturn into a noreturn
	     one, schedule it for fixup.  */
	  if (!was_noreturn
	      && is_gimple_call (stmt)
	      && gimple_call_noreturn_p (stmt))
	    stmts_to_fixup.safe_push (stmt);

	  if (gimple_assign_single_p (stmt))
	    {
	      tree rhs = gimple_assign_rhs1 (stmt);

	      if (TREE_CODE (rhs) == ADDR_EXPR)
		recompute_tree_invariant_for_addr_expr (rhs);
	    }

	  /* Determine what needs to be done to update the SSA form.  */
	  update_stmt_if_modified (stmt);
	  if (!is_gimple_debug (stmt))
	    something_changed = true;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (did_replace)
	    {
	      fprintf (dump_file, "Folded into: ");
	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }
	  else
	    fprintf (dump_file, "Not folded\n");
	}
    }
  return NULL;
}



/* Perform final substitution and folding of propagated values.

   The lattice is queried through the get_value virtual method: at
   every use of SSA name N_I, get_value returns the single value that
   should be substituted, or NULL_TREE if no substitution applies.

   The fold_stmt virtual method is invoked on every statement for
   pass specific simplification, after values have been propagated
   into it.

   Statements whose LHS value can be propagated into all of their
   uses become trivially dead; they are collected during the walk
   and removed afterwards.

   Return TRUE when something changed.  */

bool
substitute_and_fold_engine::substitute_and_fold (void)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nSubstituting values and folding statements\n\n");

  memset (&prop_stats, 0, sizeof (prop_stats));

  calculate_dominance_info (CDI_DOMINATORS);
  substitute_and_fold_dom_walker walker (CDI_DOMINATORS, this);
  walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* We cannot remove stmts during the BB walk, especially not release
     SSA names there as that destroys the lattice of our callers.
     Remove stmts in reverse order to make debug stmt creation possible.  */
  while (!walker.stmts_to_remove.is_empty ())
    {
      gimple *stmt = walker.stmts_to_remove.pop ();
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Removing dead stmt ");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "\n");
	}
      prop_stats.num_dce++;
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      if (gimple_code (stmt) == GIMPLE_PHI)
	remove_phi_node (&gsi, true);
      else
	{
	  unlink_stmt_vdef (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	}
    }

  if (!bitmap_empty_p (walker.need_eh_cleanup))
    gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk.  Do this
     in reverse order so we don't inadvertently remove a stmt we want to
     fixup by visiting a dominating now noreturn call first.  */
  while (!walker.stmts_to_fixup.is_empty ())
    {
      gimple *stmt = walker.stmts_to_fixup.pop ();
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Fixing up noreturn call ");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "\n");
	}
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (cfun, "Constants propagated",
			    prop_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
			    prop_stats.num_copy_prop);
  statistics_counter_event (cfun, "Statements folded",
			    prop_stats.num_stmts_folded);
  statistics_counter_event (cfun, "Statements deleted",
			    prop_stats.num_dce);

  return walker.something_changed;
}
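
/* A minimal usage sketch (hypothetical; my_lattice_lookup is an
   invented helper that returns the known value of NAME, or NULL_TREE
   if unknown).  A pass that has computed a lattice of SSA name values
   subclasses substitute_and_fold_engine and lets the walker
   substitute values, fold statements and remove trivially dead ones:

     class my_subst_engine : public substitute_and_fold_engine
     {
     public:
       tree get_value (tree name) FINAL OVERRIDE
       {
	 return my_lattice_lookup (name);
       }
     };

     my_subst_engine engine;
     bool changed = engine.substitute_and_fold ();

   A pass specific fold_stmt override may be provided as well.  */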


/* Return true if we may propagate ORIG into DEST, false otherwise.  */

bool
may_propagate_copy (tree dest, tree orig)
{
  tree type_d = TREE_TYPE (dest);
  tree type_o = TREE_TYPE (orig);

  /* If ORIG is a default definition which flows in from an abnormal edge
     then the copy can be propagated.  It is important that we do so to avoid
     uninitialized copies.  */
  if (TREE_CODE (orig) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig)
      && SSA_NAME_IS_DEFAULT_DEF (orig)
      && (SSA_NAME_VAR (orig) == NULL_TREE
	  || TREE_CODE (SSA_NAME_VAR (orig)) == VAR_DECL))
    ;
  /* Otherwise if ORIG just flows in from an abnormal edge then the copy cannot
     be propagated.  */
  else if (TREE_CODE (orig) == SSA_NAME
	   && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
    return false;
  /* Similarly if DEST flows in from an abnormal edge then the copy cannot be
     propagated.  */
  else if (TREE_CODE (dest) == SSA_NAME
	   && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (dest))
    return false;

  /* Do not copy between types for which we *do* need a conversion.  */
  if (!useless_type_conversion_p (type_d, type_o))
    return false;

  /* Generally propagating virtual operands is not ok as that may
     create overlapping life-ranges.  */
  if (TREE_CODE (dest) == SSA_NAME && virtual_operand_p (dest))
    return false;

  /* Anything else is OK.  */
  return true;
}
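
/* For example (invented SSA names): a copy "dest_1 = orig_2" of
   compatible integer types with neither name occurring in an abnormal
   PHI may be propagated, while propagation is refused if orig_2 flows
   in from an abnormal edge (unless it is a default definition of a
   VAR_DECL), if dest_1 does, if the types need a real conversion, or
   if dest_1 is a virtual operand.  */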

/* Like may_propagate_copy, but use as the destination expression
   the principal expression (typically, the RHS) contained in
   statement DEST.  This is more efficient when working with the
   gimple tuples representation.  */

bool
may_propagate_copy_into_stmt (gimple *dest, tree orig)
{
  tree type_d;
  tree type_o;

  /* If the statement is a switch or a single-rhs assignment,
     then the expression to be replaced by the propagation may
     be an SSA_NAME.  Fortunately, there is an explicit tree
     for the expression, so we delegate to may_propagate_copy.  */

  if (gimple_assign_single_p (dest))
    return may_propagate_copy (gimple_assign_rhs1 (dest), orig);
  else if (gswitch *dest_swtch = dyn_cast <gswitch *> (dest))
    return may_propagate_copy (gimple_switch_index (dest_swtch), orig);

  /* In other cases, the expression is not materialized, so there
     is no destination to pass to may_propagate_copy.  On the other
     hand, the expression cannot be an SSA_NAME, so the analysis
     is much simpler.  */

  if (TREE_CODE (orig) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
    return false;

  if (is_gimple_assign (dest))
    type_d = TREE_TYPE (gimple_assign_lhs (dest));
  else if (gimple_code (dest) == GIMPLE_COND)
    type_d = boolean_type_node;
  else if (is_gimple_call (dest)
	   && gimple_call_lhs (dest) != NULL_TREE)
    type_d = TREE_TYPE (gimple_call_lhs (dest));
  else
    gcc_unreachable ();

  type_o = TREE_TYPE (orig);

  if (!useless_type_conversion_p (type_d, type_o))
    return false;

  return true;
}

/* Similarly, but we know that we're propagating into an ASM_EXPR.  */

bool
may_propagate_copy_into_asm (tree dest ATTRIBUTE_UNUSED)
{
  return true;
}


/* Common code for propagate_value and replace_exp.

   Replace use operand OP_P with VAL.  FOR_PROPAGATION indicates if the
   replacement is done to propagate a value or not.  */

static void
replace_exp_1 (use_operand_p op_p, tree val,
	       bool for_propagation ATTRIBUTE_UNUSED)
{
  if (flag_checking)
    {
      tree op = USE_FROM_PTR (op_p);
      gcc_assert (!(for_propagation
		    && TREE_CODE (op) == SSA_NAME
		    && TREE_CODE (val) == SSA_NAME
		    && !may_propagate_copy (op, val)));
    }

  if (TREE_CODE (val) == SSA_NAME)
    SET_USE (op_p, val);
  else
    SET_USE (op_p, unshare_expr (val));
}


/* Propagate the value VAL (assumed to be a constant or another SSA_NAME)
   into the operand pointed to by OP_P.

   Use this version for const/copy propagation as it will perform additional
   checks to ensure validity of the const/copy propagation.  */

void
propagate_value (use_operand_p op_p, tree val)
{
  replace_exp_1 (op_p, val, true);
}

/* Replace *OP_P with value VAL (assumed to be a constant or another SSA_NAME).

   Use this version when not const/copy propagating values.  For example,
   PRE uses this version when building expressions as they would appear
   in specific blocks taking into account actions of PHI nodes.

   The statement in which an expression has been replaced should be
   folded using fold_stmt_inplace.  */

void
replace_exp (use_operand_p op_p, tree val)
{
  replace_exp_1 (op_p, val, false);
}


/* Propagate the value VAL (assumed to be a constant or another SSA_NAME)
   into the tree pointed to by OP_P.

   Use this version for const/copy propagation when SSA operands are not
   available.  It will perform the additional checks to ensure validity of
   the const/copy propagation, but will not update any operand information.
   Be sure to mark the stmt as modified.  */

void
propagate_tree_value (tree *op_p, tree val)
{
  if (TREE_CODE (val) == SSA_NAME)
    *op_p = val;
  else
    *op_p = unshare_expr (val);
}


/* Like propagate_tree_value, but use as the operand to replace
   the principal expression (typically, the RHS) contained in the
   statement referenced by iterator GSI.  Note that it is not
   always possible to update the statement in-place, so a new
   statement may be created to replace the original.  */

void
propagate_tree_value_into_stmt (gimple_stmt_iterator *gsi, tree val)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (is_gimple_assign (stmt))
    {
      tree expr = NULL_TREE;
      if (gimple_assign_single_p (stmt))
	expr = gimple_assign_rhs1 (stmt);
      propagate_tree_value (&expr, val);
      gimple_assign_set_rhs_from_tree (gsi, expr);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      tree lhs = NULL_TREE;
      tree rhs = build_zero_cst (TREE_TYPE (val));
      propagate_tree_value (&lhs, val);
      gimple_cond_set_code (cond_stmt, NE_EXPR);
      gimple_cond_set_lhs (cond_stmt, lhs);
      gimple_cond_set_rhs (cond_stmt, rhs);
    }
  else if (is_gimple_call (stmt)
	   && gimple_call_lhs (stmt) != NULL_TREE)
    {
      tree expr = NULL_TREE;
      bool res;
      propagate_tree_value (&expr, val);
      res = update_call_from_tree (gsi, expr);
      gcc_assert (res);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    propagate_tree_value (gimple_switch_index_ptr (swtch_stmt), val);
  else
    gcc_unreachable ();
}