1 /* Conditional constant propagation pass for the GNU compiler.
2    Copyright (C) 2000-2013 Free Software Foundation, Inc.
3    Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4    Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 /* Conditional constant propagation (CCP) is based on the SSA
23    propagation engine (tree-ssa-propagate.c).  Constant assignments of
24    the form VAR = CST are propagated from the assignments into uses of
25    VAR, which in turn may generate new constants.  The simulation uses
26    a four level lattice to keep track of constant values associated
27    with SSA names.  Given an SSA name V_i, it may take one of the
28    following values:
29 
30 	UNINITIALIZED   ->  the initial state of the value.  This value
31 			    is replaced with a correct initial value
32 			    the first time the value is used, so the
33 			    rest of the pass does not need to care about
34 			    it.  Using this value simplifies initialization
35 			    of the pass, and prevents us from needlessly
36 			    scanning statements that are never reached.
37 
38 	UNDEFINED	->  V_i is a local variable whose definition
39 			    has not been processed yet.  Therefore we
40 			    don't yet know if its value is a constant
41 			    or not.
42 
43 	CONSTANT	->  V_i has been found to hold a constant
44 			    value C.
45 
46 	VARYING		->  V_i cannot take a constant value, or if it
47 			    does, it is not possible to determine it
48 			    at compile time.
49 
50    The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51 
52    1- In ccp_visit_stmt, we are interested in assignments whose RHS
53       evaluates into a constant and conditional jumps whose predicate
54       evaluates into a boolean true or false.  When an assignment of
55       the form V_i = CONST is found, V_i's lattice value is set to
56       CONSTANT and CONST is associated with it.  This causes the
57       propagation engine to add all the SSA edges coming out of the
58       assignment into the worklists, so that statements that use V_i
59       can be visited.
60 
61       If the statement is a conditional with a constant predicate, we
62       mark the outgoing edges as executable or not executable
63       depending on the predicate's value.  This is then used when
64       visiting PHI nodes to know when a PHI argument can be ignored.
65 
66 
67    2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68       same constant C, then the LHS of the PHI is set to C.  This
69       evaluation is known as the "meet operation".  Since one of the
70       goals of this evaluation is to optimistically return constant
71       values as often as possible, it uses two main short cuts:
72 
73       - If an argument is flowing in through a non-executable edge, it
74 	is ignored.  This is useful in cases like this:
75 
76 			if (PRED)
77 			  a_9 = 3;
78 			else
79 			  a_10 = 100;
80 			a_11 = PHI (a_9, a_10)
81 
82 	If PRED is known to always evaluate to false, then we can
83 	assume that a_11 will always take its value from a_10, meaning
84 	that instead of considering it VARYING (a_9 and a_10 have
85 	different values), we can consider it CONSTANT 100.
86 
87       - If an argument has an UNDEFINED value, then it does not affect
88 	the outcome of the meet operation.  If a variable V_i has an
89 	UNDEFINED value, it means that either its defining statement
90 	hasn't been visited yet or V_i has no defining statement, in
91 	which case the original symbol 'V' is being used
92 	uninitialized.  Since 'V' is a local variable, the compiler
93 	may assume any initial value for it.
94 
95 
96    After propagation, every variable V_i that ends up with a lattice
97    value of CONSTANT will have the associated constant value in the
98    array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
99    final substitution and folding.
100 
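   As a small end-to-end illustration (a sketch, not code taken from
   any particular test case), given GIMPLE like

			x_1 = 4;
			y_2 = x_1 + 5;
			if (y_2 > 8) ...

   propagation marks x_1 CONSTANT 4, which in turn makes y_2 CONSTANT
   9; the final substitution then folds the predicate to true, so only
   one of the conditional's outgoing edges remains executable.
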
101    References:
102 
103      Constant propagation with conditional branches,
104      Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
105 
106      Building an Optimizing Compiler,
107      Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
108 
109      Advanced Compiler Design and Implementation,
110      Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
111 
112 #include "config.h"
113 #include "system.h"
114 #include "coretypes.h"
115 #include "tm.h"
116 #include "tree.h"
117 #include "flags.h"
118 #include "tm_p.h"
119 #include "basic-block.h"
120 #include "function.h"
121 #include "gimple-pretty-print.h"
122 #include "tree-flow.h"
123 #include "tree-pass.h"
124 #include "tree-ssa-propagate.h"
125 #include "value-prof.h"
126 #include "langhooks.h"
127 #include "target.h"
128 #include "diagnostic-core.h"
129 #include "dbgcnt.h"
130 #include "gimple-fold.h"
131 #include "params.h"
132 #include "hash-table.h"
133 
134 
135 /* Possible lattice values.  */
136 typedef enum
137 {
138   UNINITIALIZED,
139   UNDEFINED,
140   CONSTANT,
141   VARYING
142 } ccp_lattice_t;
143 
144 struct prop_value_d {
145     /* Lattice value.  */
146     ccp_lattice_t lattice_val;
147 
148     /* Propagated value.  */
149     tree value;
150 
151     /* Mask that applies to the propagated value during CCP.  For
152        X with a CONSTANT lattice value X & ~mask == value & ~mask.  */
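    /* As a purely illustrative example: value == 0x10 together with
       mask == 0x3 describes the set { 0x10, 0x11, 0x12, 0x13 }; bits
       set in the mask are unknown, bits clear in the mask are fixed
       to the corresponding bits of value.  */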
153     double_int mask;
154 };
155 
156 typedef struct prop_value_d prop_value_t;
157 
158 /* Array of propagated constant values.  After propagation,
159    CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
160    the constant is held in an SSA name representing a memory store
161    (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
162    memory reference used to store (i.e., the LHS of the assignment
163    doing the store).  */
164 static prop_value_t *const_val;
165 static unsigned n_const_val;
166 
167 static void canonicalize_float_value (prop_value_t *);
168 static bool ccp_fold_stmt (gimple_stmt_iterator *);
169 
170 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */
171 
172 static void
173 dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
174 {
175   switch (val.lattice_val)
176     {
177     case UNINITIALIZED:
178       fprintf (outf, "%sUNINITIALIZED", prefix);
179       break;
180     case UNDEFINED:
181       fprintf (outf, "%sUNDEFINED", prefix);
182       break;
183     case VARYING:
184       fprintf (outf, "%sVARYING", prefix);
185       break;
186     case CONSTANT:
187       if (TREE_CODE (val.value) != INTEGER_CST
188 	  || val.mask.is_zero ())
189 	{
190 	  fprintf (outf, "%sCONSTANT ", prefix);
191 	  print_generic_expr (outf, val.value, dump_flags);
192 	}
193       else
194 	{
195 	  double_int cval = tree_to_double_int (val.value).and_not (val.mask);
196 	  fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
197 		   prefix, cval.high, cval.low);
198 	  fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
199 		   val.mask.high, val.mask.low);
200 	}
201       break;
202     default:
203       gcc_unreachable ();
204     }
205 }
206 
207 
208 /* Print lattice value VAL to stderr.  */
209 
210 void debug_lattice_value (prop_value_t val);
211 
212 DEBUG_FUNCTION void
213 debug_lattice_value (prop_value_t val)
214 {
215   dump_lattice_value (stderr, "", val);
216   fprintf (stderr, "\n");
217 }
218 
219 
220 /* Compute a default value for variable VAR and store it in the
221    CONST_VAL array.  The following rules are used to get default
222    values:
223 
224    1- Global and static variables that are declared constant are
225       considered CONSTANT.
226 
227    2- Any other value is considered UNDEFINED.  This is useful when
228       considering PHI nodes.  PHI arguments that are undefined do not
229       change the constant value of the PHI node, which allows for more
230       constants to be propagated.
231 
232    3- Variables defined by statements other than assignments and PHI
233       nodes are considered VARYING.
234 
235    4- Initial values of variables that are not GIMPLE registers are
236       considered VARYING.  */
237 
238 static prop_value_t
239 get_default_value (tree var)
240 {
241   prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
242   gimple stmt;
243 
244   stmt = SSA_NAME_DEF_STMT (var);
245 
246   if (gimple_nop_p (stmt))
247     {
248       /* Variables defined by an empty statement are those used
249 	 before being initialized.  If VAR is a local variable, we
250 	 can assume initially that it is UNDEFINED, otherwise we must
251 	 consider it VARYING.  */
252       if (!virtual_operand_p (var)
253 	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
254 	val.lattice_val = UNDEFINED;
255       else
256 	{
257 	  val.lattice_val = VARYING;
258 	  val.mask = double_int_minus_one;
259 	}
260     }
261   else if (is_gimple_assign (stmt)
262 	   /* Value-returning GIMPLE_CALL statements assign to
263 	      a variable, and are treated similarly to GIMPLE_ASSIGN.  */
264 	   || (is_gimple_call (stmt)
265 	       && gimple_call_lhs (stmt) != NULL_TREE)
266 	   || gimple_code (stmt) == GIMPLE_PHI)
267     {
268       tree cst;
269       if (gimple_assign_single_p (stmt)
270 	  && DECL_P (gimple_assign_rhs1 (stmt))
271 	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
272 	{
273 	  val.lattice_val = CONSTANT;
274 	  val.value = cst;
275 	}
276       else
277 	/* Any other variable defined by an assignment or a PHI node
278 	   is considered UNDEFINED.  */
279 	val.lattice_val = UNDEFINED;
280     }
281   else
282     {
283       /* Otherwise, VAR will never take on a constant value.  */
284       val.lattice_val = VARYING;
285       val.mask = double_int_minus_one;
286     }
287 
288   return val;
289 }
290 
291 
292 /* Get the constant value associated with variable VAR.  */
293 
294 static inline prop_value_t *
295 get_value (tree var)
296 {
297   prop_value_t *val;
298 
299   if (const_val == NULL
300       || SSA_NAME_VERSION (var) >= n_const_val)
301     return NULL;
302 
303   val = &const_val[SSA_NAME_VERSION (var)];
304   if (val->lattice_val == UNINITIALIZED)
305     *val = get_default_value (var);
306 
307   canonicalize_float_value (val);
308 
309   return val;
310 }
311 
312 /* Return the constant tree value associated with VAR.  */
313 
314 static inline tree
315 get_constant_value (tree var)
316 {
317   prop_value_t *val;
318   if (TREE_CODE (var) != SSA_NAME)
319     {
320       if (is_gimple_min_invariant (var))
321         return var;
322       return NULL_TREE;
323     }
324   val = get_value (var);
325   if (val
326       && val->lattice_val == CONSTANT
327       && (TREE_CODE (val->value) != INTEGER_CST
328 	  || val->mask.is_zero ()))
329     return val->value;
330   return NULL_TREE;
331 }
332 
333 /* Sets the value associated with VAR to VARYING.  */
334 
335 static inline void
336 set_value_varying (tree var)
337 {
338   prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
339 
340   val->lattice_val = VARYING;
341   val->value = NULL_TREE;
342   val->mask = double_int_minus_one;
343 }
344 
345 /* For float types, modify the value of VAL to make ccp work correctly
346    for non-standard values (-0, NaN):
347 
348    If HONOR_SIGNED_ZEROS is false, and VAL = -0, we canonicalize it to 0.
349    If HONOR_NANS is false, and VAL is NaN, we canonicalize it to UNDEFINED.
350      This is to fix the following problem (see PR 29921): Suppose we have
351 
352      x = 0.0 * y
353 
354      and we set value of y to NaN.  This causes value of x to be set to NaN.
355      When we later determine that y is in fact VARYING, fold uses the fact
356      that HONOR_NANS is false, and we try to change the value of x to 0,
357      causing an ICE.  With HONOR_NANS being false, the real appearance of
358      NaN would cause undefined behavior, though, so claiming that y (and x)
359      are UNDEFINED initially is correct.  */
360 
361 static void
362 canonicalize_float_value (prop_value_t *val)
363 {
364   enum machine_mode mode;
365   tree type;
366   REAL_VALUE_TYPE d;
367 
368   if (val->lattice_val != CONSTANT
369       || TREE_CODE (val->value) != REAL_CST)
370     return;
371 
372   d = TREE_REAL_CST (val->value);
373   type = TREE_TYPE (val->value);
374   mode = TYPE_MODE (type);
375 
376   if (!HONOR_SIGNED_ZEROS (mode)
377       && REAL_VALUE_MINUS_ZERO (d))
378     {
379       val->value = build_real (type, dconst0);
380       return;
381     }
382 
383   if (!HONOR_NANS (mode)
384       && REAL_VALUE_ISNAN (d))
385     {
386       val->lattice_val = UNDEFINED;
387       val->value = NULL;
388       return;
389     }
390 }
391 
392 /* Return whether the lattice transition is valid.  */
393 
394 static bool
395 valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
396 {
397   /* Lattice transitions must always be monotonically increasing in
398      value.  */
399   if (old_val.lattice_val < new_val.lattice_val)
400     return true;
401 
402   if (old_val.lattice_val != new_val.lattice_val)
403     return false;
404 
405   if (!old_val.value && !new_val.value)
406     return true;
407 
408   /* Now both lattice values are CONSTANT.  */
409 
410   /* Allow transitioning from PHI <&x, not executable> == &x
411      to PHI <&x, &y> == common alignment.  */
412   if (TREE_CODE (old_val.value) != INTEGER_CST
413       && TREE_CODE (new_val.value) == INTEGER_CST)
414     return true;
415 
416   /* Bit-lattices have to agree in the still valid bits.  */
417   if (TREE_CODE (old_val.value) == INTEGER_CST
418       && TREE_CODE (new_val.value) == INTEGER_CST)
419     return tree_to_double_int (old_val.value).and_not (new_val.mask)
420 	   == tree_to_double_int (new_val.value).and_not (new_val.mask);
421 
422   /* Otherwise constant values have to agree.  */
423   return operand_equal_p (old_val.value, new_val.value, 0);
424 }
425 
426 /* Set the value for variable VAR to NEW_VAL.  Return true if the new
427    value is different from VAR's previous value.  */
428 
429 static bool
430 set_lattice_value (tree var, prop_value_t new_val)
431 {
432   /* We can deal with old UNINITIALIZED values just fine here.  */
433   prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
434 
435   canonicalize_float_value (&new_val);
436 
437   /* We have to be careful to not go up the bitwise lattice
438      represented by the mask.
439      ???  This doesn't seem to be the best place to enforce this.  */
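  /* Illustration with hypothetical values: if the old value was 4 with
     mask 1 (bit 0 unknown) and the new value is 5 with mask 0, the
     adjustment below ORs in the old mask and the differing bits so
     that bit 0 stays unknown rather than flipping back to known.  */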
440   if (new_val.lattice_val == CONSTANT
441       && old_val->lattice_val == CONSTANT
442       && TREE_CODE (new_val.value) == INTEGER_CST
443       && TREE_CODE (old_val->value) == INTEGER_CST)
444     {
445       double_int diff;
446       diff = tree_to_double_int (new_val.value)
447 	     ^ tree_to_double_int (old_val->value);
448       new_val.mask = new_val.mask | old_val->mask | diff;
449     }
450 
451   gcc_assert (valid_lattice_transition (*old_val, new_val));
452 
453   /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
454      caller that this was a non-transition.  */
455   if (old_val->lattice_val != new_val.lattice_val
456       || (new_val.lattice_val == CONSTANT
457 	  && TREE_CODE (new_val.value) == INTEGER_CST
458 	  && (TREE_CODE (old_val->value) != INTEGER_CST
459 	      || new_val.mask != old_val->mask)))
460     {
461       /* ???  We would like to delay creation of INTEGER_CSTs from
462 	 partially constant values here.  */
463 
464       if (dump_file && (dump_flags & TDF_DETAILS))
465 	{
466 	  dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
467 	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
468 	}
469 
470       *old_val = new_val;
471 
472       gcc_assert (new_val.lattice_val != UNINITIALIZED);
473       return true;
474     }
475 
476   return false;
477 }
478 
479 static prop_value_t get_value_for_expr (tree, bool);
480 static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
481 static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
482 			       tree, double_int, double_int,
483 			       tree, double_int, double_int);
484 
485 /* Return a double_int that can be used for bitwise simplifications
486    from VAL.  */
487 
488 static double_int
489 value_to_double_int (prop_value_t val)
490 {
491   if (val.value
492       && TREE_CODE (val.value) == INTEGER_CST)
493     return tree_to_double_int (val.value);
494   else
495     return double_int_zero;
496 }
497 
498 /* Return the value for the address expression EXPR based on alignment
499    information.  */
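/* For example (illustrative numbers only): a pointer known to be
   8-byte aligned with a misalignment of 2 bytes yields a CONSTANT
   whose low three bits are 2 and whose remaining bits are all set in
   the mask, i.e. unknown.  */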
500 
501 static prop_value_t
502 get_value_from_alignment (tree expr)
503 {
504   tree type = TREE_TYPE (expr);
505   prop_value_t val;
506   unsigned HOST_WIDE_INT bitpos;
507   unsigned int align;
508 
509   gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
510 
511   get_pointer_alignment_1 (expr, &align, &bitpos);
512   val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
513 	      ? double_int::mask (TYPE_PRECISION (type))
514 	      : double_int_minus_one)
515 	     .and_not (double_int::from_uhwi (align / BITS_PER_UNIT - 1));
516   val.lattice_val = val.mask.is_minus_one () ? VARYING : CONSTANT;
517   if (val.lattice_val == CONSTANT)
518     val.value
519       = double_int_to_tree (type,
520 			    double_int::from_uhwi (bitpos / BITS_PER_UNIT));
521   else
522     val.value = NULL_TREE;
523 
524   return val;
525 }
526 
527 /* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
528    return constant bits extracted from alignment information for
529    invariant addresses.  */
530 
531 static prop_value_t
532 get_value_for_expr (tree expr, bool for_bits_p)
533 {
534   prop_value_t val;
535 
536   if (TREE_CODE (expr) == SSA_NAME)
537     {
538       val = *get_value (expr);
539       if (for_bits_p
540 	  && val.lattice_val == CONSTANT
541 	  && TREE_CODE (val.value) == ADDR_EXPR)
542 	val = get_value_from_alignment (val.value);
543     }
544   else if (is_gimple_min_invariant (expr)
545 	   && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
546     {
547       val.lattice_val = CONSTANT;
548       val.value = expr;
549       val.mask = double_int_zero;
550       canonicalize_float_value (&val);
551     }
552   else if (TREE_CODE (expr) == ADDR_EXPR)
553     val = get_value_from_alignment (expr);
554   else
555     {
556       val.lattice_val = VARYING;
557       val.mask = double_int_minus_one;
558       val.value = NULL_TREE;
559     }
560   return val;
561 }
562 
563 /* Return the likely CCP lattice value for STMT.
564 
565    If STMT has no operands, then return CONSTANT.
566 
567    Else if undefinedness of operands of STMT cause its value to be
568    undefined, then return UNDEFINED.
569 
570    Else if any operands of STMT are constants, then return CONSTANT.
571 
572    Else return VARYING.  */
573 
574 static ccp_lattice_t
575 likely_value (gimple stmt)
576 {
577   bool has_constant_operand, has_undefined_operand, all_undefined_operands;
578   tree use;
579   ssa_op_iter iter;
580   unsigned i;
581 
582   enum gimple_code code = gimple_code (stmt);
583 
584   /* This function appears to be called only for assignments, calls,
585      conditionals, and switches, due to the logic in visit_stmt.  */
586   gcc_assert (code == GIMPLE_ASSIGN
587               || code == GIMPLE_CALL
588               || code == GIMPLE_COND
589               || code == GIMPLE_SWITCH);
590 
591   /* If the statement has volatile operands, it won't fold to a
592      constant value.  */
593   if (gimple_has_volatile_ops (stmt))
594     return VARYING;
595 
596   /* Arrive here for more complex cases.  */
597   has_constant_operand = false;
598   has_undefined_operand = false;
599   all_undefined_operands = true;
600   FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
601     {
602       prop_value_t *val = get_value (use);
603 
604       if (val->lattice_val == UNDEFINED)
605 	has_undefined_operand = true;
606       else
607 	all_undefined_operands = false;
608 
609       if (val->lattice_val == CONSTANT)
610 	has_constant_operand = true;
611     }
612 
613   /* There may be constants in regular rhs operands.  For calls we
614      have to ignore lhs, fndecl and static chain, otherwise only
615      the lhs.  */
616   for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
617        i < gimple_num_ops (stmt); ++i)
618     {
619       tree op = gimple_op (stmt, i);
620       if (!op || TREE_CODE (op) == SSA_NAME)
621 	continue;
622       if (is_gimple_min_invariant (op))
623 	has_constant_operand = true;
624     }
625 
626   if (has_constant_operand)
627     all_undefined_operands = false;
628 
629   /* If the operation combines operands like COMPLEX_EXPR make sure to
630      not mark the result UNDEFINED if only one part of the result is
631      undefined.  */
632   if (has_undefined_operand && all_undefined_operands)
633     return UNDEFINED;
634   else if (code == GIMPLE_ASSIGN && has_undefined_operand)
635     {
636       switch (gimple_assign_rhs_code (stmt))
637 	{
638 	/* Unary operators are handled with all_undefined_operands.  */
639 	case PLUS_EXPR:
640 	case MINUS_EXPR:
641 	case POINTER_PLUS_EXPR:
642 	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
643 	     Not bitwise operators, one VARYING operand may specify the
644 	     result completely.  Not logical operators for the same reason.
645 	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
646 	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
647 	     the undefined operand may be promoted.  */
648 	  return UNDEFINED;
649 
650 	case ADDR_EXPR:
651 	  /* If any part of an address is UNDEFINED, like the index
652 	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
653 	  return UNDEFINED;
654 
655 	default:
656 	  ;
657 	}
658     }
659   /* If there was an UNDEFINED operand but the result may be not UNDEFINED
660      fall back to CONSTANT.  During iteration UNDEFINED may still drop
661      to CONSTANT.  */
662   if (has_undefined_operand)
663     return CONSTANT;
664 
665   /* We do not consider virtual operands here -- load from read-only
666      memory may have only VARYING virtual operands, but still be
667      constant.  */
668   if (has_constant_operand
669       || gimple_references_memory_p (stmt))
670     return CONSTANT;
671 
672   return VARYING;
673 }
674 
675 /* Returns true if STMT cannot be constant.  */
676 
677 static bool
678 surely_varying_stmt_p (gimple stmt)
679 {
680   /* If the statement has operands that we cannot handle, it cannot be
681      constant.  */
682   if (gimple_has_volatile_ops (stmt))
683     return true;
684 
685   /* If it is a call that does not return a value, or a direct call
686      to a function that is not a built-in, it is varying.  */
687   if (is_gimple_call (stmt))
688     {
689       tree fndecl;
690       if (!gimple_call_lhs (stmt)
691 	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
692 	      && !DECL_BUILT_IN (fndecl)))
693 	return true;
694     }
695 
696   /* Any other store operation is not interesting.  */
697   else if (gimple_vdef (stmt))
698     return true;
699 
700   /* Anything other than assignments and conditional jumps are not
701      interesting for CCP.  */
702   if (gimple_code (stmt) != GIMPLE_ASSIGN
703       && gimple_code (stmt) != GIMPLE_COND
704       && gimple_code (stmt) != GIMPLE_SWITCH
705       && gimple_code (stmt) != GIMPLE_CALL)
706     return true;
707 
708   return false;
709 }
710 
711 /* Initialize local data structures for CCP.  */
712 
713 static void
714 ccp_initialize (void)
715 {
716   basic_block bb;
717 
718   n_const_val = num_ssa_names;
719   const_val = XCNEWVEC (prop_value_t, n_const_val);
720 
721   /* Initialize simulation flags for PHI nodes and statements.  */
722   FOR_EACH_BB (bb)
723     {
724       gimple_stmt_iterator i;
725 
726       for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
727         {
728 	  gimple stmt = gsi_stmt (i);
729 	  bool is_varying;
730 
731 	  /* If the statement is a control insn, we must simulate it
732 	     at least once; failing to do so means its outgoing edges
733 	     would never get added.  */
734 	  if (stmt_ends_bb_p (stmt))
735 	    is_varying = false;
736 	  else
737 	    is_varying = surely_varying_stmt_p (stmt);
738 
739 	  if (is_varying)
740 	    {
741 	      tree def;
742 	      ssa_op_iter iter;
743 
744 	      /* If the statement will not produce a constant, mark
745 		 all its outputs VARYING.  */
746 	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
747 		set_value_varying (def);
748 	    }
749           prop_set_simulate_again (stmt, !is_varying);
750 	}
751     }
752 
753   /* Now process PHI nodes.  We never clear the simulate_again flag on
754      phi nodes, since we do not know which edges are executable yet,
755      except for phi nodes for virtual operands when we do not do store ccp.  */
756   FOR_EACH_BB (bb)
757     {
758       gimple_stmt_iterator i;
759 
760       for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
761         {
762           gimple phi = gsi_stmt (i);
763 
764 	  if (virtual_operand_p (gimple_phi_result (phi)))
765             prop_set_simulate_again (phi, false);
766 	  else
767             prop_set_simulate_again (phi, true);
768 	}
769     }
770 }
771 
772 /* Debug count support.  Reset the values of SSA names to
773    VARYING when the total number of SSA names analyzed is
774    beyond the debug count specified.  */
775 
776 static void
777 do_dbg_cnt (void)
778 {
779   unsigned i;
780   for (i = 0; i < num_ssa_names; i++)
781     {
782       if (!dbg_cnt (ccp))
783         {
784           const_val[i].lattice_val = VARYING;
785 	  const_val[i].mask = double_int_minus_one;
786           const_val[i].value = NULL_TREE;
787         }
788     }
789 }
790 
791 
792 /* Do final substitution of propagated values, cleanup the flowgraph and
793    free allocated storage.
794 
795    Return TRUE when something was optimized.  */
796 
797 static bool
798 ccp_finalize (void)
799 {
800   bool something_changed;
801   unsigned i;
802 
803   do_dbg_cnt ();
804 
805   /* Derive alignment and misalignment information from partially
806      constant pointers in the lattice.  */
807   for (i = 1; i < num_ssa_names; ++i)
808     {
809       tree name = ssa_name (i);
810       prop_value_t *val;
811       unsigned int tem, align;
812 
813       if (!name
814 	  || !POINTER_TYPE_P (TREE_TYPE (name)))
815 	continue;
816 
817       val = get_value (name);
818       if (val->lattice_val != CONSTANT
819 	  || TREE_CODE (val->value) != INTEGER_CST)
820 	continue;
821 
822       /* Trailing constant bits specify the alignment, trailing value
823 	 bits the misalignment.  */
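      /* E.g. (hypothetical lattice value): a mask with three trailing
	 zero bits and a value whose low bits are ...010 yield an
	 8-byte alignment with misalignment 2.  */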
824       tem = val->mask.low;
825       align = (tem & -tem);
826       if (align > 1)
827 	set_ptr_info_alignment (get_ptr_info (name), align,
828 				TREE_INT_CST_LOW (val->value) & (align - 1));
829     }
830 
831   /* Perform substitutions based on the known constant values.  */
832   something_changed = substitute_and_fold (get_constant_value,
833 					   ccp_fold_stmt, true);
834 
835   free (const_val);
836   const_val = NULL;
837   return something_changed;
838 }
839 
840 
841 /* Compute the meet operator between *VAL1 and *VAL2.  Store the result
842    in VAL1.
843 
844    		any  M UNDEFINED   = any
845 		any  M VARYING     = VARYING
846 		Ci   M Cj	   = Ci		if (i == j)
847 		Ci   M Cj	   = VARYING	if (i != j)
848    */
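
/* As an illustration of the bitwise refinement performed below (the
   numbers are hypothetical): meeting CONSTANT 12 (0b1100) with
   CONSTANT 8 (0b1000) keeps the value but ORs bit 2 into the mask,
   describing the partially known constant 0b1?00; the result only
   drops to VARYING when no known bits remain at all.  */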
849 
850 static void
851 ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
852 {
853   if (val1->lattice_val == UNDEFINED)
854     {
855       /* UNDEFINED M any = any   */
856       *val1 = *val2;
857     }
858   else if (val2->lattice_val == UNDEFINED)
859     {
860       /* any M UNDEFINED = any
861          Nothing to do.  VAL1 already contains the value we want.  */
862       ;
863     }
864   else if (val1->lattice_val == VARYING
865            || val2->lattice_val == VARYING)
866     {
867       /* any M VARYING = VARYING.  */
868       val1->lattice_val = VARYING;
869       val1->mask = double_int_minus_one;
870       val1->value = NULL_TREE;
871     }
872   else if (val1->lattice_val == CONSTANT
873 	   && val2->lattice_val == CONSTANT
874 	   && TREE_CODE (val1->value) == INTEGER_CST
875 	   && TREE_CODE (val2->value) == INTEGER_CST)
876     {
877       /* Ci M Cj = Ci		if (i == j)
878 	 Ci M Cj = VARYING	if (i != j)
879 
880          For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
881 	 drop to varying.  */
882       val1->mask = val1->mask | val2->mask
883 		   | (tree_to_double_int (val1->value)
884 		      ^ tree_to_double_int (val2->value));
885       if (val1->mask.is_minus_one ())
886 	{
887 	  val1->lattice_val = VARYING;
888 	  val1->value = NULL_TREE;
889 	}
890     }
891   else if (val1->lattice_val == CONSTANT
892 	   && val2->lattice_val == CONSTANT
893 	   && simple_cst_equal (val1->value, val2->value) == 1)
894     {
895       /* Ci M Cj = Ci		if (i == j)
896 	 Ci M Cj = VARYING	if (i != j)
897 
898          VAL1 already contains the value we want for equivalent values.  */
899     }
900   else if (val1->lattice_val == CONSTANT
901 	   && val2->lattice_val == CONSTANT
902 	   && (TREE_CODE (val1->value) == ADDR_EXPR
903 	       || TREE_CODE (val2->value) == ADDR_EXPR))
904     {
905       /* When unequal addresses are involved, try meeting for
906 	 alignment.  */
907       prop_value_t tem = *val2;
908       if (TREE_CODE (val1->value) == ADDR_EXPR)
909 	*val1 = get_value_for_expr (val1->value, true);
910       if (TREE_CODE (val2->value) == ADDR_EXPR)
911 	tem = get_value_for_expr (val2->value, true);
912       ccp_lattice_meet (val1, &tem);
913     }
914   else
915     {
916       /* Any other combination is VARYING.  */
917       val1->lattice_val = VARYING;
918       val1->mask = double_int_minus_one;
919       val1->value = NULL_TREE;
920     }
921 }
922 
923 
924 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
925    lattice values to determine PHI_NODE's lattice value.  The value of a
926    PHI node is determined by calling ccp_lattice_meet with all the arguments
927    of the PHI node that are incoming via executable edges.  */
928 
929 static enum ssa_prop_result
930 ccp_visit_phi_node (gimple phi)
931 {
932   unsigned i;
933   prop_value_t *old_val, new_val;
934 
935   if (dump_file && (dump_flags & TDF_DETAILS))
936     {
937       fprintf (dump_file, "\nVisiting PHI node: ");
938       print_gimple_stmt (dump_file, phi, 0, dump_flags);
939     }
940 
941   old_val = get_value (gimple_phi_result (phi));
942   switch (old_val->lattice_val)
943     {
944     case VARYING:
945       return SSA_PROP_VARYING;
946 
947     case CONSTANT:
948       new_val = *old_val;
949       break;
950 
951     case UNDEFINED:
952       new_val.lattice_val = UNDEFINED;
953       new_val.value = NULL_TREE;
954       break;
955 
956     default:
957       gcc_unreachable ();
958     }
959 
960   for (i = 0; i < gimple_phi_num_args (phi); i++)
961     {
962       /* Compute the meet operator over all the PHI arguments flowing
963 	 through executable edges.  */
964       edge e = gimple_phi_arg_edge (phi, i);
965 
966       if (dump_file && (dump_flags & TDF_DETAILS))
967 	{
968 	  fprintf (dump_file,
969 	      "\n    Argument #%d (%d -> %d %sexecutable)\n",
970 	      i, e->src->index, e->dest->index,
971 	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
972 	}
973 
974       /* If the incoming edge is executable, compute the meet operator for
975 	 the existing value of the PHI node and the current PHI argument.  */
976       if (e->flags & EDGE_EXECUTABLE)
977 	{
978 	  tree arg = gimple_phi_arg (phi, i)->def;
979 	  prop_value_t arg_val = get_value_for_expr (arg, false);
980 
981 	  ccp_lattice_meet (&new_val, &arg_val);
982 
983 	  if (dump_file && (dump_flags & TDF_DETAILS))
984 	    {
985 	      fprintf (dump_file, "\t");
986 	      print_generic_expr (dump_file, arg, dump_flags);
987 	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
988 	      fprintf (dump_file, "\n");
989 	    }
990 
991 	  if (new_val.lattice_val == VARYING)
992 	    break;
993 	}
994     }
995 
996   if (dump_file && (dump_flags & TDF_DETAILS))
997     {
998       dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
999       fprintf (dump_file, "\n\n");
1000     }
1001 
1002   /* Make the transition to the new value.  */
1003   if (set_lattice_value (gimple_phi_result (phi), new_val))
1004     {
1005       if (new_val.lattice_val == VARYING)
1006 	return SSA_PROP_VARYING;
1007       else
1008 	return SSA_PROP_INTERESTING;
1009     }
1010   else
1011     return SSA_PROP_NOT_INTERESTING;
1012 }
1013 
1014 /* Return the constant value for OP, or OP itself otherwise.  */
1015 
1016 static tree
1017 valueize_op (tree op)
1018 {
1019   if (TREE_CODE (op) == SSA_NAME)
1020     {
1021       tree tem = get_constant_value (op);
1022       if (tem)
1023 	return tem;
1024     }
1025   return op;
1026 }
1027 
1028 /* CCP specific front-end to the non-destructive constant folding
1029    routines.
1030 
1031    Attempt to simplify the RHS of STMT knowing that one or more
1032    operands are constants.
1033 
1034    If simplification is possible, return the simplified RHS,
1035    otherwise return the original RHS or NULL_TREE.  */
1036 
1037 static tree
1038 ccp_fold (gimple stmt)
1039 {
1040   location_t loc = gimple_location (stmt);
1041   switch (gimple_code (stmt))
1042     {
1043     case GIMPLE_COND:
1044       {
1045         /* Handle comparison operators that can appear in GIMPLE form.  */
1046         tree op0 = valueize_op (gimple_cond_lhs (stmt));
1047         tree op1 = valueize_op (gimple_cond_rhs (stmt));
1048         enum tree_code code = gimple_cond_code (stmt);
1049         return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1050       }
1051 
1052     case GIMPLE_SWITCH:
1053       {
1054 	/* Return the constant switch index.  */
1055         return valueize_op (gimple_switch_index (stmt));
1056       }
1057 
1058     case GIMPLE_ASSIGN:
1059     case GIMPLE_CALL:
1060       return gimple_fold_stmt_to_constant_1 (stmt, valueize_op);
1061 
1062     default:
1063       gcc_unreachable ();
1064     }
1065 }
1066 
1067 /* Apply the operation CODE in type TYPE to the value, mask pair
1068    RVAL and RMASK representing a value of type RTYPE and set
1069    the value, mask pair *VAL and *MASK to the result.  */
1070 
1071 static void
1072 bit_value_unop_1 (enum tree_code code, tree type,
1073 		  double_int *val, double_int *mask,
1074 		  tree rtype, double_int rval, double_int rmask)
1075 {
1076   switch (code)
1077     {
1078     case BIT_NOT_EXPR:
1079       *mask = rmask;
1080       *val = ~rval;
1081       break;
1082 
1083     case NEGATE_EXPR:
1084       {
1085 	double_int temv, temm;
1086 	/* Return ~rval + 1.  */
1087 	bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
1088 	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1089 			 type, temv, temm,
1090 			 type, double_int_one, double_int_zero);
1091 	break;
1092       }
1093 
1094     CASE_CONVERT:
1095       {
1096 	bool uns;
1097 
1098 	/* First extend mask and value according to the original type.  */
1099 	uns = TYPE_UNSIGNED (rtype);
1100 	*mask = rmask.ext (TYPE_PRECISION (rtype), uns);
1101 	*val = rval.ext (TYPE_PRECISION (rtype), uns);
1102 
1103 	/* Then extend mask and value according to the target type.  */
1104 	uns = TYPE_UNSIGNED (type);
1105 	*mask = (*mask).ext (TYPE_PRECISION (type), uns);
1106 	*val = (*val).ext (TYPE_PRECISION (type), uns);
1107 	break;
1108       }
1109 
1110     default:
1111       *mask = double_int_minus_one;
1112       break;
1113     }
1114 }
1115 
1116 /* Apply the operation CODE in type TYPE to the value, mask pairs
1117    R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1118    and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
1119 
1120 static void
1121 bit_value_binop_1 (enum tree_code code, tree type,
1122 		   double_int *val, double_int *mask,
1123 		   tree r1type, double_int r1val, double_int r1mask,
1124 		   tree r2type, double_int r2val, double_int r2mask)
1125 {
1126   bool uns = TYPE_UNSIGNED (type);
1127   /* Assume we'll get a constant result.  Use an initial varying value,
1128      we fall back to varying in the end if necessary.  */
1129   *mask = double_int_minus_one;
1130   switch (code)
1131     {
1132     case BIT_AND_EXPR:
1133       /* The mask is constant where there is a known not
1134 	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
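      /* Illustrative example: r1 = value 0b1100 / mask 0b0011 and
	 r2 = value 0b1010 / mask 0 give value 0b1000 / mask 0b0010;
	 only bit 1 of the result depends on the unknown low bits of
	 r1, every other bit is known.  */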
1135       *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1136       *val = r1val & r2val;
1137       break;
1138 
1139     case BIT_IOR_EXPR:
1140       /* The mask is constant where there is a known
1141 	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
1142       *mask = (r1mask | r2mask)
1143 	      .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
1144       *val = r1val | r2val;
1145       break;
1146 
1147     case BIT_XOR_EXPR:
1148       /* m1 | m2  */
1149       *mask = r1mask | r2mask;
1150       *val = r1val ^ r2val;
1151       break;
1152 
1153     case LROTATE_EXPR:
1154     case RROTATE_EXPR:
1155       if (r2mask.is_zero ())
1156 	{
1157 	  HOST_WIDE_INT shift = r2val.low;
1158 	  if (code == RROTATE_EXPR)
1159 	    shift = -shift;
1160 	  *mask = r1mask.lrotate (shift, TYPE_PRECISION (type));
1161 	  *val = r1val.lrotate (shift, TYPE_PRECISION (type));
1162 	}
1163       break;
1164 
1165     case LSHIFT_EXPR:
1166     case RSHIFT_EXPR:
1167       /* ???  We can handle partially known shift counts if we know
1168 	 their sign.  That way we can tell that (x << (y | 8)) & 255
1169 	 is zero.  */
1170       if (r2mask.is_zero ())
1171 	{
1172 	  HOST_WIDE_INT shift = r2val.low;
1173 	  if (code == RSHIFT_EXPR)
1174 	    shift = -shift;
1175 	  /* We need to know if we are doing a left or a right shift
1176 	     to properly shift in zeros for left shift and unsigned
1177 	     right shifts and the sign bit for signed right shifts.
1178 	     For signed right shifts we shift in varying in case
1179 	     the sign bit was varying.  */
1180 	  if (shift > 0)
1181 	    {
1182 	      *mask = r1mask.llshift (shift, TYPE_PRECISION (type));
1183 	      *val = r1val.llshift (shift, TYPE_PRECISION (type));
1184 	    }
1185 	  else if (shift < 0)
1186 	    {
1187 	      shift = -shift;
1188 	      *mask = r1mask.rshift (shift, TYPE_PRECISION (type), !uns);
1189 	      *val = r1val.rshift (shift, TYPE_PRECISION (type), !uns);
1190 	    }
1191 	  else
1192 	    {
1193 	      *mask = r1mask;
1194 	      *val = r1val;
1195 	    }
1196 	}
1197       break;
1198 
1199     case PLUS_EXPR:
1200     case POINTER_PLUS_EXPR:
1201       {
1202 	double_int lo, hi;
1203 	/* Do the addition with unknown bits set to zero, to give carry-ins of
1204 	   zero wherever possible.  */
1205 	lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
1206 	lo = lo.ext (TYPE_PRECISION (type), uns);
1207 	/* Do the addition with unknown bits set to one, to give carry-ins of
1208 	   one wherever possible.  */
1209 	hi = (r1val | r1mask) + (r2val | r2mask);
1210 	hi = hi.ext (TYPE_PRECISION (type), uns);
1211 	/* Each bit in the result is known if (a) the corresponding bits in
1212 	   both inputs are known, and (b) the carry-in to that bit position
1213 	   is known.  We can check condition (b) by seeing if we got the same
1214 	   result with minimised carries as with maximised carries.  */
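	/* Worked illustration with hypothetical operands: adding the
	   known value 3 to a value that is either 0 or 1 gives lo == 3
	   and hi == 4, so lo ^ hi == 7 and the three low bits become
	   unknown; these are exactly the bits a possible carry can
	   reach.  */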
1215 	*mask = r1mask | r2mask | (lo ^ hi);
1216 	*mask = (*mask).ext (TYPE_PRECISION (type), uns);
1217 	/* It shouldn't matter whether we choose lo or hi here.  */
1218 	*val = lo;
1219 	break;
1220       }
1221 
1222     case MINUS_EXPR:
1223       {
1224 	double_int temv, temm;
1225 	bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
1226 			  r2type, r2val, r2mask);
1227 	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
1228 			   r1type, r1val, r1mask,
1229 			   r2type, temv, temm);
1230 	break;
1231       }
1232 
1233     case MULT_EXPR:
1234       {
1235 	/* Just track trailing zeros in both operands and transfer
1236 	   them to the other.  */
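	/* For instance (illustrative only): a multiple of 4 times a
	   multiple of 2 is known to be a multiple of 8, so the three
	   low bits of the result are known zeros no matter what the
	   remaining bits turn out to be.  */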
1237 	int r1tz = (r1val | r1mask).trailing_zeros ();
1238 	int r2tz = (r2val | r2mask).trailing_zeros ();
1239 	if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
1240 	  {
1241 	    *mask = double_int_zero;
1242 	    *val = double_int_zero;
1243 	  }
1244 	else if (r1tz + r2tz > 0)
1245 	  {
1246 	    *mask = ~double_int::mask (r1tz + r2tz);
1247 	    *mask = (*mask).ext (TYPE_PRECISION (type), uns);
1248 	    *val = double_int_zero;
1249 	  }
1250 	break;
1251       }
1252 
1253     case EQ_EXPR:
1254     case NE_EXPR:
1255       {
1256 	double_int m = r1mask | r2mask;
1257 	if (r1val.and_not (m) != r2val.and_not (m))
1258 	  {
1259 	    *mask = double_int_zero;
1260 	    *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
1261 	  }
1262 	else
1263 	  {
1264 	    /* We know the result of a comparison is always one or zero.  */
1265 	    *mask = double_int_one;
1266 	    *val = double_int_zero;
1267 	  }
1268 	break;
1269       }
1270 
1271     case GE_EXPR:
1272     case GT_EXPR:
1273       {
1274 	double_int tem = r1val;
1275 	r1val = r2val;
1276 	r2val = tem;
1277 	tem = r1mask;
1278 	r1mask = r2mask;
1279 	r2mask = tem;
1280 	code = swap_tree_comparison (code);
1281       }
1282       /* Fallthru.  */
1283     case LT_EXPR:
1284     case LE_EXPR:
1285       {
1286 	int minmax, maxmin;
1287 	/* If the most significant bits are not known we know nothing.  */
1288 	if (r1mask.is_negative () || r2mask.is_negative ())
1289 	  break;
1290 
1291 	/* For comparisons the signedness is in the comparison operands.  */
1292 	uns = TYPE_UNSIGNED (r1type);
1293 
1294 	/* If we know the most significant bits we know the value
1295 	   ranges by means of treating the varying bits as zero or
1296 	   one.  Do a cross comparison of the max/min pairs.  */
1297 	maxmin = (r1val | r1mask).cmp (r2val.and_not (r2mask), uns);
1298 	minmax = r1val.and_not (r1mask).cmp (r2val | r2mask, uns);
1299 	if (maxmin < 0)  /* r1 is less than r2.  */
1300 	  {
1301 	    *mask = double_int_zero;
1302 	    *val = double_int_one;
1303 	  }
1304 	else if (minmax > 0)  /* r1 is not less or equal to r2.  */
1305 	  {
1306 	    *mask = double_int_zero;
1307 	    *val = double_int_zero;
1308 	  }
1309 	else if (maxmin == minmax)  /* r1 and r2 are equal.  */
1310 	  {
1311 	    /* This probably should never happen as we'd have
1312 	       folded the thing during fully constant value folding.  */
1313 	    *mask = double_int_zero;
1314 	    *val = (code == LE_EXPR ? double_int_one :  double_int_zero);
1315 	  }
1316 	else
1317 	  {
1318 	    /* We know the result of a comparison is always one or zero.  */
1319 	    *mask = double_int_one;
1320 	    *val = double_int_zero;
1321 	  }
1322 	break;
1323       }
1324 
1325     default:;
1326     }
1327 }
1328 
1329 /* Return the propagation value when applying the operation CODE to
1330    the value RHS yielding type TYPE.  */
1331 
1332 static prop_value_t
1333 bit_value_unop (enum tree_code code, tree type, tree rhs)
1334 {
1335   prop_value_t rval = get_value_for_expr (rhs, true);
1336   double_int value, mask;
1337   prop_value_t val;
1338 
1339   if (rval.lattice_val == UNDEFINED)
1340     return rval;
1341 
1342   gcc_assert ((rval.lattice_val == CONSTANT
1343 	       && TREE_CODE (rval.value) == INTEGER_CST)
1344 	      || rval.mask.is_minus_one ());
1345   bit_value_unop_1 (code, type, &value, &mask,
1346 		    TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
1347   if (!mask.is_minus_one ())
1348     {
1349       val.lattice_val = CONSTANT;
1350       val.mask = mask;
1351       /* ???  Delay building trees here.  */
1352       val.value = double_int_to_tree (type, value);
1353     }
1354   else
1355     {
1356       val.lattice_val = VARYING;
1357       val.value = NULL_TREE;
1358       val.mask = double_int_minus_one;
1359     }
1360   return val;
1361 }
1362 
1363 /* Return the propagation value when applying the operation CODE to
1364    the values RHS1 and RHS2 yielding type TYPE.  */
1365 
1366 static prop_value_t
1367 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1368 {
1369   prop_value_t r1val = get_value_for_expr (rhs1, true);
1370   prop_value_t r2val = get_value_for_expr (rhs2, true);
1371   double_int value, mask;
1372   prop_value_t val;
1373 
1374   if (r1val.lattice_val == UNDEFINED
1375       || r2val.lattice_val == UNDEFINED)
1376     {
1377       val.lattice_val = VARYING;
1378       val.value = NULL_TREE;
1379       val.mask = double_int_minus_one;
1380       return val;
1381     }
1382 
1383   gcc_assert ((r1val.lattice_val == CONSTANT
1384 	       && TREE_CODE (r1val.value) == INTEGER_CST)
1385 	      || r1val.mask.is_minus_one ());
1386   gcc_assert ((r2val.lattice_val == CONSTANT
1387 	       && TREE_CODE (r2val.value) == INTEGER_CST)
1388 	      || r2val.mask.is_minus_one ());
1389   bit_value_binop_1 (code, type, &value, &mask,
1390 		     TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
1391 		     TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
1392   if (!mask.is_minus_one ())
1393     {
1394       val.lattice_val = CONSTANT;
1395       val.mask = mask;
1396       /* ???  Delay building trees here.  */
1397       val.value = double_int_to_tree (type, value);
1398     }
1399   else
1400     {
1401       val.lattice_val = VARYING;
1402       val.value = NULL_TREE;
1403       val.mask = double_int_minus_one;
1404     }
1405   return val;
1406 }
1407 
1408 /* Return the propagation value when applying __builtin_assume_aligned to
1409    its arguments.  */
1410 
1411 static prop_value_t
1412 bit_value_assume_aligned (gimple stmt)
1413 {
1414   tree ptr = gimple_call_arg (stmt, 0), align, misalign = NULL_TREE;
1415   tree type = TREE_TYPE (ptr);
1416   unsigned HOST_WIDE_INT aligni, misaligni = 0;
1417   prop_value_t ptrval = get_value_for_expr (ptr, true);
1418   prop_value_t alignval;
1419   double_int value, mask;
1420   prop_value_t val;
1421   if (ptrval.lattice_val == UNDEFINED)
1422     return ptrval;
1423   gcc_assert ((ptrval.lattice_val == CONSTANT
1424 	       && TREE_CODE (ptrval.value) == INTEGER_CST)
1425 	      || ptrval.mask.is_minus_one ());
1426   align = gimple_call_arg (stmt, 1);
1427   if (!host_integerp (align, 1))
1428     return ptrval;
1429   aligni = tree_low_cst (align, 1);
1430   if (aligni <= 1
1431       || (aligni & (aligni - 1)) != 0)
1432     return ptrval;
1433   if (gimple_call_num_args (stmt) > 2)
1434     {
1435       misalign = gimple_call_arg (stmt, 2);
1436       if (!host_integerp (misalign, 1))
1437 	return ptrval;
1438       misaligni = tree_low_cst (misalign, 1);
1439       if (misaligni >= aligni)
1440 	return ptrval;
1441     }
1442   align = build_int_cst_type (type, -aligni);
1443   alignval = get_value_for_expr (align, true);
1444   bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
1445 		     type, value_to_double_int (ptrval), ptrval.mask,
1446 		     type, value_to_double_int (alignval), alignval.mask);
1447   if (!mask.is_minus_one ())
1448     {
1449       val.lattice_val = CONSTANT;
1450       val.mask = mask;
1451       gcc_assert ((mask.low & (aligni - 1)) == 0);
1452       gcc_assert ((value.low & (aligni - 1)) == 0);
1453       value.low |= misaligni;
1454       /* ???  Delay building trees here.  */
1455       val.value = double_int_to_tree (type, value);
1456     }
1457   else
1458     {
1459       val.lattice_val = VARYING;
1460       val.value = NULL_TREE;
1461       val.mask = double_int_minus_one;
1462     }
1463   return val;
1464 }
1465 
1466 /* Evaluate statement STMT.
1467    Valid only for assignments, calls, conditionals, and switches. */
1468 
1469 static prop_value_t
1470 evaluate_stmt (gimple stmt)
1471 {
1472   prop_value_t val;
1473   tree simplified = NULL_TREE;
1474   ccp_lattice_t likelyvalue = likely_value (stmt);
1475   bool is_constant = false;
1476   unsigned int align;
1477 
1478   if (dump_file && (dump_flags & TDF_DETAILS))
1479     {
1480       fprintf (dump_file, "which is likely ");
1481       switch (likelyvalue)
1482 	{
1483 	case CONSTANT:
1484 	  fprintf (dump_file, "CONSTANT");
1485 	  break;
1486 	case UNDEFINED:
1487 	  fprintf (dump_file, "UNDEFINED");
1488 	  break;
1489 	case VARYING:
1490 	  fprintf (dump_file, "VARYING");
1491 	  break;
1492 	default:;
1493 	}
1494       fprintf (dump_file, "\n");
1495     }
1496 
1497   /* If the statement is likely to have a CONSTANT result, then try
1498      to fold the statement to determine the constant value.  */
1499   /* FIXME.  This is the only place that we call ccp_fold.
1500      Since likely_value never returns CONSTANT for calls, we will
1501      not attempt to fold them, including builtins that may profit.  */
1502   if (likelyvalue == CONSTANT)
1503     {
1504       fold_defer_overflow_warnings ();
1505       simplified = ccp_fold (stmt);
1506       is_constant = simplified && is_gimple_min_invariant (simplified);
1507       fold_undefer_overflow_warnings (is_constant, stmt, 0);
1508       if (is_constant)
1509 	{
1510 	  /* The statement produced a constant value.  */
1511 	  val.lattice_val = CONSTANT;
1512 	  val.value = simplified;
1513 	  val.mask = double_int_zero;
1514 	}
1515     }
1516   /* If the statement is likely to have a VARYING result, then do not
1517      bother folding the statement.  */
1518   else if (likelyvalue == VARYING)
1519     {
1520       enum gimple_code code = gimple_code (stmt);
1521       if (code == GIMPLE_ASSIGN)
1522         {
1523           enum tree_code subcode = gimple_assign_rhs_code (stmt);
1524 
1525           /* Other cases cannot satisfy is_gimple_min_invariant
1526              without folding.  */
1527           if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1528             simplified = gimple_assign_rhs1 (stmt);
1529         }
1530       else if (code == GIMPLE_SWITCH)
1531         simplified = gimple_switch_index (stmt);
1532       else
1533 	/* These cannot satisfy is_gimple_min_invariant without folding.  */
1534 	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1535       is_constant = simplified && is_gimple_min_invariant (simplified);
1536       if (is_constant)
1537 	{
1538 	  /* The statement produced a constant value.  */
1539 	  val.lattice_val = CONSTANT;
1540 	  val.value = simplified;
1541 	  val.mask = double_int_zero;
1542 	}
1543     }
1544 
1545   /* Resort to simplification for bitwise tracking.  */
1546   if (flag_tree_bit_ccp
1547       && (likelyvalue == CONSTANT || is_gimple_call (stmt))
1548       && !is_constant)
1549     {
1550       enum gimple_code code = gimple_code (stmt);
1551       val.lattice_val = VARYING;
1552       val.value = NULL_TREE;
1553       val.mask = double_int_minus_one;
1554       if (code == GIMPLE_ASSIGN)
1555 	{
1556 	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
1557 	  tree rhs1 = gimple_assign_rhs1 (stmt);
1558 	  switch (get_gimple_rhs_class (subcode))
1559 	    {
1560 	    case GIMPLE_SINGLE_RHS:
1561 	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1562 		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1563 		val = get_value_for_expr (rhs1, true);
1564 	      break;
1565 
1566 	    case GIMPLE_UNARY_RHS:
1567 	      if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1568 		   || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1569 		  && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
1570 		      || POINTER_TYPE_P (gimple_expr_type (stmt))))
1571 		val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
1572 	      break;
1573 
1574 	    case GIMPLE_BINARY_RHS:
1575 	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1576 		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1577 		{
1578 		  tree lhs = gimple_assign_lhs (stmt);
1579 		  tree rhs2 = gimple_assign_rhs2 (stmt);
1580 		  val = bit_value_binop (subcode,
1581 					 TREE_TYPE (lhs), rhs1, rhs2);
1582 		}
1583 	      break;
1584 
1585 	    default:;
1586 	    }
1587 	}
1588       else if (code == GIMPLE_COND)
1589 	{
1590 	  enum tree_code code = gimple_cond_code (stmt);
1591 	  tree rhs1 = gimple_cond_lhs (stmt);
1592 	  tree rhs2 = gimple_cond_rhs (stmt);
1593 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1594 	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1595 	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1596 	}
1597       else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1598 	{
1599 	  tree fndecl = gimple_call_fndecl (stmt);
1600 	  switch (DECL_FUNCTION_CODE (fndecl))
1601 	    {
1602 	    case BUILT_IN_MALLOC:
1603 	    case BUILT_IN_REALLOC:
1604 	    case BUILT_IN_CALLOC:
1605 	    case BUILT_IN_STRDUP:
1606 	    case BUILT_IN_STRNDUP:
1607 	      val.lattice_val = CONSTANT;
1608 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1609 	      val.mask = double_int::from_shwi
1610 		  	   (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
1611 			      / BITS_PER_UNIT - 1));
1612 	      break;
1613 
1614 	    case BUILT_IN_ALLOCA:
1615 	    case BUILT_IN_ALLOCA_WITH_ALIGN:
1616 	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
1617 		       ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
1618 		       : BIGGEST_ALIGNMENT);
1619 	      val.lattice_val = CONSTANT;
1620 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1621 	      val.mask = double_int::from_shwi (~(((HOST_WIDE_INT) align)
1622 						  / BITS_PER_UNIT - 1));
1623 	      break;
1624 
1625 	    /* These builtins return their first argument, unmodified.  */
1626 	    case BUILT_IN_MEMCPY:
1627 	    case BUILT_IN_MEMMOVE:
1628 	    case BUILT_IN_MEMSET:
1629 	    case BUILT_IN_STRCPY:
1630 	    case BUILT_IN_STRNCPY:
1631 	    case BUILT_IN_MEMCPY_CHK:
1632 	    case BUILT_IN_MEMMOVE_CHK:
1633 	    case BUILT_IN_MEMSET_CHK:
1634 	    case BUILT_IN_STRCPY_CHK:
1635 	    case BUILT_IN_STRNCPY_CHK:
1636 	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1637 	      break;
1638 
1639 	    case BUILT_IN_ASSUME_ALIGNED:
1640 	      val = bit_value_assume_aligned (stmt);
1641 	      break;
1642 
1643 	    default:;
1644 	    }
1645 	}
1646       is_constant = (val.lattice_val == CONSTANT);
1647     }
1648 
1649   if (!is_constant)
1650     {
1651       /* The statement produced a nonconstant value.  If the statement
1652 	 had UNDEFINED operands, then the result of the statement
1653 	 should be UNDEFINED.  Otherwise, the statement is VARYING.  */
1654       if (likelyvalue == UNDEFINED)
1655 	{
1656 	  val.lattice_val = likelyvalue;
1657 	  val.mask = double_int_zero;
1658 	}
1659       else
1660 	{
1661 	  val.lattice_val = VARYING;
1662 	  val.mask = double_int_minus_one;
1663 	}
1664 
1665       val.value = NULL_TREE;
1666     }
1667 
1668   return val;
1669 }
1670 
1671 typedef hash_table <pointer_hash <gimple_statement_d> > gimple_htab;
1672 
1673 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
1674    each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */
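/* The clobbers end the lifetime of VAR at the points where the memory of the
   original alloca would have been released, which allows later passes to
   reuse its stack slot.  */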
1675 
1676 static void
1677 insert_clobber_before_stack_restore (tree saved_val, tree var,
1678 				     gimple_htab *visited)
1679 {
1680   gimple stmt, clobber_stmt;
1681   tree clobber;
1682   imm_use_iterator iter;
1683   gimple_stmt_iterator i;
1684   gimple *slot;
1685 
1686   FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
1687     if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1688       {
1689 	clobber = build_constructor (TREE_TYPE (var),
1690 				     NULL);
1691 	TREE_THIS_VOLATILE (clobber) = 1;
1692 	clobber_stmt = gimple_build_assign (var, clobber);
1693 
1694 	i = gsi_for_stmt (stmt);
1695 	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
1696       }
1697     else if (gimple_code (stmt) == GIMPLE_PHI)
1698       {
1699 	if (!visited->is_created ())
1700 	  visited->create (10);
1701 
1702 	slot = visited->find_slot (stmt, INSERT);
1703 	if (*slot != NULL)
1704 	  continue;
1705 
1706 	*slot = stmt;
1707 	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
1708 					     visited);
1709       }
1710     else if (gimple_assign_ssa_name_copy_p (stmt))
1711       insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
1712 					   visited);
1713     else
1714       gcc_assert (is_gimple_debug (stmt));
1715 }
1716 
1717 /* Advance the iterator to the previous non-debug gimple statement in the same
1718    or dominating basic block.  */
1719 
1720 static inline void
1721 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
1722 {
1723   basic_block dom;
1724 
1725   gsi_prev_nondebug (i);
1726   while (gsi_end_p (*i))
1727     {
1728       dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
1729       if (dom == NULL || dom == ENTRY_BLOCK_PTR)
1730 	return;
1731 
1732       *i = gsi_last_bb (dom);
1733     }
1734 }
1735 
1736 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
1737    a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
1738 
1739    It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
1740    previous pass (such as DOM) duplicated it along multiple paths to a BB.  In
1741    that case the function gives up without inserting the clobbers.  */
1742 
1743 static void
1744 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
1745 {
1746   gimple stmt;
1747   tree saved_val;
1748   gimple_htab visited;
1749 
1750   for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
1751     {
1752       stmt = gsi_stmt (i);
1753 
1754       if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
1755 	continue;
1756 
1757       saved_val = gimple_call_lhs (stmt);
1758       if (saved_val == NULL_TREE)
1759 	continue;
1760 
1761       insert_clobber_before_stack_restore (saved_val, var, &visited);
1762       break;
1763     }
1764 
1765   if (visited.is_created ())
1766     visited.dispose ();
1767 }
1768 
1769 /* Detect a __builtin_alloca_with_align with a constant size argument.  If
1770    found, declare a fixed-size array in its place and return the address of
1771    the array; otherwise return NULL_TREE.  */
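/* For example (a sketch; the declaration name is illustrative):

     ptr_5 = __builtin_alloca_with_align (16, 64);

   becomes, when the size heuristic allows it,

     unsigned char D.1234[16];	(with DECL_ALIGN set to the requested 64 bits)
     ptr_5 = &D.1234;  */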
1772 
1773 static tree
1774 fold_builtin_alloca_with_align (gimple stmt)
1775 {
1776   unsigned HOST_WIDE_INT size, threshold, n_elem;
1777   tree lhs, arg, block, var, elem_type, array_type;
1778 
1779   /* Get lhs.  */
1780   lhs = gimple_call_lhs (stmt);
1781   if (lhs == NULL_TREE)
1782     return NULL_TREE;
1783 
1784   /* Detect constant argument.  */
1785   arg = get_constant_value (gimple_call_arg (stmt, 0));
1786   if (arg == NULL_TREE
1787       || TREE_CODE (arg) != INTEGER_CST
1788       || !host_integerp (arg, 1))
1789     return NULL_TREE;
1790 
1791   size = TREE_INT_CST_LOW (arg);
1792 
1793   /* Heuristic: don't fold large allocas.  */
1794   threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
1795   /* In case the alloca is located at function entry, it has the same lifetime
1796      as a declared array, so we allow a larger size.  */
1797   block = gimple_block (stmt);
1798   if (!(cfun->after_inlining
1799         && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
1800     threshold /= 10;
1801   if (size > threshold)
1802     return NULL_TREE;
1803 
1804   /* Declare array.  */
1805   elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
1806   n_elem = size * 8 / BITS_PER_UNIT;
1807   array_type = build_array_type_nelts (elem_type, n_elem);
1808   var = create_tmp_var (array_type, NULL);
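  /* The second call argument is the requested alignment in bits; give the
     replacement array the same alignment.  */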
1809   DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1));
1810   {
1811     struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
1812     if (pi != NULL && !pi->pt.anything)
1813       {
1814 	bool singleton_p;
1815 	unsigned uid;
1816 	singleton_p = pt_solution_singleton_p (&pi->pt, &uid);
1817 	gcc_assert (singleton_p);
1818 	SET_DECL_PT_UID (var, uid);
1819       }
1820   }
1821 
1822   /* Fold alloca to the address of the array.  */
1823   return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
1824 }
1825 
1826 /* Fold the stmt at *GSI with CCP-specific information that propagation
1827    and regular folding do not catch.  */
1828 
1829 static bool
1830 ccp_fold_stmt (gimple_stmt_iterator *gsi)
1831 {
1832   gimple stmt = gsi_stmt (*gsi);
1833 
1834   switch (gimple_code (stmt))
1835     {
1836     case GIMPLE_COND:
1837       {
1838 	prop_value_t val;
1839 	/* Statement evaluation will handle type mismatches in constants
1840 	   more gracefully than the final propagation.  This allows us to
1841 	   fold more conditionals here.  */
1842 	val = evaluate_stmt (stmt);
1843 	if (val.lattice_val != CONSTANT
1844 	    || !val.mask.is_zero ())
1845 	  return false;
1846 
1847 	if (dump_file)
1848 	  {
1849 	    fprintf (dump_file, "Folding predicate ");
1850 	    print_gimple_expr (dump_file, stmt, 0, 0);
1851 	    fprintf (dump_file, " to ");
1852 	    print_generic_expr (dump_file, val.value, 0);
1853 	    fprintf (dump_file, "\n");
1854 	  }
1855 
1856 	if (integer_zerop (val.value))
1857 	  gimple_cond_make_false (stmt);
1858 	else
1859 	  gimple_cond_make_true (stmt);
1860 
1861 	return true;
1862       }
1863 
1864     case GIMPLE_CALL:
1865       {
1866 	tree lhs = gimple_call_lhs (stmt);
1867 	int flags = gimple_call_flags (stmt);
1868 	tree val;
1869 	tree argt;
1870 	bool changed = false;
1871 	unsigned i;
1872 
1873 	/* If the call was folded into a constant make sure it goes
1874 	   away even if we cannot propagate into all uses because of
1875 	   type issues.  */
1876 	if (lhs
1877 	    && TREE_CODE (lhs) == SSA_NAME
1878 	    && (val = get_constant_value (lhs))
1879 	    /* Don't optimize away calls that have side-effects.  */
1880 	    && (flags & (ECF_CONST|ECF_PURE)) != 0
1881 	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
1882 	  {
1883 	    tree new_rhs = unshare_expr (val);
1884 	    bool res;
1885 	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
1886 					    TREE_TYPE (new_rhs)))
1887 	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
1888 	    res = update_call_from_tree (gsi, new_rhs);
1889 	    gcc_assert (res);
1890 	    return true;
1891 	  }
1892 
1893 	/* Internal calls provide no argument types, so the extra laxity
1894 	   for normal calls does not apply.  */
1895 	if (gimple_call_internal_p (stmt))
1896 	  return false;
1897 
1898         /* The heuristic used by fold_builtin_alloca_with_align differs before
1899 	   and after inlining, so we don't require the argument to have been
1900 	   replaced by a constant in the IL, only to have a known constant value.  */
1901         if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
1902           {
1903             tree new_rhs = fold_builtin_alloca_with_align (stmt);
1904             if (new_rhs)
1905 	      {
1906 		bool res = update_call_from_tree (gsi, new_rhs);
1907 		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
1908 		gcc_assert (res);
1909 		insert_clobbers_for_var (*gsi, var);
1910 		return true;
1911 	      }
1912           }
1913 
1914 	/* Propagate into the call arguments.  Compared to replace_uses_in
1915 	   this can use the argument slot types for type verification
1916 	   instead of the current argument type.  We also can safely
1917 	   drop qualifiers here as we are dealing with constants anyway.  */
1918 	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
1919 	for (i = 0; i < gimple_call_num_args (stmt) && argt;
1920 	     ++i, argt = TREE_CHAIN (argt))
1921 	  {
1922 	    tree arg = gimple_call_arg (stmt, i);
1923 	    if (TREE_CODE (arg) == SSA_NAME
1924 		&& (val = get_constant_value (arg))
1925 		&& useless_type_conversion_p
1926 		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
1927 		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
1928 	      {
1929 		gimple_call_set_arg (stmt, i, unshare_expr (val));
1930 		changed = true;
1931 	      }
1932 	  }
1933 
1934 	return changed;
1935       }
1936 
1937     case GIMPLE_ASSIGN:
1938       {
1939 	tree lhs = gimple_assign_lhs (stmt);
1940 	tree val;
1941 
1942 	/* If we have a load that turned out to be constant replace it
1943 	   as we cannot propagate into all uses in all cases.  */
1944 	if (gimple_assign_single_p (stmt)
1945 	    && TREE_CODE (lhs) == SSA_NAME
1946 	    && (val = get_constant_value (lhs)))
1947 	  {
1948 	    tree rhs = unshare_expr (val);
1949 	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
1950 	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
1951 	    gimple_assign_set_rhs_from_tree (gsi, rhs);
1952 	    return true;
1953 	  }
1954 
1955 	return false;
1956       }
1957 
1958     default:
1959       return false;
1960     }
1961 }
1962 
1963 /* Visit the assignment statement STMT.  Set the value of its LHS to the
1964    value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
1965    creates virtual definitions, set the value of each new name to that
1966    of the RHS (if we can derive a constant out of the RHS).
1967    Value-returning call statements also perform an assignment, and
1968    are handled here.  */
1969 
1970 static enum ssa_prop_result
1971 visit_assignment (gimple stmt, tree *output_p)
1972 {
1973   prop_value_t val;
1974   enum ssa_prop_result retval;
1975 
1976   tree lhs = gimple_get_lhs (stmt);
1977 
1978   gcc_assert (gimple_code (stmt) != GIMPLE_CALL
1979               || gimple_call_lhs (stmt) != NULL_TREE);
1980 
1981   if (gimple_assign_single_p (stmt)
1982       && gimple_assign_rhs_code (stmt) == SSA_NAME)
1983     /* For a simple copy operation, we copy the lattice values.  */
1984     val = *get_value (gimple_assign_rhs1 (stmt));
1985   else
1986     /* Evaluate the statement, which could be
1987        either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
1988     val = evaluate_stmt (stmt);
1989 
1990   retval = SSA_PROP_NOT_INTERESTING;
1991 
1992   /* Set the lattice value of the statement's output.  */
1993   if (TREE_CODE (lhs) == SSA_NAME)
1994     {
1995       /* If STMT is an assignment to an SSA_NAME, we only have one
1996 	 value to set.  */
1997       if (set_lattice_value (lhs, val))
1998 	{
1999 	  *output_p = lhs;
2000 	  if (val.lattice_val == VARYING)
2001 	    retval = SSA_PROP_VARYING;
2002 	  else
2003 	    retval = SSA_PROP_INTERESTING;
2004 	}
2005     }
2006 
2007   return retval;
2008 }
2009 
2010 
2011 /* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
2012    if it can determine which edge will be taken.  Otherwise, return
2013    SSA_PROP_VARYING.  */
2014 
2015 static enum ssa_prop_result
2016 visit_cond_stmt (gimple stmt, edge *taken_edge_p)
2017 {
2018   prop_value_t val;
2019   basic_block block;
2020 
2021   block = gimple_bb (stmt);
2022   val = evaluate_stmt (stmt);
2023   if (val.lattice_val != CONSTANT
2024       || !val.mask.is_zero ())
2025     return SSA_PROP_VARYING;
2026 
2027   /* Find which edge out of the conditional block will be taken and add it
2028      to the worklist.  If no single edge can be determined statically,
2029      return SSA_PROP_VARYING to feed all the outgoing edges to the
2030      propagation engine.  */
2031   *taken_edge_p = find_taken_edge (block, val.value);
2032   if (*taken_edge_p)
2033     return SSA_PROP_INTERESTING;
2034   else
2035     return SSA_PROP_VARYING;
2036 }
2037 
2038 
2039 /* Evaluate statement STMT.  If the statement produces an output value and
2040    its evaluation changes the lattice value of its output, return
2041    SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2042    output value.
2043 
2044    If STMT is a conditional branch and we can determine its truth
2045    value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
2046    value, return SSA_PROP_VARYING.  */
2047 
2048 static enum ssa_prop_result
2049 ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
2050 {
2051   tree def;
2052   ssa_op_iter iter;
2053 
2054   if (dump_file && (dump_flags & TDF_DETAILS))
2055     {
2056       fprintf (dump_file, "\nVisiting statement:\n");
2057       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2058     }
2059 
2060   switch (gimple_code (stmt))
2061     {
2062       case GIMPLE_ASSIGN:
2063         /* If the statement is an assignment that produces a single
2064            output value, evaluate its RHS to see if the lattice value of
2065            its output has changed.  */
2066         return visit_assignment (stmt, output_p);
2067 
2068       case GIMPLE_CALL:
2069         /* A value-returning call also performs an assignment.  */
2070         if (gimple_call_lhs (stmt) != NULL_TREE)
2071           return visit_assignment (stmt, output_p);
2072         break;
2073 
2074       case GIMPLE_COND:
2075       case GIMPLE_SWITCH:
2076         /* If STMT is a conditional branch, see if we can determine
2077            which branch will be taken.   */
2078         /* FIXME.  It appears that we should be able to optimize
2079            computed GOTOs here as well.  */
2080         return visit_cond_stmt (stmt, taken_edge_p);
2081 
2082       default:
2083         break;
2084     }
2085 
2086   /* Any other kind of statement is not interesting for constant
2087      propagation and, therefore, not worth simulating.  */
2088   if (dump_file && (dump_flags & TDF_DETAILS))
2089     fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");
2090 
2091   /* Definitions made by statements other than assignments to
2092      SSA_NAMEs represent unknown modifications to their outputs.
2093      Mark them VARYING.  */
2094   FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2095     {
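      /* An all-ones mask means that every bit of the value is unknown.  */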
2096       prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
2097       set_lattice_value (def, v);
2098     }
2099 
2100   return SSA_PROP_VARYING;
2101 }
2102 
2103 
2104 /* Main entry point for SSA Conditional Constant Propagation.  */
2105 
2106 static unsigned int
2107 do_ssa_ccp (void)
2108 {
2109   unsigned int todo = 0;
2110   calculate_dominance_info (CDI_DOMINATORS);
2111   ccp_initialize ();
2112   ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
2113   if (ccp_finalize ())
2114     todo = (TODO_cleanup_cfg | TODO_update_ssa);
2115   free_dominance_info (CDI_DOMINATORS);
2116   return todo;
2117 }
2118 
2119 
2120 static bool
2121 gate_ccp (void)
2122 {
2123   return flag_tree_ccp != 0;
2124 }
2125 
2126 
2127 struct gimple_opt_pass pass_ccp =
2128 {
2129  {
2130   GIMPLE_PASS,
2131   "ccp",				/* name */
2132   OPTGROUP_NONE,                        /* optinfo_flags */
2133   gate_ccp,				/* gate */
2134   do_ssa_ccp,				/* execute */
2135   NULL,					/* sub */
2136   NULL,					/* next */
2137   0,					/* static_pass_number */
2138   TV_TREE_CCP,				/* tv_id */
2139   PROP_cfg | PROP_ssa,			/* properties_required */
2140   0,					/* properties_provided */
2141   0,					/* properties_destroyed */
2142   0,					/* todo_flags_start */
2143   TODO_verify_ssa
2144   | TODO_update_address_taken
2145   | TODO_verify_stmts | TODO_ggc_collect/* todo_flags_finish */
2146  }
2147 };
2148 
2149 
2150 
2151 /* Try to optimize out __builtin_stack_restore.  Optimize it out
2152    if there is another __builtin_stack_restore in the same basic
2153    block and no calls or ASM_EXPRs are in between, or if this block's
2154    only outgoing edge is to EXIT_BLOCK and there are no calls or
2155    ASM_EXPRs after this __builtin_stack_restore.  */
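/* A sketch of the first case (SSA names are illustrative):

     t_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (t_1);	<-- this call becomes removable
     ... no calls or ASM_EXPRs ...
     __builtin_stack_restore (t_2);  */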
2156 
2157 static tree
2158 optimize_stack_restore (gimple_stmt_iterator i)
2159 {
2160   tree callee;
2161   gimple stmt;
2162 
2163   basic_block bb = gsi_bb (i);
2164   gimple call = gsi_stmt (i);
2165 
2166   if (gimple_code (call) != GIMPLE_CALL
2167       || gimple_call_num_args (call) != 1
2168       || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2169       || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2170     return NULL_TREE;
2171 
2172   for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2173     {
2174       stmt = gsi_stmt (i);
2175       if (gimple_code (stmt) == GIMPLE_ASM)
2176 	return NULL_TREE;
2177       if (gimple_code (stmt) != GIMPLE_CALL)
2178 	continue;
2179 
2180       callee = gimple_call_fndecl (stmt);
2181       if (!callee
2182 	  || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2183 	  /* All regular builtins are ok, just obviously not alloca.  */
2184 	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
2185 	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
2186 	return NULL_TREE;
2187 
2188       if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2189 	goto second_stack_restore;
2190     }
2191 
2192   if (!gsi_end_p (i))
2193     return NULL_TREE;
2194 
2195   /* Allow a single successor that is the exit block, or no successors at all.  */
2196   switch (EDGE_COUNT (bb->succs))
2197     {
2198     case 0:
2199       break;
2200     case 1:
2201       if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR)
2202 	return NULL_TREE;
2203       break;
2204     default:
2205       return NULL_TREE;
2206     }
2207  second_stack_restore:
2208 
2209   /* If there's exactly one use, then zap the call to __builtin_stack_save.
2210      If there are multiple uses, then the last one should remove the call.
2211      In any case, whether the call to __builtin_stack_save can be removed
2212      or not is irrelevant to removing the call to __builtin_stack_restore.  */
2213   if (has_single_use (gimple_call_arg (call, 0)))
2214     {
2215       gimple stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2216       if (is_gimple_call (stack_save))
2217 	{
2218 	  callee = gimple_call_fndecl (stack_save);
2219 	  if (callee
2220 	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2221 	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2222 	    {
2223 	      gimple_stmt_iterator stack_save_gsi;
2224 	      tree rhs;
2225 
2226 	      stack_save_gsi = gsi_for_stmt (stack_save);
2227 	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2228 	      update_call_from_tree (&stack_save_gsi, rhs);
2229 	    }
2230 	}
2231     }
2232 
2233   /* No effect, so the statement will be deleted.  */
2234   return integer_zero_node;
2235 }
2236 
2237 /* If the va_list type is a simple pointer and nothing special is needed,
2238    optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2239    remove __builtin_va_end (&ap) as a no-op, and turn __builtin_va_copy
2240    into a simple pointer assignment.  */
2241 
2242 static tree
2243 optimize_stdarg_builtin (gimple call)
2244 {
2245   tree callee, lhs, rhs, cfun_va_list;
2246   bool va_list_simple_ptr;
2247   location_t loc = gimple_location (call);
2248 
2249   if (gimple_code (call) != GIMPLE_CALL)
2250     return NULL_TREE;
2251 
2252   callee = gimple_call_fndecl (call);
2253 
2254   cfun_va_list = targetm.fn_abi_va_list (callee);
2255   va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2256 		       && (TREE_TYPE (cfun_va_list) == void_type_node
2257 			   || TREE_TYPE (cfun_va_list) == char_type_node);
2258 
2259   switch (DECL_FUNCTION_CODE (callee))
2260     {
2261     case BUILT_IN_VA_START:
2262       if (!va_list_simple_ptr
2263 	  || targetm.expand_builtin_va_start != NULL
2264 	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2265 	return NULL_TREE;
2266 
2267       if (gimple_call_num_args (call) != 2)
2268 	return NULL_TREE;
2269 
2270       lhs = gimple_call_arg (call, 0);
2271       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2272 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2273 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2274 	return NULL_TREE;
2275 
2276       lhs = build_fold_indirect_ref_loc (loc, lhs);
2277       rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2278                              1, integer_zero_node);
2279       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2280       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2281 
2282     case BUILT_IN_VA_COPY:
2283       if (!va_list_simple_ptr)
2284 	return NULL_TREE;
2285 
2286       if (gimple_call_num_args (call) != 2)
2287 	return NULL_TREE;
2288 
2289       lhs = gimple_call_arg (call, 0);
2290       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2291 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2292 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2293 	return NULL_TREE;
2294 
2295       lhs = build_fold_indirect_ref_loc (loc, lhs);
2296       rhs = gimple_call_arg (call, 1);
2297       if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2298 	  != TYPE_MAIN_VARIANT (cfun_va_list))
2299 	return NULL_TREE;
2300 
2301       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2302       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2303 
2304     case BUILT_IN_VA_END:
2305       /* No effect, so the statement will be deleted.  */
2306       return integer_zero_node;
2307 
2308     default:
2309       gcc_unreachable ();
2310     }
2311 }
2312 
2313 /* Attempt to make the block of the __builtin_unreachable call I unreachable
2314    by changing the incoming jumps.  Return true if at least one jump was changed.  */
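/* For instance, if a predecessor ends in (a sketch; names are illustrative)

     if (x_1 != 0) goto <unreachable bb>; else goto <bb 4>;

   and the edge into the unreachable block is the true edge, the condition is
   forced to false so that edge is never taken; CFG cleanup can then remove
   the block.  */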
2315 
2316 static bool
2317 optimize_unreachable (gimple_stmt_iterator i)
2318 {
2319   basic_block bb = gsi_bb (i);
2320   gimple_stmt_iterator gsi;
2321   gimple stmt;
2322   edge_iterator ei;
2323   edge e;
2324   bool ret;
2325 
2326   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2327     {
2328       stmt = gsi_stmt (gsi);
2329 
2330       if (is_gimple_debug (stmt))
2331        continue;
2332 
2333       if (gimple_code (stmt) == GIMPLE_LABEL)
2334 	{
2335 	  /* Verify we do not need to preserve the label.  */
2336 	  if (FORCED_LABEL (gimple_label_label (stmt)))
2337 	    return false;
2338 
2339 	  continue;
2340 	}
2341 
2342       /* Only handle the case that __builtin_unreachable is the first statement
2343 	 in the block.  We rely on DCE to remove stmts without side-effects
2344 	 before __builtin_unreachable.  */
2345       if (gsi_stmt (gsi) != gsi_stmt (i))
2346         return false;
2347     }
2348 
2349   ret = false;
2350   FOR_EACH_EDGE (e, ei, bb->preds)
2351     {
2352       gsi = gsi_last_bb (e->src);
2353       if (gsi_end_p (gsi))
2354 	continue;
2355 
2356       stmt = gsi_stmt (gsi);
2357       if (gimple_code (stmt) == GIMPLE_COND)
2358 	{
2359 	  if (e->flags & EDGE_TRUE_VALUE)
2360 	    gimple_cond_make_false (stmt);
2361 	  else if (e->flags & EDGE_FALSE_VALUE)
2362 	    gimple_cond_make_true (stmt);
2363 	  else
2364 	    gcc_unreachable ();
2365 	  update_stmt (stmt);
2366 	}
2367       else
2368 	{
2369 	  /* TODO: handle other cases, e.g. a switch statement.  */
2370 	  continue;
2371 	}
2372 
2373       ret = true;
2374     }
2375 
2376   return ret;
2377 }
2378 
2379 /* A simple pass that attempts to fold all builtin functions.  This pass
2380    is run after we've propagated as many constants as we can.  */
2381 
2382 static unsigned int
2383 execute_fold_all_builtins (void)
2384 {
2385   bool cfg_changed = false;
2386   basic_block bb;
2387   unsigned int todoflags = 0;
2388 
2389   FOR_EACH_BB (bb)
2390     {
2391       gimple_stmt_iterator i;
2392       for (i = gsi_start_bb (bb); !gsi_end_p (i); )
2393 	{
2394           gimple stmt, old_stmt;
2395 	  tree callee, result;
2396 	  enum built_in_function fcode;
2397 
2398 	  stmt = gsi_stmt (i);
2399 
2400           if (gimple_code (stmt) != GIMPLE_CALL)
2401 	    {
2402 	      gsi_next (&i);
2403 	      continue;
2404 	    }
2405 	  callee = gimple_call_fndecl (stmt);
2406 	  if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
2407 	    {
2408 	      gsi_next (&i);
2409 	      continue;
2410 	    }
2411 	  fcode = DECL_FUNCTION_CODE (callee);
2412 
2413 	  result = gimple_fold_builtin (stmt);
2414 
2415 	  if (result)
2416 	    gimple_remove_stmt_histograms (cfun, stmt);
2417 
2418 	  if (!result)
2419 	    switch (DECL_FUNCTION_CODE (callee))
2420 	      {
2421 	      case BUILT_IN_CONSTANT_P:
2422 		/* Resolve __builtin_constant_p.  If it hasn't been
2423 		   folded to integer_one_node by now, it's fairly
2424 		   certain that the value simply isn't constant.  */
2425                 result = integer_zero_node;
2426 		break;
2427 
2428 	      case BUILT_IN_ASSUME_ALIGNED:
2429 		/* Remove __builtin_assume_aligned.  */
2430 		result = gimple_call_arg (stmt, 0);
2431 		break;
2432 
2433 	      case BUILT_IN_STACK_RESTORE:
2434 		result = optimize_stack_restore (i);
2435 		if (result)
2436 		  break;
2437 		gsi_next (&i);
2438 		continue;
2439 
2440 	      case BUILT_IN_UNREACHABLE:
2441 		if (optimize_unreachable (i))
2442 		  cfg_changed = true;
2443 		break;
2444 
2445 	      case BUILT_IN_VA_START:
2446 	      case BUILT_IN_VA_END:
2447 	      case BUILT_IN_VA_COPY:
2448 		/* These shouldn't be folded before pass_stdarg.  */
2449 		result = optimize_stdarg_builtin (stmt);
2450 		if (result)
2451 		  break;
2452 		/* FALLTHRU */
2453 
2454 	      default:
2455 		gsi_next (&i);
2456 		continue;
2457 	      }
2458 
2459 	  if (result == NULL_TREE)
2460 	    break;
2461 
2462 	  if (dump_file && (dump_flags & TDF_DETAILS))
2463 	    {
2464 	      fprintf (dump_file, "Simplified\n  ");
2465 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2466 	    }
2467 
2468           old_stmt = stmt;
2469           if (!update_call_from_tree (&i, result))
2470 	    {
2471 	      gimplify_and_update_call_from_tree (&i, result);
2472 	      todoflags |= TODO_update_address_taken;
2473 	    }
2474 
2475 	  stmt = gsi_stmt (i);
2476 	  update_stmt (stmt);
2477 
2478 	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
2479 	      && gimple_purge_dead_eh_edges (bb))
2480 	    cfg_changed = true;
2481 
2482 	  if (dump_file && (dump_flags & TDF_DETAILS))
2483 	    {
2484 	      fprintf (dump_file, "to\n  ");
2485 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2486 	      fprintf (dump_file, "\n");
2487 	    }
2488 
2489 	  /* Retry the same statement if it changed into another
2490 	     builtin; there might be new opportunities now.  */
2491           if (gimple_code (stmt) != GIMPLE_CALL)
2492 	    {
2493 	      gsi_next (&i);
2494 	      continue;
2495 	    }
2496 	  callee = gimple_call_fndecl (stmt);
2497 	  if (!callee
2498               || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2499 	      || DECL_FUNCTION_CODE (callee) == fcode)
2500 	    gsi_next (&i);
2501 	}
2502     }
2503 
2504   /* Delete unreachable blocks.  */
2505   if (cfg_changed)
2506     todoflags |= TODO_cleanup_cfg;
2507 
2508   return todoflags;
2509 }
2510 
2511 
2512 struct gimple_opt_pass pass_fold_builtins =
2513 {
2514  {
2515   GIMPLE_PASS,
2516   "fab",				/* name */
2517   OPTGROUP_NONE,                        /* optinfo_flags */
2518   NULL,					/* gate */
2519   execute_fold_all_builtins,		/* execute */
2520   NULL,					/* sub */
2521   NULL,					/* next */
2522   0,					/* static_pass_number */
2523   TV_NONE,				/* tv_id */
2524   PROP_cfg | PROP_ssa,			/* properties_required */
2525   0,					/* properties_provided */
2526   0,					/* properties_destroyed */
2527   0,					/* todo_flags_start */
2528   TODO_verify_ssa
2529     | TODO_update_ssa			/* todo_flags_finish */
2530  }
2531 };
2532