1 /* Conditional constant propagation pass for the GNU compiler.
2    Copyright (C) 2000-2018 Free Software Foundation, Inc.
3    Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4    Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 /* Conditional constant propagation (CCP) is based on the SSA
23    propagation engine (tree-ssa-propagate.c).  Constant assignments of
24    the form VAR = CST are propagated from the assignments into uses of
25    VAR, which in turn may generate new constants.  The simulation uses
26    a four level lattice to keep track of constant values associated
27    with SSA names.  Given an SSA name V_i, it may take one of the
28    following values:
29 
30 	UNINITIALIZED   ->  the initial state of the value.  This value
31 			    is replaced with a correct initial value
32 			    the first time the value is used, so the
33 			    rest of the pass does not need to care about
34 			    it.  Using this value simplifies initialization
35 			    of the pass, and prevents us from needlessly
36 			    scanning statements that are never reached.
37 
38 	UNDEFINED	->  V_i is a local variable whose definition
39 			    has not been processed yet.  Therefore we
40 			    don't yet know if its value is a constant
41 			    or not.
42 
43 	CONSTANT	->  V_i has been found to hold a constant
44 			    value C.
45 
46 	VARYING		->  V_i cannot take a constant value, or if it
47 			    does, it is not possible to determine it
48 			    at compile time.
49 
50    The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51 
52    1- In ccp_visit_stmt, we are interested in assignments whose RHS
53       evaluates into a constant and conditional jumps whose predicate
54       evaluates into a boolean true or false.  When an assignment of
55       the form V_i = CONST is found, V_i's lattice value is set to
56       CONSTANT and CONST is associated with it.  This causes the
57       propagation engine to add all the SSA edges coming out of the
58       assignment into the worklists, so that statements that use V_i
59       can be visited.
60 
61       If the statement is a conditional with a constant predicate, we
62       mark the outgoing edges as executable or not executable
63       depending on the predicate's value.  This is then used when
64       visiting PHI nodes to know when a PHI argument can be ignored.
65 
66 
67    2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68       same constant C, then the LHS of the PHI is set to C.  This
69       evaluation is known as the "meet operation".  Since one of the
70       goals of this evaluation is to optimistically return constant
71       values as often as possible, it uses two main short cuts:
72 
73       - If an argument is flowing in through a non-executable edge, it
74 	is ignored.  This is useful in cases like this:
75 
76 			if (PRED)
77 			  a_9 = 3;
78 			else
79 			  a_10 = 100;
80 			a_11 = PHI (a_9, a_10)
81 
82 	If PRED is known to always evaluate to false, then we can
83 	assume that a_11 will always take its value from a_10, meaning
84 	that instead of considering it VARYING (a_9 and a_10 have
85 	different values), we can consider it CONSTANT 100.
86 
87       - If an argument has an UNDEFINED value, then it does not affect
88 	the outcome of the meet operation.  If a variable V_i has an
89 	UNDEFINED value, it means that either its defining statement
90 	hasn't been visited yet or V_i has no defining statement, in
91 	which case the original symbol 'V' is being used
92 	uninitialized.  Since 'V' is a local variable, the compiler
93 	may assume any initial value for it.
94 
95 
96    After propagation, every variable V_i that ends up with a lattice
97    value of CONSTANT will have the associated constant value in the
98    array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
99    final substitution and folding.
100 
101    This algorithm uses wide-ints at the max precision of the target.
102    This means that, with one uninteresting exception, variables with
103    UNSIGNED types never go to VARYING because the bits above the
104    precision of the type of the variable are always zero.  The
105    uninteresting case is a variable of UNSIGNED type that has the
106    maximum precision of the target.  Such variables can go to VARYING,
107    but this causes no loss of information since these variables will
108    never be extended.
109 
110    References:
111 
112      Constant propagation with conditional branches,
113      Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114 
115      Building an Optimizing Compiler,
116      Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117 
118      Advanced Compiler Design and Implementation,
119      Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
120 
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "ssa.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-fold.h"
133 #include "tree-eh.h"
134 #include "gimplify.h"
135 #include "gimple-iterator.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
138 #include "dbgcnt.h"
139 #include "params.h"
140 #include "builtins.h"
141 #include "tree-chkp.h"
142 #include "cfgloop.h"
143 #include "stor-layout.h"
144 #include "optabs-query.h"
145 #include "tree-ssa-ccp.h"
146 #include "tree-dfa.h"
147 #include "diagnostic-core.h"
148 #include "stringpool.h"
149 #include "attribs.h"
150 #include "tree-vector-builder.h"
151 
152 /* Possible lattice values.  */
153 typedef enum
154 {
155   UNINITIALIZED,
156   UNDEFINED,
157   CONSTANT,
158   VARYING
159 } ccp_lattice_t;
160 
161 struct ccp_prop_value_t {
162     /* Lattice value.  */
163     ccp_lattice_t lattice_val;
164 
165     /* Propagated value.  */
166     tree value;
167 
168     /* Mask that applies to the propagated value during CCP.  For X
169        with a CONSTANT lattice value X & ~mask == value & ~mask.  The
170        zero bits in the mask cover constant values.  The ones mean no
171        information.  */
172     widest_int mask;
173 };
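/* As an example of this encoding, { value = 4, mask = 3 } describes
   every X with X & ~3 == 4, i.e. 4, 5, 6 or 7: the two low (mask) bits
   are unknown while all higher bits are known to equal those of
   VALUE.  */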
174 
175 class ccp_propagate : public ssa_propagation_engine
176 {
177  public:
178   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
179   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
180 };
181 
182 /* Array of propagated constant values.  After propagation,
183    CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
184    the constant is held in an SSA name representing a memory store
185    (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
186    memory reference used to store (i.e., the LHS of the assignment
187    doing the store).  */
188 static ccp_prop_value_t *const_val;
189 static unsigned n_const_val;
190 
191 static void canonicalize_value (ccp_prop_value_t *);
192 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
193 
194 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */
195 
196 static void
197 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
198 {
199   switch (val.lattice_val)
200     {
201     case UNINITIALIZED:
202       fprintf (outf, "%sUNINITIALIZED", prefix);
203       break;
204     case UNDEFINED:
205       fprintf (outf, "%sUNDEFINED", prefix);
206       break;
207     case VARYING:
208       fprintf (outf, "%sVARYING", prefix);
209       break;
210     case CONSTANT:
211       if (TREE_CODE (val.value) != INTEGER_CST
212 	  || val.mask == 0)
213 	{
214 	  fprintf (outf, "%sCONSTANT ", prefix);
215 	  print_generic_expr (outf, val.value, dump_flags);
216 	}
217       else
218 	{
219 	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
220 					     val.mask);
221 	  fprintf (outf, "%sCONSTANT ", prefix);
222 	  print_hex (cval, outf);
223 	  fprintf (outf, " (");
224 	  print_hex (val.mask, outf);
225 	  fprintf (outf, ")");
226 	}
227       break;
228     default:
229       gcc_unreachable ();
230     }
231 }
232 
233 
234 /* Print lattice value VAL to stderr.  */
235 
236 void debug_lattice_value (ccp_prop_value_t val);
237 
238 DEBUG_FUNCTION void
239 debug_lattice_value (ccp_prop_value_t val)
240 {
241   dump_lattice_value (stderr, "", val);
242   fprintf (stderr, "\n");
243 }
244 
245 /* Extend NONZERO_BITS to a full mask, based on sgn.  */
246 
247 static widest_int
248 extend_mask (const wide_int &nonzero_bits, signop sgn)
249 {
250   return widest_int::from (nonzero_bits, sgn);
251 }
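/* In get_default_value below the extended mask is paired with a zero
   VALUE, so bits that are clear in NONZERO_BITS become known zeros
   while bits that are set remain unknown.  */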
252 
253 /* Compute a default value for variable VAR and store it in the
254    CONST_VAL array.  The following rules are used to get default
255    values:
256 
257    1- Global and static variables that are declared constant are
258       considered CONSTANT.
259 
260    2- Any other value is considered UNDEFINED.  This is useful when
261       considering PHI nodes.  PHI arguments that are undefined do not
262       change the constant value of the PHI node, which allows for more
263       constants to be propagated.
264 
265    3- Variables defined by statements other than assignments and PHI
266       nodes are considered VARYING.
267 
268    4- Initial values of variables that are not GIMPLE registers are
269       considered VARYING.  */
270 
271 static ccp_prop_value_t
272 get_default_value (tree var)
273 {
274   ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
275   gimple *stmt;
276 
277   stmt = SSA_NAME_DEF_STMT (var);
278 
279   if (gimple_nop_p (stmt))
280     {
281       /* Variables defined by an empty statement are those used
282 	 before being initialized.  If VAR is a local variable, we
283 	 can assume initially that it is UNDEFINED, otherwise we must
284 	 consider it VARYING.  */
285       if (!virtual_operand_p (var)
286 	  && SSA_NAME_VAR (var)
287 	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
288 	val.lattice_val = UNDEFINED;
289       else
290 	{
291 	  val.lattice_val = VARYING;
292 	  val.mask = -1;
293 	  if (flag_tree_bit_ccp)
294 	    {
295 	      wide_int nonzero_bits = get_nonzero_bits (var);
296 	      if (nonzero_bits != -1)
297 		{
298 		  val.lattice_val = CONSTANT;
299 		  val.value = build_zero_cst (TREE_TYPE (var));
300 		  val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (var)));
301 		}
302 	    }
303 	}
304     }
305   else if (is_gimple_assign (stmt))
306     {
307       tree cst;
308       if (gimple_assign_single_p (stmt)
309 	  && DECL_P (gimple_assign_rhs1 (stmt))
310 	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
311 	{
312 	  val.lattice_val = CONSTANT;
313 	  val.value = cst;
314 	}
315       else
316 	{
317 	  /* Any other variable defined by an assignment is considered
318 	     UNDEFINED.  */
319 	  val.lattice_val = UNDEFINED;
320 	}
321     }
322   else if ((is_gimple_call (stmt)
323 	    && gimple_call_lhs (stmt) != NULL_TREE)
324 	   || gimple_code (stmt) == GIMPLE_PHI)
325     {
326       /* A variable defined by a call or a PHI node is considered
327 	 UNDEFINED.  */
328       val.lattice_val = UNDEFINED;
329     }
330   else
331     {
332       /* Otherwise, VAR will never take on a constant value.  */
333       val.lattice_val = VARYING;
334       val.mask = -1;
335     }
336 
337   return val;
338 }
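/* For example, the default definition of an uninitialized local
   VAR_DECL starts out UNDEFINED, whereas the default definition of a
   PARM_DECL takes the VARYING branch above (possibly refined to a
   partial constant via its recorded nonzero bits).  */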
339 
340 
341 /* Get the constant value associated with variable VAR.  */
342 
343 static inline ccp_prop_value_t *
344 get_value (tree var)
345 {
346   ccp_prop_value_t *val;
347 
348   if (const_val == NULL
349       || SSA_NAME_VERSION (var) >= n_const_val)
350     return NULL;
351 
352   val = &const_val[SSA_NAME_VERSION (var)];
353   if (val->lattice_val == UNINITIALIZED)
354     *val = get_default_value (var);
355 
356   canonicalize_value (val);
357 
358   return val;
359 }
360 
361 /* Return the constant tree value associated with VAR.  */
362 
363 static inline tree
364 get_constant_value (tree var)
365 {
366   ccp_prop_value_t *val;
367   if (TREE_CODE (var) != SSA_NAME)
368     {
369       if (is_gimple_min_invariant (var))
370         return var;
371       return NULL_TREE;
372     }
373   val = get_value (var);
374   if (val
375       && val->lattice_val == CONSTANT
376       && (TREE_CODE (val->value) != INTEGER_CST
377 	  || val->mask == 0))
378     return val->value;
379   return NULL_TREE;
380 }
381 
382 /* Sets the value associated with VAR to VARYING.  */
383 
384 static inline void
385 set_value_varying (tree var)
386 {
387   ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
388 
389   val->lattice_val = VARYING;
390   val->value = NULL_TREE;
391   val->mask = -1;
392 }
393 
394 /* For integer constants, make sure to drop TREE_OVERFLOW.  */
395 
396 static void
397 canonicalize_value (ccp_prop_value_t *val)
398 {
399   if (val->lattice_val != CONSTANT)
400     return;
401 
402   if (TREE_OVERFLOW_P (val->value))
403     val->value = drop_tree_overflow (val->value);
404 }
405 
406 /* Return whether the lattice transition is valid.  */
407 
408 static bool
409 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
410 {
411   /* Lattice transitions must always be monotonically increasing in
412      value.  */
413   if (old_val.lattice_val < new_val.lattice_val)
414     return true;
415 
416   if (old_val.lattice_val != new_val.lattice_val)
417     return false;
418 
419   if (!old_val.value && !new_val.value)
420     return true;
421 
422   /* Now both lattice values are CONSTANT.  */
423 
424   /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
425      when only a single copy edge is executable.  */
426   if (TREE_CODE (old_val.value) == SSA_NAME
427       && TREE_CODE (new_val.value) == SSA_NAME)
428     return true;
429 
430   /* Allow transitioning from a constant to a copy.  */
431   if (is_gimple_min_invariant (old_val.value)
432       && TREE_CODE (new_val.value) == SSA_NAME)
433     return true;
434 
435   /* Allow transitioning from PHI <&x, not executable> == &x
436      to PHI <&x, &y> == common alignment.  */
437   if (TREE_CODE (old_val.value) != INTEGER_CST
438       && TREE_CODE (new_val.value) == INTEGER_CST)
439     return true;
440 
441   /* Bit-lattices have to agree in the still valid bits.  */
442   if (TREE_CODE (old_val.value) == INTEGER_CST
443       && TREE_CODE (new_val.value) == INTEGER_CST)
444     return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
445 	    == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
446 
447   /* Otherwise constant values have to agree.  */
448   if (operand_equal_p (old_val.value, new_val.value, 0))
449     return true;
450 
451   /* At least the kinds and types should agree now.  */
452   if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
453       || !types_compatible_p (TREE_TYPE (old_val.value),
454 			      TREE_TYPE (new_val.value)))
455     return false;
456 
457   /* For floats and !HONOR_NANS allow transitions from (partial) NaN
458      to non-NaN.  */
459   tree type = TREE_TYPE (new_val.value);
460   if (SCALAR_FLOAT_TYPE_P (type)
461       && !HONOR_NANS (type))
462     {
463       if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
464 	return true;
465     }
466   else if (VECTOR_FLOAT_TYPE_P (type)
467 	   && !HONOR_NANS (type))
468     {
469       unsigned int count
470 	= tree_vector_builder::binary_encoded_nelts (old_val.value,
471 						     new_val.value);
472       for (unsigned int i = 0; i < count; ++i)
473 	if (!REAL_VALUE_ISNAN
474 	       (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val.value, i)))
475 	    && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val.value, i),
476 				 VECTOR_CST_ENCODED_ELT (new_val.value, i), 0))
477 	  return false;
478       return true;
479     }
480   else if (COMPLEX_FLOAT_TYPE_P (type)
481 	   && !HONOR_NANS (type))
482     {
483       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
484 	  && !operand_equal_p (TREE_REALPART (old_val.value),
485 			       TREE_REALPART (new_val.value), 0))
486 	return false;
487       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
488 	  && !operand_equal_p (TREE_IMAGPART (old_val.value),
489 			       TREE_IMAGPART (new_val.value), 0))
490 	return false;
491       return true;
492     }
493   return false;
494 }
495 
496 /* Set the value for variable VAR to NEW_VAL.  Return true if the new
497    value is different from VAR's previous value.  */
498 
499 static bool
500 set_lattice_value (tree var, ccp_prop_value_t *new_val)
501 {
502   /* We can deal with old UNINITIALIZED values just fine here.  */
503   ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
504 
505   canonicalize_value (new_val);
506 
507   /* We have to be careful to not go up the bitwise lattice
508      represented by the mask.  Instead of dropping to VARYING
509      use the meet operator to retain a conservative value.
510      Missed optimizations like PR65851 makes this necessary.
511      It also ensures we converge to a stable lattice solution.  */
512   if (old_val->lattice_val != UNINITIALIZED)
513     ccp_lattice_meet (new_val, old_val);
514 
515   gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
516 
517   /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
518      caller that this was a non-transition.  */
519   if (old_val->lattice_val != new_val->lattice_val
520       || (new_val->lattice_val == CONSTANT
521 	  && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
522 	      || (TREE_CODE (new_val->value) == INTEGER_CST
523 		  && (new_val->mask != old_val->mask
524 		      || (wi::bit_and_not (wi::to_widest (old_val->value),
525 					   new_val->mask)
526 			  != wi::bit_and_not (wi::to_widest (new_val->value),
527 					      new_val->mask))))
528 	      || (TREE_CODE (new_val->value) != INTEGER_CST
529 		  && !operand_equal_p (new_val->value, old_val->value, 0)))))
530     {
531       /* ???  We would like to delay creation of INTEGER_CSTs from
532 	 partially constants here.  */
533 
534       if (dump_file && (dump_flags & TDF_DETAILS))
535 	{
536 	  dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
537 	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
538 	}
539 
540       *old_val = *new_val;
541 
542       gcc_assert (new_val->lattice_val != UNINITIALIZED);
543       return true;
544     }
545 
546   return false;
547 }
548 
549 static ccp_prop_value_t get_value_for_expr (tree, bool);
550 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
551 void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
552 		      signop, int, const widest_int &, const widest_int &,
553 		      signop, int, const widest_int &, const widest_int &);
554 
555 /* Return a widest_int that can be used for bitwise simplifications
556    from VAL.  */
557 
558 static widest_int
559 value_to_wide_int (ccp_prop_value_t val)
560 {
561   if (val.value
562       && TREE_CODE (val.value) == INTEGER_CST)
563     return wi::to_widest (val.value);
564 
565   return 0;
566 }
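/* Values that are not INTEGER_CSTs simply map to zero here; the
   callers only combine that result with an all-ones (no information)
   mask, so no spurious constant bits are introduced.  */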
567 
568 /* Return the value for the address expression EXPR based on alignment
569    information.  */
570 
571 static ccp_prop_value_t
572 get_value_from_alignment (tree expr)
573 {
574   tree type = TREE_TYPE (expr);
575   ccp_prop_value_t val;
576   unsigned HOST_WIDE_INT bitpos;
577   unsigned int align;
578 
579   gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
580 
581   get_pointer_alignment_1 (expr, &align, &bitpos);
582   val.mask = wi::bit_and_not
583     (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
584      ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
585      : -1,
586      align / BITS_PER_UNIT - 1);
587   val.lattice_val
588     = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
589   if (val.lattice_val == CONSTANT)
590     val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
591   else
592     val.value = NULL_TREE;
593 
594   return val;
595 }
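/* For instance, an ADDR_EXPR known to be 16-byte aligned with a
   misalignment of 4 bytes yields { value = 4, mask = ~15 } within the
   pointer's precision: the four low bits are known to be 0100, the
   remaining bits carry no information.  */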
596 
597 /* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
598    return constant bits extracted from alignment information for
599    invariant addresses.  */
600 
601 static ccp_prop_value_t
602 get_value_for_expr (tree expr, bool for_bits_p)
603 {
604   ccp_prop_value_t val;
605 
606   if (TREE_CODE (expr) == SSA_NAME)
607     {
608       ccp_prop_value_t *val_ = get_value (expr);
609       if (val_)
610 	val = *val_;
611       else
612 	{
613 	  val.lattice_val = VARYING;
614 	  val.value = NULL_TREE;
615 	  val.mask = -1;
616 	}
617       if (for_bits_p
618 	  && val.lattice_val == CONSTANT
619 	  && TREE_CODE (val.value) == ADDR_EXPR)
620 	val = get_value_from_alignment (val.value);
621       /* Fall back to a copy value.  */
622       if (!for_bits_p
623 	  && val.lattice_val == VARYING
624 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
625 	{
626 	  val.lattice_val = CONSTANT;
627 	  val.value = expr;
628 	  val.mask = -1;
629 	}
630     }
631   else if (is_gimple_min_invariant (expr)
632 	   && (!for_bits_p || TREE_CODE (expr) == INTEGER_CST))
633     {
634       val.lattice_val = CONSTANT;
635       val.value = expr;
636       val.mask = 0;
637       canonicalize_value (&val);
638     }
639   else if (TREE_CODE (expr) == ADDR_EXPR)
640     val = get_value_from_alignment (expr);
641   else
642     {
643       val.lattice_val = VARYING;
644       val.mask = -1;
645       val.value = NULL_TREE;
646     }
647 
648   if (val.lattice_val == VARYING
649       && TYPE_UNSIGNED (TREE_TYPE (expr)))
650     val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
651 
652   return val;
653 }
654 
655 /* Return the likely CCP lattice value for STMT.
656 
657    If STMT has no operands, then return CONSTANT.
658 
659    Else if undefinedness of operands of STMT cause its value to be
660    undefined, then return UNDEFINED.
661 
662    Else if any operands of STMT are constants, then return CONSTANT.
663 
664    Else return VARYING.  */
665 
666 static ccp_lattice_t
667 likely_value (gimple *stmt)
668 {
669   bool has_constant_operand, has_undefined_operand, all_undefined_operands;
670   bool has_nsa_operand;
671   tree use;
672   ssa_op_iter iter;
673   unsigned i;
674 
675   enum gimple_code code = gimple_code (stmt);
676 
677   /* This function appears to be called only for assignments, calls,
678      conditionals, and switches, due to the logic in visit_stmt.  */
679   gcc_assert (code == GIMPLE_ASSIGN
680               || code == GIMPLE_CALL
681               || code == GIMPLE_COND
682               || code == GIMPLE_SWITCH);
683 
684   /* If the statement has volatile operands, it won't fold to a
685      constant value.  */
686   if (gimple_has_volatile_ops (stmt))
687     return VARYING;
688 
689   /* Arrive here for more complex cases.  */
690   has_constant_operand = false;
691   has_undefined_operand = false;
692   all_undefined_operands = true;
693   has_nsa_operand = false;
694   FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
695     {
696       ccp_prop_value_t *val = get_value (use);
697 
698       if (val && val->lattice_val == UNDEFINED)
699 	has_undefined_operand = true;
700       else
701 	all_undefined_operands = false;
702 
703       if (val && val->lattice_val == CONSTANT)
704 	has_constant_operand = true;
705 
706       if (SSA_NAME_IS_DEFAULT_DEF (use)
707 	  || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
708 	has_nsa_operand = true;
709     }
710 
711   /* There may be constants in regular rhs operands.  For calls we
712      have to ignore lhs, fndecl and static chain, otherwise only
713      the lhs.  */
714   for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
715        i < gimple_num_ops (stmt); ++i)
716     {
717       tree op = gimple_op (stmt, i);
718       if (!op || TREE_CODE (op) == SSA_NAME)
719 	continue;
720       if (is_gimple_min_invariant (op))
721 	has_constant_operand = true;
722     }
723 
724   if (has_constant_operand)
725     all_undefined_operands = false;
726 
727   if (has_undefined_operand
728       && code == GIMPLE_CALL
729       && gimple_call_internal_p (stmt))
730     switch (gimple_call_internal_fn (stmt))
731       {
732 	/* These 3 builtins use the first argument just as a magic
733 	   way to find out a decl uid.  */
734       case IFN_GOMP_SIMD_LANE:
735       case IFN_GOMP_SIMD_VF:
736       case IFN_GOMP_SIMD_LAST_LANE:
737 	has_undefined_operand = false;
738 	break;
739       default:
740 	break;
741       }
742 
743   /* If the operation combines operands like COMPLEX_EXPR make sure to
744      not mark the result UNDEFINED if only one part of the result is
745      undefined.  */
746   if (has_undefined_operand && all_undefined_operands)
747     return UNDEFINED;
748   else if (code == GIMPLE_ASSIGN && has_undefined_operand)
749     {
750       switch (gimple_assign_rhs_code (stmt))
751 	{
752 	/* Unary operators are handled with all_undefined_operands.  */
753 	case PLUS_EXPR:
754 	case MINUS_EXPR:
755 	case POINTER_PLUS_EXPR:
756 	case BIT_XOR_EXPR:
757 	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
758 	     Not bitwise operators, one VARYING operand may specify the
759 	     result completely.
760 	     Not logical operators for the same reason, apart from XOR.
761 	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
762 	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
763 	     the undefined operand may be promoted.  */
764 	  return UNDEFINED;
765 
766 	case ADDR_EXPR:
767 	  /* If any part of an address is UNDEFINED, like the index
768 	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
769 	  return UNDEFINED;
770 
771 	default:
772 	  ;
773 	}
774     }
775   /* If there was an UNDEFINED operand but the result may be not UNDEFINED
776      fall back to CONSTANT.  During iteration UNDEFINED may still drop
777      to CONSTANT.  */
778   if (has_undefined_operand)
779     return CONSTANT;
780 
781   /* We do not consider virtual operands here -- load from read-only
782      memory may have only VARYING virtual operands, but still be
783      constant.  Also we can combine the stmt with definitions from
784      operands whose definitions are not simulated again.  */
785   if (has_constant_operand
786       || has_nsa_operand
787       || gimple_references_memory_p (stmt))
788     return CONSTANT;
789 
790   return VARYING;
791 }
792 
793 /* Returns true if STMT cannot be constant.  */
794 
795 static bool
796 surely_varying_stmt_p (gimple *stmt)
797 {
798   /* If the statement has operands that we cannot handle, it cannot be
799      constant.  */
800   if (gimple_has_volatile_ops (stmt))
801     return true;
802 
803   /* If it is a call that does not return a value, or a direct call
804      to a function that is neither a builtin nor has the
805      assume_aligned/alloc_align attribute, it is varying.  */
806   if (is_gimple_call (stmt))
807     {
808       tree fndecl, fntype = gimple_call_fntype (stmt);
809       if (!gimple_call_lhs (stmt)
810 	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
811 	      && !DECL_BUILT_IN (fndecl)
812 	      && !lookup_attribute ("assume_aligned",
813 				    TYPE_ATTRIBUTES (fntype))
814 	      && !lookup_attribute ("alloc_align",
815 				    TYPE_ATTRIBUTES (fntype))))
816 	return true;
817     }
818 
819   /* Any other store operation is not interesting.  */
820   else if (gimple_vdef (stmt))
821     return true;
822 
823   /* Anything other than assignments and conditional jumps is not
824      interesting for CCP.  */
825   if (gimple_code (stmt) != GIMPLE_ASSIGN
826       && gimple_code (stmt) != GIMPLE_COND
827       && gimple_code (stmt) != GIMPLE_SWITCH
828       && gimple_code (stmt) != GIMPLE_CALL)
829     return true;
830 
831   return false;
832 }
833 
834 /* Initialize local data structures for CCP.  */
835 
836 static void
837 ccp_initialize (void)
838 {
839   basic_block bb;
840 
841   n_const_val = num_ssa_names;
842   const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
843 
844   /* Initialize simulation flags for PHI nodes and statements.  */
845   FOR_EACH_BB_FN (bb, cfun)
846     {
847       gimple_stmt_iterator i;
848 
849       for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
850         {
851 	  gimple *stmt = gsi_stmt (i);
852 	  bool is_varying;
853 
854 	  /* If the statement is a control insn, we still want to
855 	     simulate it at least once; otherwise its outgoing edges
856 	     will never get added.  */
857 	  if (stmt_ends_bb_p (stmt))
858 	    is_varying = false;
859 	  else
860 	    is_varying = surely_varying_stmt_p (stmt);
861 
862 	  if (is_varying)
863 	    {
864 	      tree def;
865 	      ssa_op_iter iter;
866 
867 	      /* If the statement will not produce a constant, mark
868 		 all its outputs VARYING.  */
869 	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
870 		set_value_varying (def);
871 	    }
872           prop_set_simulate_again (stmt, !is_varying);
873 	}
874     }
875 
876   /* Now process PHI nodes.  We never clear the simulate_again flag on
877      phi nodes, since we do not know which edges are executable yet,
878      except for phi nodes for virtual operands when we do not do store ccp.  */
879   FOR_EACH_BB_FN (bb, cfun)
880     {
881       gphi_iterator i;
882 
883       for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
884         {
885           gphi *phi = i.phi ();
886 
887 	  if (virtual_operand_p (gimple_phi_result (phi)))
888             prop_set_simulate_again (phi, false);
889 	  else
890             prop_set_simulate_again (phi, true);
891 	}
892     }
893 }
894 
895 /* Debug count support.  Reset the values of SSA names to
896    VARYING when the total number of SSA names analyzed is
897    beyond the specified debug count.  */
898 
899 static void
900 do_dbg_cnt (void)
901 {
902   unsigned i;
903   for (i = 0; i < num_ssa_names; i++)
904     {
905       if (!dbg_cnt (ccp))
906         {
907           const_val[i].lattice_val = VARYING;
908 	  const_val[i].mask = -1;
909           const_val[i].value = NULL_TREE;
910         }
911     }
912 }
913 
914 
915 /* We want to provide our own GET_VALUE and FOLD_STMT virtual methods.  */
916 class ccp_folder : public substitute_and_fold_engine
917 {
918  public:
919   tree get_value (tree) FINAL OVERRIDE;
920   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
921 };
922 
923 /* This method just wraps GET_CONSTANT_VALUE for now.  Over time
924    naked calls to GET_CONSTANT_VALUE should be eliminated in favor
925    of calling member functions.  */
926 
927 tree
928 ccp_folder::get_value (tree op)
929 {
930   return get_constant_value (op);
931 }
932 
933 /* Do final substitution of propagated values, cleanup the flowgraph and
934    free allocated storage.  If NONZERO_P, record nonzero bits.
935 
936    Return TRUE when something was optimized.  */
937 
938 static bool
939 ccp_finalize (bool nonzero_p)
940 {
941   bool something_changed;
942   unsigned i;
943   tree name;
944 
945   do_dbg_cnt ();
946 
947   /* Derive alignment and misalignment information from partially
948      constant pointers in the lattice or nonzero bits from partially
949      constant integers.  */
950   FOR_EACH_SSA_NAME (i, name, cfun)
951     {
952       ccp_prop_value_t *val;
953       unsigned int tem, align;
954 
955       if (!POINTER_TYPE_P (TREE_TYPE (name))
956 	  && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
957 	      /* Don't record nonzero bits before IPA to avoid
958 		 using too much memory.  */
959 	      || !nonzero_p))
960 	continue;
961 
962       val = get_value (name);
963       if (val->lattice_val != CONSTANT
964 	  || TREE_CODE (val->value) != INTEGER_CST
965 	  || val->mask == 0)
966 	continue;
967 
968       if (POINTER_TYPE_P (TREE_TYPE (name)))
969 	{
970 	  /* Trailing mask bits specify the alignment, trailing value
971 	     bits the misalignment.  */
972 	  tem = val->mask.to_uhwi ();
973 	  align = least_bit_hwi (tem);
974 	  if (align > 1)
975 	    set_ptr_info_alignment (get_ptr_info (name), align,
976 				    (TREE_INT_CST_LOW (val->value)
977 				     & (align - 1)));
978 	}
979       else
980 	{
981 	  unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
982 	  wide_int nonzero_bits
983 	    = (wide_int::from (val->mask, precision, UNSIGNED)
984 	       | wi::to_wide (val->value));
985 	  nonzero_bits &= get_nonzero_bits (name);
986 	  set_nonzero_bits (name, nonzero_bits);
987 	}
988     }
989 
990   /* Perform substitutions based on the known constant values.  */
991   class ccp_folder ccp_folder;
992   something_changed = ccp_folder.substitute_and_fold ();
993 
994   free (const_val);
995   const_val = NULL;
996   return something_changed;
997 }
998 
999 
1000 /* Compute the meet operator between *VAL1 and *VAL2.  Store the result
1001    in VAL1.
1002 
1003    		any  M UNDEFINED   = any
1004 		any  M VARYING     = VARYING
1005 		Ci   M Cj	   = Ci		if (i == j)
1006 		Ci   M Cj	   = VARYING	if (i != j)
1007    */
1008 
1009 static void
1010 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
1011 {
1012   if (val1->lattice_val == UNDEFINED
1013       /* For UNDEFINED M SSA we can't always use the SSA name because
1014          its definition may not dominate the PHI node.  Doing optimistic
1015 	 copy propagation also causes a lot of gcc.dg/uninit-pred*.c FAILs.  */
1016       && (val2->lattice_val != CONSTANT
1017 	  || TREE_CODE (val2->value) != SSA_NAME))
1018     {
1019       /* UNDEFINED M any = any   */
1020       *val1 = *val2;
1021     }
1022   else if (val2->lattice_val == UNDEFINED
1023 	   /* See above.  */
1024 	   && (val1->lattice_val != CONSTANT
1025 	       || TREE_CODE (val1->value) != SSA_NAME))
1026     {
1027       /* any M UNDEFINED = any
1028          Nothing to do.  VAL1 already contains the value we want.  */
1029       ;
1030     }
1031   else if (val1->lattice_val == VARYING
1032            || val2->lattice_val == VARYING)
1033     {
1034       /* any M VARYING = VARYING.  */
1035       val1->lattice_val = VARYING;
1036       val1->mask = -1;
1037       val1->value = NULL_TREE;
1038     }
1039   else if (val1->lattice_val == CONSTANT
1040 	   && val2->lattice_val == CONSTANT
1041 	   && TREE_CODE (val1->value) == INTEGER_CST
1042 	   && TREE_CODE (val2->value) == INTEGER_CST)
1043     {
1044       /* Ci M Cj = Ci		if (i == j)
1045 	 Ci M Cj = VARYING	if (i != j)
1046 
1047          For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
1048 	 drop to varying.  */
1049       val1->mask = (val1->mask | val2->mask
1050 		    | (wi::to_widest (val1->value)
1051 		       ^ wi::to_widest (val2->value)));
1052       if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1053 	{
1054 	  val1->lattice_val = VARYING;
1055 	  val1->value = NULL_TREE;
1056 	}
1057     }
1058   else if (val1->lattice_val == CONSTANT
1059 	   && val2->lattice_val == CONSTANT
1060 	   && operand_equal_p (val1->value, val2->value, 0))
1061     {
1062       /* Ci M Cj = Ci		if (i == j)
1063 	 Ci M Cj = VARYING	if (i != j)
1064 
1065          VAL1 already contains the value we want for equivalent values.  */
1066     }
1067   else if (val1->lattice_val == CONSTANT
1068 	   && val2->lattice_val == CONSTANT
1069 	   && (TREE_CODE (val1->value) == ADDR_EXPR
1070 	       || TREE_CODE (val2->value) == ADDR_EXPR))
1071     {
1072       /* When not equal addresses are involved try meeting for
1073 	 alignment.  */
1074       ccp_prop_value_t tem = *val2;
1075       if (TREE_CODE (val1->value) == ADDR_EXPR)
1076 	*val1 = get_value_for_expr (val1->value, true);
1077       if (TREE_CODE (val2->value) == ADDR_EXPR)
1078 	tem = get_value_for_expr (val2->value, true);
1079       ccp_lattice_meet (val1, &tem);
1080     }
1081   else
1082     {
1083       /* Any other combination is VARYING.  */
1084       val1->lattice_val = VARYING;
1085       val1->mask = -1;
1086       val1->value = NULL_TREE;
1087     }
1088 }
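/* As an example of the INTEGER_CST meet above, combining the constants
   12 (1100b) and 4 (0100b), both with empty masks, gives
   { value = 12, mask = 8 }: bit 3 differs and becomes unknown, all
   other bits remain known.  */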
1089 
1090 
1091 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1092    lattice values to determine PHI_NODE's lattice value.  The value of a
1093    PHI node is determined by calling ccp_lattice_meet with all the arguments
1094    of the PHI node that are incoming via executable edges.  */
1095 
1096 enum ssa_prop_result
1097 ccp_propagate::visit_phi (gphi *phi)
1098 {
1099   unsigned i;
1100   ccp_prop_value_t new_val;
1101 
1102   if (dump_file && (dump_flags & TDF_DETAILS))
1103     {
1104       fprintf (dump_file, "\nVisiting PHI node: ");
1105       print_gimple_stmt (dump_file, phi, 0, dump_flags);
1106     }
1107 
1108   new_val.lattice_val = UNDEFINED;
1109   new_val.value = NULL_TREE;
1110   new_val.mask = 0;
1111 
1112   bool first = true;
1113   bool non_exec_edge = false;
1114   for (i = 0; i < gimple_phi_num_args (phi); i++)
1115     {
1116       /* Compute the meet operator over all the PHI arguments flowing
1117 	 through executable edges.  */
1118       edge e = gimple_phi_arg_edge (phi, i);
1119 
1120       if (dump_file && (dump_flags & TDF_DETAILS))
1121 	{
1122 	  fprintf (dump_file,
1123 	      "\tArgument #%d (%d -> %d %sexecutable)\n",
1124 	      i, e->src->index, e->dest->index,
1125 	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1126 	}
1127 
1128       /* If the incoming edge is executable, compute the meet operator for
1129 	 the existing value of the PHI node and the current PHI argument.  */
1130       if (e->flags & EDGE_EXECUTABLE)
1131 	{
1132 	  tree arg = gimple_phi_arg (phi, i)->def;
1133 	  ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1134 
1135 	  if (first)
1136 	    {
1137 	      new_val = arg_val;
1138 	      first = false;
1139 	    }
1140 	  else
1141 	    ccp_lattice_meet (&new_val, &arg_val);
1142 
1143 	  if (dump_file && (dump_flags & TDF_DETAILS))
1144 	    {
1145 	      fprintf (dump_file, "\t");
1146 	      print_generic_expr (dump_file, arg, dump_flags);
1147 	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
1148 	      fprintf (dump_file, "\n");
1149 	    }
1150 
1151 	  if (new_val.lattice_val == VARYING)
1152 	    break;
1153 	}
1154       else
1155 	non_exec_edge = true;
1156     }
1157 
1158   /* In case there were non-executable edges and the value is a copy
1159      make sure its definition dominates the PHI node.  */
1160   if (non_exec_edge
1161       && new_val.lattice_val == CONSTANT
1162       && TREE_CODE (new_val.value) == SSA_NAME
1163       && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1164       && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1165 			   gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1166     {
1167       new_val.lattice_val = VARYING;
1168       new_val.value = NULL_TREE;
1169       new_val.mask = -1;
1170     }
1171 
1172   if (dump_file && (dump_flags & TDF_DETAILS))
1173     {
1174       dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
1175       fprintf (dump_file, "\n\n");
1176     }
1177 
1178   /* Make the transition to the new value.  */
1179   if (set_lattice_value (gimple_phi_result (phi), &new_val))
1180     {
1181       if (new_val.lattice_val == VARYING)
1182 	return SSA_PROP_VARYING;
1183       else
1184 	return SSA_PROP_INTERESTING;
1185     }
1186   else
1187     return SSA_PROP_NOT_INTERESTING;
1188 }
1189 
1190 /* Return the constant value for OP or OP otherwise.  */
1191 
1192 static tree
1193 valueize_op (tree op)
1194 {
1195   if (TREE_CODE (op) == SSA_NAME)
1196     {
1197       tree tem = get_constant_value (op);
1198       if (tem)
1199 	return tem;
1200     }
1201   return op;
1202 }
1203 
1204 /* Return the constant value for OP, but signal to not follow SSA
1205    edges if the definition may be simulated again.  */
1206 
1207 static tree
1208 valueize_op_1 (tree op)
1209 {
1210   if (TREE_CODE (op) == SSA_NAME)
1211     {
1212       /* If the definition may be simulated again we cannot follow
1213          this SSA edge as the SSA propagator does not necessarily
1214 	 re-visit the use.  */
1215       gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1216       if (!gimple_nop_p (def_stmt)
1217 	  && prop_simulate_again_p (def_stmt))
1218 	return NULL_TREE;
1219       tree tem = get_constant_value (op);
1220       if (tem)
1221 	return tem;
1222     }
1223   return op;
1224 }
1225 
1226 /* CCP specific front-end to the non-destructive constant folding
1227    routines.
1228 
1229    Attempt to simplify the RHS of STMT knowing that one or more
1230    operands are constants.
1231 
1232    If simplification is possible, return the simplified RHS,
1233    otherwise return the original RHS or NULL_TREE.  */
1234 
1235 static tree
1236 ccp_fold (gimple *stmt)
1237 {
1238   location_t loc = gimple_location (stmt);
1239   switch (gimple_code (stmt))
1240     {
1241     case GIMPLE_COND:
1242       {
1243         /* Handle comparison operators that can appear in GIMPLE form.  */
1244         tree op0 = valueize_op (gimple_cond_lhs (stmt));
1245         tree op1 = valueize_op (gimple_cond_rhs (stmt));
1246         enum tree_code code = gimple_cond_code (stmt);
1247         return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1248       }
1249 
1250     case GIMPLE_SWITCH:
1251       {
1252 	/* Return the constant switch index.  */
1253         return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1254       }
1255 
1256     case GIMPLE_ASSIGN:
1257     case GIMPLE_CALL:
1258       return gimple_fold_stmt_to_constant_1 (stmt,
1259 					     valueize_op, valueize_op_1);
1260 
1261     default:
1262       gcc_unreachable ();
1263     }
1264 }
1265 
1266 /* Apply the operation CODE in type TYPE to the value, mask pair
1267    RVAL and RMASK representing a value of type RTYPE and set
1268    the value, mask pair *VAL and *MASK to the result.  */
1269 
1270 void
1271 bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
1272 		widest_int *val, widest_int *mask,
1273 		signop rtype_sgn, int rtype_precision,
1274 		const widest_int &rval, const widest_int &rmask)
1275 {
1276   switch (code)
1277     {
1278     case BIT_NOT_EXPR:
1279       *mask = rmask;
1280       *val = ~rval;
1281       break;
1282 
1283     case NEGATE_EXPR:
1284       {
1285 	widest_int temv, temm;
1286 	/* Return ~rval + 1.  */
1287 	bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
1288 			type_sgn, type_precision, rval, rmask);
1289 	bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
1290 			 type_sgn, type_precision, temv, temm,
1291 			 type_sgn, type_precision, 1, 0);
1292 	break;
1293       }
1294 
1295     CASE_CONVERT:
1296       {
1297 	/* First extend mask and value according to the original type.  */
1298 	*mask = wi::ext (rmask, rtype_precision, rtype_sgn);
1299 	*val = wi::ext (rval, rtype_precision, rtype_sgn);
1300 
1301 	/* Then extend mask and value according to the target type.  */
1302 	*mask = wi::ext (*mask, type_precision, type_sgn);
1303 	*val = wi::ext (*val, type_precision, type_sgn);
1304 	break;
1305       }
1306 
1307     default:
1308       *mask = -1;
1309       break;
1310     }
1311 }
1312 
1313 /* Apply the operation CODE in type TYPE to the value, mask pairs
1314    R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1315    and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
1316 
1317 void
1318 bit_value_binop (enum tree_code code, signop sgn, int width,
1319 		 widest_int *val, widest_int *mask,
1320 		 signop r1type_sgn, int r1type_precision,
1321 		 const widest_int &r1val, const widest_int &r1mask,
1322 		 signop r2type_sgn, int r2type_precision,
1323 		 const widest_int &r2val, const widest_int &r2mask)
1324 {
1325   bool swap_p = false;
1326 
1327   /* Assume we'll get a constant result.  Use an initial non varying
1328      value, we fall back to varying in the end if necessary.  */
1329   *mask = -1;
1330 
1331   switch (code)
1332     {
1333     case BIT_AND_EXPR:
1334       /* The mask is constant where there is a known not
1335 	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1336       *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1337       *val = r1val & r2val;
1338       break;
1339 
1340     case BIT_IOR_EXPR:
1341       /* The mask is constant where there is a known
1342 	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
1343       *mask = wi::bit_and_not (r1mask | r2mask,
1344 			       wi::bit_and_not (r1val, r1mask)
1345 			       | wi::bit_and_not (r2val, r2mask));
1346       *val = r1val | r2val;
1347       break;
1348 
1349     case BIT_XOR_EXPR:
1350       /* m1 | m2  */
1351       *mask = r1mask | r2mask;
1352       *val = r1val ^ r2val;
1353       break;
1354 
1355     case LROTATE_EXPR:
1356     case RROTATE_EXPR:
1357       if (r2mask == 0)
1358 	{
1359 	  widest_int shift = r2val;
1360 	  if (shift == 0)
1361 	    {
1362 	      *mask = r1mask;
1363 	      *val = r1val;
1364 	    }
1365 	  else
1366 	    {
1367 	      if (wi::neg_p (shift))
1368 		{
1369 		  shift = -shift;
1370 		  if (code == RROTATE_EXPR)
1371 		    code = LROTATE_EXPR;
1372 		  else
1373 		    code = RROTATE_EXPR;
1374 		}
1375 	      if (code == RROTATE_EXPR)
1376 		{
1377 		  *mask = wi::rrotate (r1mask, shift, width);
1378 		  *val = wi::rrotate (r1val, shift, width);
1379 		}
1380 	      else
1381 		{
1382 		  *mask = wi::lrotate (r1mask, shift, width);
1383 		  *val = wi::lrotate (r1val, shift, width);
1384 		}
1385 	    }
1386 	}
1387       break;
1388 
1389     case LSHIFT_EXPR:
1390     case RSHIFT_EXPR:
1391       /* ???  We can handle partially known shift counts if we know
1392 	 their sign.  That way we can tell that (x << (y | 8)) & 255
1393 	 is zero.  */
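      /* For example, shifting { val = 0, mask = 1 } (either 0 or 1) left
	 by a constant 3 yields { val = 0, mask = 8 }, i.e. either 0
	 or 8.  */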
1394       if (r2mask == 0)
1395 	{
1396 	  widest_int shift = r2val;
1397 	  if (shift == 0)
1398 	    {
1399 	      *mask = r1mask;
1400 	      *val = r1val;
1401 	    }
1402 	  else
1403 	    {
1404 	      if (wi::neg_p (shift))
1405 		{
1406 		  shift = -shift;
1407 		  if (code == RSHIFT_EXPR)
1408 		    code = LSHIFT_EXPR;
1409 		  else
1410 		    code = RSHIFT_EXPR;
1411 		}
1412 	      if (code == RSHIFT_EXPR)
1413 		{
1414 		  *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1415 		  *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1416 		}
1417 	      else
1418 		{
1419 		  *mask = wi::ext (r1mask << shift, width, sgn);
1420 		  *val = wi::ext (r1val << shift, width, sgn);
1421 		}
1422 	    }
1423 	}
1424       break;
1425 
1426     case PLUS_EXPR:
1427     case POINTER_PLUS_EXPR:
1428       {
1429 	/* Do the addition with unknown bits set to zero, to give carry-ins of
1430 	   zero wherever possible.  */
1431 	widest_int lo = (wi::bit_and_not (r1val, r1mask)
1432 			 + wi::bit_and_not (r2val, r2mask));
1433 	lo = wi::ext (lo, width, sgn);
1434 	/* Do the addition with unknown bits set to one, to give carry-ins of
1435 	   one wherever possible.  */
1436 	widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1437 	hi = wi::ext (hi, width, sgn);
1438 	/* Each bit in the result is known if (a) the corresponding bits in
1439 	   both inputs are known, and (b) the carry-in to that bit position
1440 	   is known.  We can check condition (b) by seeing if we got the same
1441 	   result with minimised carries as with maximised carries.  */
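	/* For example, { val = 1, mask = 2 } (1 or 3) plus the constant 4
	   gives lo = 5 and hi = 7, hence { val = 5, mask = 2 }: either 5
	   or 7.  */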
1442 	*mask = r1mask | r2mask | (lo ^ hi);
1443 	*mask = wi::ext (*mask, width, sgn);
1444 	/* It shouldn't matter whether we choose lo or hi here.  */
1445 	*val = lo;
1446 	break;
1447       }
1448 
1449     case MINUS_EXPR:
1450       {
1451 	widest_int temv, temm;
1452 	bit_value_unop (NEGATE_EXPR, r2type_sgn, r2type_precision, &temv, &temm,
1453 			  r2type_sgn, r2type_precision, r2val, r2mask);
1454 	bit_value_binop (PLUS_EXPR, sgn, width, val, mask,
1455 			 r1type_sgn, r1type_precision, r1val, r1mask,
1456 			 r2type_sgn, r2type_precision, temv, temm);
1457 	break;
1458       }
1459 
1460     case MULT_EXPR:
1461       {
1462 	/* Just track trailing zeros in both operands and transfer
1463 	   them to the other.  */
1464 	int r1tz = wi::ctz (r1val | r1mask);
1465 	int r2tz = wi::ctz (r2val | r2mask);
1466 	if (r1tz + r2tz >= width)
1467 	  {
1468 	    *mask = 0;
1469 	    *val = 0;
1470 	  }
1471 	else if (r1tz + r2tz > 0)
1472 	  {
1473 	    *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1474 			     width, sgn);
1475 	    *val = 0;
1476 	  }
1477 	break;
1478       }
1479 
1480     case EQ_EXPR:
1481     case NE_EXPR:
1482       {
1483 	widest_int m = r1mask | r2mask;
1484 	if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m))
1485 	  {
1486 	    *mask = 0;
1487 	    *val = ((code == EQ_EXPR) ? 0 : 1);
1488 	  }
1489 	else
1490 	  {
1491 	    /* We know the result of a comparison is always one or zero.  */
1492 	    *mask = 1;
1493 	    *val = 0;
1494 	  }
1495 	break;
1496       }
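      /* In the EQ_EXPR/NE_EXPR case above, comparing { val = 2, mask = 1 }
	 with the constant 5 is already decided: the known bits differ, so
	 EQ_EXPR evaluates to 0 and NE_EXPR to 1.  */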
1497 
1498     case GE_EXPR:
1499     case GT_EXPR:
1500       swap_p = true;
1501       code = swap_tree_comparison (code);
1502       /* Fall through.  */
1503     case LT_EXPR:
1504     case LE_EXPR:
1505       {
1506 	int minmax, maxmin;
1507 
1508 	const widest_int &o1val = swap_p ? r2val : r1val;
1509 	const widest_int &o1mask = swap_p ? r2mask : r1mask;
1510 	const widest_int &o2val = swap_p ? r1val : r2val;
1511 	const widest_int &o2mask = swap_p ? r1mask : r2mask;
1512 
1513 	/* If the most significant bits are not known we know nothing.  */
1514 	if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1515 	  break;
1516 
1517 	/* For comparisons the signedness is in the comparison operands.  */
1518 	sgn = r1type_sgn;
1519 
1520 	/* If we know the most significant bits we know the operands'
1521 	   value ranges by means of treating varying bits as zero
1522 	   or one.  Do a cross comparison of the max/min pairs.  */
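	/* E.g. for unsigned operands, { val = 0, mask = 3 } (0 .. 3)
	   compared LT/LE against the constant 8 is fully decided: even
	   the largest possible value of the first operand is below 8,
	   so the result is 1.  */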
1523 	maxmin = wi::cmp (o1val | o1mask,
1524 			  wi::bit_and_not (o2val, o2mask), sgn);
1525 	minmax = wi::cmp (wi::bit_and_not (o1val, o1mask),
1526 			  o2val | o2mask, sgn);
1527 	if (maxmin < 0)  /* o1 is less than o2.  */
1528 	  {
1529 	    *mask = 0;
1530 	    *val = 1;
1531 	  }
1532 	else if (minmax > 0)  /* o1 is not less or equal to o2.  */
1533 	  {
1534 	    *mask = 0;
1535 	    *val = 0;
1536 	  }
1537 	else if (maxmin == minmax)  /* o1 and o2 are equal.  */
1538 	  {
1539 	    /* This probably should never happen as we'd have
1540 	       folded the thing during fully constant value folding.  */
1541 	    *mask = 0;
1542 	    *val = (code == LE_EXPR ? 1 : 0);
1543 	  }
1544 	else
1545 	  {
1546 	    /* We know the result of a comparison is always one or zero.  */
1547 	    *mask = 1;
1548 	    *val = 0;
1549 	  }
1550 	break;
1551       }
1552 
1553     default:;
1554     }
1555 }
1556 
1557 /* Return the propagation value when applying the operation CODE to
1558    the value RHS yielding type TYPE.  */
1559 
1560 static ccp_prop_value_t
1561 bit_value_unop (enum tree_code code, tree type, tree rhs)
1562 {
1563   ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1564   widest_int value, mask;
1565   ccp_prop_value_t val;
1566 
1567   if (rval.lattice_val == UNDEFINED)
1568     return rval;
1569 
1570   gcc_assert ((rval.lattice_val == CONSTANT
1571 	       && TREE_CODE (rval.value) == INTEGER_CST)
1572 	      || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1573   bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1574 		  TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
1575 		  value_to_wide_int (rval), rval.mask);
1576   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1577     {
1578       val.lattice_val = CONSTANT;
1579       val.mask = mask;
1580       /* ???  Delay building trees here.  */
1581       val.value = wide_int_to_tree (type, value);
1582     }
1583   else
1584     {
1585       val.lattice_val = VARYING;
1586       val.value = NULL_TREE;
1587       val.mask = -1;
1588     }
1589   return val;
1590 }
1591 
1592 /* Return the propagation value when applying the operation CODE to
1593    the values RHS1 and RHS2 yielding type TYPE.  */
1594 
1595 static ccp_prop_value_t
1596 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1597 {
1598   ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1599   ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1600   widest_int value, mask;
1601   ccp_prop_value_t val;
1602 
1603   if (r1val.lattice_val == UNDEFINED
1604       || r2val.lattice_val == UNDEFINED)
1605     {
1606       val.lattice_val = VARYING;
1607       val.value = NULL_TREE;
1608       val.mask = -1;
1609       return val;
1610     }
1611 
1612   gcc_assert ((r1val.lattice_val == CONSTANT
1613 	       && TREE_CODE (r1val.value) == INTEGER_CST)
1614 	      || wi::sext (r1val.mask,
1615 			   TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1616   gcc_assert ((r2val.lattice_val == CONSTANT
1617 	       && TREE_CODE (r2val.value) == INTEGER_CST)
1618 	      || wi::sext (r2val.mask,
1619 			   TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1620   bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1621 		   TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
1622 		   value_to_wide_int (r1val), r1val.mask,
1623 		   TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
1624 		   value_to_wide_int (r2val), r2val.mask);
1625 
1626   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1627     {
1628       val.lattice_val = CONSTANT;
1629       val.mask = mask;
1630       /* ???  Delay building trees here.  */
1631       val.value = wide_int_to_tree (type, value);
1632     }
1633   else
1634     {
1635       val.lattice_val = VARYING;
1636       val.value = NULL_TREE;
1637       val.mask = -1;
1638     }
1639   return val;
1640 }
1641 
1642 /* Return the propagation value for __builtin_assume_aligned
1643    and functions with assume_aligned or alloc_aligned attribute.
1644    For __builtin_assume_aligned, ATTR is NULL_TREE,
1645    for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1646    is false, for alloc_aligned attribute ATTR is non-NULL and
1647    ALLOC_ALIGNED is true.  */
1648 
1649 static ccp_prop_value_t
1650 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1651 			  bool alloc_aligned)
1652 {
1653   tree align, misalign = NULL_TREE, type;
1654   unsigned HOST_WIDE_INT aligni, misaligni = 0;
1655   ccp_prop_value_t alignval;
1656   widest_int value, mask;
1657   ccp_prop_value_t val;
1658 
1659   if (attr == NULL_TREE)
1660     {
1661       tree ptr = gimple_call_arg (stmt, 0);
1662       type = TREE_TYPE (ptr);
1663       ptrval = get_value_for_expr (ptr, true);
1664     }
1665   else
1666     {
1667       tree lhs = gimple_call_lhs (stmt);
1668       type = TREE_TYPE (lhs);
1669     }
1670 
1671   if (ptrval.lattice_val == UNDEFINED)
1672     return ptrval;
1673   gcc_assert ((ptrval.lattice_val == CONSTANT
1674 	       && TREE_CODE (ptrval.value) == INTEGER_CST)
1675 	      || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1676   if (attr == NULL_TREE)
1677     {
1678       /* Get aligni and misaligni from __builtin_assume_aligned.  */
1679       align = gimple_call_arg (stmt, 1);
1680       if (!tree_fits_uhwi_p (align))
1681 	return ptrval;
1682       aligni = tree_to_uhwi (align);
1683       if (gimple_call_num_args (stmt) > 2)
1684 	{
1685 	  misalign = gimple_call_arg (stmt, 2);
1686 	  if (!tree_fits_uhwi_p (misalign))
1687 	    return ptrval;
1688 	  misaligni = tree_to_uhwi (misalign);
1689 	}
1690     }
1691   else
1692     {
1693       /* Get aligni and misaligni from assume_aligned or
1694 	 alloc_align attributes.  */
1695       if (TREE_VALUE (attr) == NULL_TREE)
1696 	return ptrval;
1697       attr = TREE_VALUE (attr);
1698       align = TREE_VALUE (attr);
1699       if (!tree_fits_uhwi_p (align))
1700 	return ptrval;
1701       aligni = tree_to_uhwi (align);
1702       if (alloc_aligned)
1703 	{
1704 	  if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1705 	    return ptrval;
1706 	  align = gimple_call_arg (stmt, aligni - 1);
1707 	  if (!tree_fits_uhwi_p (align))
1708 	    return ptrval;
1709 	  aligni = tree_to_uhwi (align);
1710 	}
1711       else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1712 	{
1713 	  misalign = TREE_VALUE (TREE_CHAIN (attr));
1714 	  if (!tree_fits_uhwi_p (misalign))
1715 	    return ptrval;
1716 	  misaligni = tree_to_uhwi (misalign);
1717 	}
1718     }
1719   if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1720     return ptrval;
1721 
1722   align = build_int_cst_type (type, -aligni);
1723   alignval = get_value_for_expr (align, true);
1724   bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1725 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (ptrval), ptrval.mask,
1726 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (alignval), alignval.mask);
1727 
1728   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1729     {
1730       val.lattice_val = CONSTANT;
1731       val.mask = mask;
1732       gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1733       gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1734       value |= misaligni;
1735       /* ???  Delay building trees here.  */
1736       val.value = wide_int_to_tree (type, value);
1737     }
1738   else
1739     {
1740       val.lattice_val = VARYING;
1741       val.value = NULL_TREE;
1742       val.mask = -1;
1743     }
1744   return val;
1745 }
1746 
1747 /* Evaluate statement STMT.
1748    Valid only for assignments, calls, conditionals, and switches. */
1749 
1750 static ccp_prop_value_t
1751 evaluate_stmt (gimple *stmt)
1752 {
1753   ccp_prop_value_t val;
1754   tree simplified = NULL_TREE;
1755   ccp_lattice_t likelyvalue = likely_value (stmt);
1756   bool is_constant = false;
1757   unsigned int align;
1758 
1759   if (dump_file && (dump_flags & TDF_DETAILS))
1760     {
1761       fprintf (dump_file, "which is likely ");
1762       switch (likelyvalue)
1763 	{
1764 	case CONSTANT:
1765 	  fprintf (dump_file, "CONSTANT");
1766 	  break;
1767 	case UNDEFINED:
1768 	  fprintf (dump_file, "UNDEFINED");
1769 	  break;
1770 	case VARYING:
1771 	  fprintf (dump_file, "VARYING");
1772 	  break;
1773 	default:;
1774 	}
1775       fprintf (dump_file, "\n");
1776     }
1777 
1778   /* If the statement is likely to have a CONSTANT result, then try
1779      to fold the statement to determine the constant value.  */
1780   /* FIXME.  This is the only place that we call ccp_fold.
1781      Since likely_value never returns CONSTANT for calls, we will
1782      not attempt to fold them, including builtins that may profit.  */
1783   if (likelyvalue == CONSTANT)
1784     {
1785       fold_defer_overflow_warnings ();
1786       simplified = ccp_fold (stmt);
1787       if (simplified
1788 	  && TREE_CODE (simplified) == SSA_NAME)
1789 	{
1790 	  /* We may not use values of something that may be simulated again,
1791 	     see valueize_op_1.  */
1792 	  if (SSA_NAME_IS_DEFAULT_DEF (simplified)
1793 	      || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
1794 	    {
1795 	      ccp_prop_value_t *val = get_value (simplified);
1796 	      if (val && val->lattice_val != VARYING)
1797 		{
1798 		  fold_undefer_overflow_warnings (true, stmt, 0);
1799 		  return *val;
1800 		}
1801 	    }
1802 	  else
1803 	    /* We may also not place a non-valueized copy in the lattice
1804 	       as that might become stale if we never re-visit this stmt.  */
1805 	    simplified = NULL_TREE;
1806 	}
1807       is_constant = simplified && is_gimple_min_invariant (simplified);
1808       fold_undefer_overflow_warnings (is_constant, stmt, 0);
1809       if (is_constant)
1810 	{
1811 	  /* The statement produced a constant value.  */
1812 	  val.lattice_val = CONSTANT;
1813 	  val.value = simplified;
1814 	  val.mask = 0;
1815 	  return val;
1816 	}
1817     }
1818   /* If the statement is likely to have a VARYING result, then do not
1819      bother folding the statement.  */
1820   else if (likelyvalue == VARYING)
1821     {
1822       enum gimple_code code = gimple_code (stmt);
1823       if (code == GIMPLE_ASSIGN)
1824         {
1825           enum tree_code subcode = gimple_assign_rhs_code (stmt);
1826 
1827           /* Other cases cannot satisfy is_gimple_min_invariant
1828              without folding.  */
1829           if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1830             simplified = gimple_assign_rhs1 (stmt);
1831         }
1832       else if (code == GIMPLE_SWITCH)
1833         simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1834       else
1835 	/* These cannot satisfy is_gimple_min_invariant without folding.  */
1836 	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1837       is_constant = simplified && is_gimple_min_invariant (simplified);
1838       if (is_constant)
1839 	{
1840 	  /* The statement produced a constant value.  */
1841 	  val.lattice_val = CONSTANT;
1842 	  val.value = simplified;
1843 	  val.mask = 0;
1844 	}
1845     }
1846   /* If the statement result is likely UNDEFINED, make it so.  */
1847   else if (likelyvalue == UNDEFINED)
1848     {
1849       val.lattice_val = UNDEFINED;
1850       val.value = NULL_TREE;
1851       val.mask = 0;
1852       return val;
1853     }
1854 
1855   /* Resort to simplification for bitwise tracking.  */
1856   if (flag_tree_bit_ccp
1857       && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1858 	  || (gimple_assign_single_p (stmt)
1859 	      && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1860       && !is_constant)
1861     {
1862       enum gimple_code code = gimple_code (stmt);
1863       val.lattice_val = VARYING;
1864       val.value = NULL_TREE;
1865       val.mask = -1;
1866       if (code == GIMPLE_ASSIGN)
1867 	{
1868 	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
1869 	  tree rhs1 = gimple_assign_rhs1 (stmt);
1870 	  tree lhs = gimple_assign_lhs (stmt);
1871 	  if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1872 	       || POINTER_TYPE_P (TREE_TYPE (lhs)))
1873 	      && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1874 		  || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1875 	    switch (get_gimple_rhs_class (subcode))
1876 	      {
1877 	      case GIMPLE_SINGLE_RHS:
1878 	        val = get_value_for_expr (rhs1, true);
1879 		break;
1880 
1881 	      case GIMPLE_UNARY_RHS:
1882 		val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1883 		break;
1884 
1885 	      case GIMPLE_BINARY_RHS:
1886 		val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1887 				       gimple_assign_rhs2 (stmt));
1888 		break;
1889 
1890 	      default:;
1891 	      }
1892 	}
1893       else if (code == GIMPLE_COND)
1894 	{
1895 	  enum tree_code code = gimple_cond_code (stmt);
1896 	  tree rhs1 = gimple_cond_lhs (stmt);
1897 	  tree rhs2 = gimple_cond_rhs (stmt);
1898 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1899 	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1900 	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1901 	}
1902       else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1903 	{
1904 	  tree fndecl = gimple_call_fndecl (stmt);
1905 	  switch (DECL_FUNCTION_CODE (fndecl))
1906 	    {
1907 	    case BUILT_IN_MALLOC:
1908 	    case BUILT_IN_REALLOC:
1909 	    case BUILT_IN_CALLOC:
1910 	    case BUILT_IN_STRDUP:
1911 	    case BUILT_IN_STRNDUP:
1912 	      val.lattice_val = CONSTANT;
1913 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1914 	      val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1915 			   / BITS_PER_UNIT - 1);
1916 	      break;
1917 
1918 	    CASE_BUILT_IN_ALLOCA:
1919 	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA
1920 		       ? BIGGEST_ALIGNMENT
1921 		       : TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
1922 	      val.lattice_val = CONSTANT;
1923 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1924 	      val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1925 	      break;
1926 
1927 	    /* These builtins return their first argument, unmodified.  */
1928 	    case BUILT_IN_MEMCPY:
1929 	    case BUILT_IN_MEMMOVE:
1930 	    case BUILT_IN_MEMSET:
1931 	    case BUILT_IN_STRCPY:
1932 	    case BUILT_IN_STRNCPY:
1933 	    case BUILT_IN_MEMCPY_CHK:
1934 	    case BUILT_IN_MEMMOVE_CHK:
1935 	    case BUILT_IN_MEMSET_CHK:
1936 	    case BUILT_IN_STRCPY_CHK:
1937 	    case BUILT_IN_STRNCPY_CHK:
1938 	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1939 	      break;
1940 
1941 	    case BUILT_IN_ASSUME_ALIGNED:
1942 	      val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1943 	      break;
1944 
1945 	    case BUILT_IN_ALIGNED_ALLOC:
1946 	      {
1947 		tree align = get_constant_value (gimple_call_arg (stmt, 0));
1948 		if (align
1949 		    && tree_fits_uhwi_p (align))
1950 		  {
1951 		    unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1952 		    if (aligni > 1
1953 			/* align must be a power of two.  */
1954 			&& (aligni & (aligni - 1)) == 0)
1955 		      {
1956 			val.lattice_val = CONSTANT;
1957 			val.value = build_int_cst (ptr_type_node, 0);
1958 			val.mask = -aligni;
1959 		      }
1960 		  }
1961 		break;
1962 	      }
1963 
1964 	    default:;
1965 	    }
1966 	}
1967       if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
1968 	{
1969 	  tree fntype = gimple_call_fntype (stmt);
1970 	  if (fntype)
1971 	    {
1972 	      tree attrs = lookup_attribute ("assume_aligned",
1973 					     TYPE_ATTRIBUTES (fntype));
1974 	      if (attrs)
1975 		val = bit_value_assume_aligned (stmt, attrs, val, false);
1976 	      attrs = lookup_attribute ("alloc_align",
1977 					TYPE_ATTRIBUTES (fntype));
1978 	      if (attrs)
1979 		val = bit_value_assume_aligned (stmt, attrs, val, true);
1980 	    }
1981 	}
1982       is_constant = (val.lattice_val == CONSTANT);
1983     }
1984 
1985   if (flag_tree_bit_ccp
1986       && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
1987 	  || !is_constant)
1988       && gimple_get_lhs (stmt)
1989       && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
1990     {
1991       tree lhs = gimple_get_lhs (stmt);
1992       wide_int nonzero_bits = get_nonzero_bits (lhs);
1993       if (nonzero_bits != -1)
1994 	{
1995 	  if (!is_constant)
1996 	    {
1997 	      val.lattice_val = CONSTANT;
1998 	      val.value = build_zero_cst (TREE_TYPE (lhs));
1999 	      val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
2000 	      is_constant = true;
2001 	    }
2002 	  else
2003 	    {
2004 	      if (wi::bit_and_not (wi::to_wide (val.value), nonzero_bits) != 0)
2005 		val.value = wide_int_to_tree (TREE_TYPE (lhs),
2006 					      nonzero_bits
2007 					      & wi::to_wide (val.value));
2008 	      if (nonzero_bits == 0)
2009 		val.mask = 0;
2010 	      else
2011 		val.mask = val.mask & extend_mask (nonzero_bits,
2012 						   TYPE_SIGN (TREE_TYPE (lhs)));
2013 	    }
2014 	}
2015     }
2016 
2017   /* The statement produced a nonconstant value.  */
2018   if (!is_constant)
2019     {
2020       /* The statement produced a copy.  */
2021       if (simplified && TREE_CODE (simplified) == SSA_NAME
2022 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
2023 	{
2024 	  val.lattice_val = CONSTANT;
2025 	  val.value = simplified;
2026 	  val.mask = -1;
2027 	}
2028       /* The statement is VARYING.  */
2029       else
2030 	{
2031 	  val.lattice_val = VARYING;
2032 	  val.value = NULL_TREE;
2033 	  val.mask = -1;
2034 	}
2035     }
2036 
2037   return val;
2038 }
2039 
2040 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
2041 
2042 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2043    each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */
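/* A sketch of the intended effect (hypothetical SSA names):
     saved_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (saved_1);
   becomes
     saved_1 = __builtin_stack_save ();
     ...
     var ={v} {CLOBBER};
     __builtin_stack_restore (saved_1);
   following saved_1 through PHI nodes and SSA name copies as well.  */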
2044 
2045 static void
2046 insert_clobber_before_stack_restore (tree saved_val, tree var,
2047 				     gimple_htab **visited)
2048 {
2049   gimple *stmt;
2050   gassign *clobber_stmt;
2051   tree clobber;
2052   imm_use_iterator iter;
2053   gimple_stmt_iterator i;
2054   gimple **slot;
2055 
2056   FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
2057     if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
2058       {
2059 	clobber = build_constructor (TREE_TYPE (var),
2060 				     NULL);
2061 	TREE_THIS_VOLATILE (clobber) = 1;
2062 	clobber_stmt = gimple_build_assign (var, clobber);
2063 
2064 	i = gsi_for_stmt (stmt);
2065 	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2066       }
2067     else if (gimple_code (stmt) == GIMPLE_PHI)
2068       {
2069 	if (!*visited)
2070 	  *visited = new gimple_htab (10);
2071 
2072 	slot = (*visited)->find_slot (stmt, INSERT);
2073 	if (*slot != NULL)
2074 	  continue;
2075 
2076 	*slot = stmt;
2077 	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2078 					     visited);
2079       }
2080     else if (gimple_assign_ssa_name_copy_p (stmt))
2081       insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2082 					   visited);
2083     else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
2084       continue;
2085 }
2086 
2087 /* Advance the iterator to the previous non-debug gimple statement in the same
2088    or dominating basic block.  */
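/* Used by insert_clobbers_for_var below to walk statements backwards
   across basic block boundaries along the dominator tree.  */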
2089 
2090 static inline void
2091 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2092 {
2093   basic_block dom;
2094 
2095   gsi_prev_nondebug (i);
2096   while (gsi_end_p (*i))
2097     {
2098       dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2099       if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2100 	return;
2101 
2102       *i = gsi_last_bb (dom);
2103     }
2104 }
2105 
2106 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2107    a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2108 
2109    It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
2110    a previous pass (such as DOM) duplicated it along multiple paths to a BB.
2111    In that case the function gives up without inserting the clobbers.  */
2112 
2113 static void
2114 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2115 {
2116   gimple *stmt;
2117   tree saved_val;
2118   gimple_htab *visited = NULL;
2119 
2120   for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2121     {
2122       stmt = gsi_stmt (i);
2123 
2124       if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2125 	continue;
2126 
2127       saved_val = gimple_call_lhs (stmt);
2128       if (saved_val == NULL_TREE)
2129 	continue;
2130 
2131       insert_clobber_before_stack_restore (saved_val, var, &visited);
2132       break;
2133     }
2134 
2135   delete visited;
2136 }
2137 
2138 /* Detect a __builtin_alloca_with_align call with a constant size argument.
2139    If found, declare a fixed-size array in its place and return the address
2140    of that array; otherwise return NULL_TREE.  */
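/* For instance (an illustrative sketch with a made-up SSA name):
     p_1 = __builtin_alloca_with_align (64, 256);
   can be folded to the address of a new 64-byte local array whose
   DECL_ALIGN is taken from the second argument (here 256 bits, i.e.
   32 bytes), provided 64 bytes is below the large-stack-frame
   threshold checked below.  */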
2141 
2142 static tree
2143 fold_builtin_alloca_with_align (gimple *stmt)
2144 {
2145   unsigned HOST_WIDE_INT size, threshold, n_elem;
2146   tree lhs, arg, block, var, elem_type, array_type;
2147 
2148   /* Get lhs.  */
2149   lhs = gimple_call_lhs (stmt);
2150   if (lhs == NULL_TREE)
2151     return NULL_TREE;
2152 
2153   /* Detect constant argument.  */
2154   arg = get_constant_value (gimple_call_arg (stmt, 0));
2155   if (arg == NULL_TREE
2156       || TREE_CODE (arg) != INTEGER_CST
2157       || !tree_fits_uhwi_p (arg))
2158     return NULL_TREE;
2159 
2160   size = tree_to_uhwi (arg);
2161 
2162   /* Heuristic: don't fold large allocas.  */
2163   threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2164   /* In case the alloca is located at function entry, it has the same lifetime
2165      as a declared array, so we allow a larger size.  */
2166   block = gimple_block (stmt);
2167   if (!(cfun->after_inlining
2168 	&& block
2169         && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2170     threshold /= 10;
2171   if (size > threshold)
2172     return NULL_TREE;
2173 
2174   /* Declare array.  */
2175   elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2176   n_elem = size * 8 / BITS_PER_UNIT;
2177   array_type = build_array_type_nelts (elem_type, n_elem);
2178   var = create_tmp_var (array_type);
2179   SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2180   {
2181     struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2182     if (pi != NULL && !pi->pt.anything)
2183       {
2184 	bool singleton_p;
2185 	unsigned uid;
2186 	singleton_p = pt_solution_singleton_or_null_p (&pi->pt, &uid);
2187 	gcc_assert (singleton_p);
2188 	SET_DECL_PT_UID (var, uid);
2189       }
2190   }
2191 
2192   /* Fold alloca to the address of the array.  */
2193   return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2194 }
2195 
2196 /* Fold the stmt at *GSI with CCP specific information that propagating
2197    and regular folding does not catch.  */
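/* For example (illustrative): a GIMPLE_COND whose predicate evaluated
   to a constant with an all-known mask is rewritten into an always
   true or always false condition, and a call whose SSA_NAME result
   was found constant is replaced by that constant when the call is
   const or pure.  */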
2198 
2199 bool
2200 ccp_folder::fold_stmt (gimple_stmt_iterator *gsi)
2201 {
2202   gimple *stmt = gsi_stmt (*gsi);
2203 
2204   switch (gimple_code (stmt))
2205     {
2206     case GIMPLE_COND:
2207       {
2208 	gcond *cond_stmt = as_a <gcond *> (stmt);
2209 	ccp_prop_value_t val;
2210 	/* Statement evaluation will handle type mismatches in constants
2211 	   more gracefully than the final propagation.  This allows us to
2212 	   fold more conditionals here.  */
2213 	val = evaluate_stmt (stmt);
2214 	if (val.lattice_val != CONSTANT
2215 	    || val.mask != 0)
2216 	  return false;
2217 
2218 	if (dump_file)
2219 	  {
2220 	    fprintf (dump_file, "Folding predicate ");
2221 	    print_gimple_expr (dump_file, stmt, 0);
2222 	    fprintf (dump_file, " to ");
2223 	    print_generic_expr (dump_file, val.value);
2224 	    fprintf (dump_file, "\n");
2225 	  }
2226 
2227 	if (integer_zerop (val.value))
2228 	  gimple_cond_make_false (cond_stmt);
2229 	else
2230 	  gimple_cond_make_true (cond_stmt);
2231 
2232 	return true;
2233       }
2234 
2235     case GIMPLE_CALL:
2236       {
2237 	tree lhs = gimple_call_lhs (stmt);
2238 	int flags = gimple_call_flags (stmt);
2239 	tree val;
2240 	tree argt;
2241 	bool changed = false;
2242 	unsigned i;
2243 
2244 	/* If the call was folded into a constant make sure it goes
2245 	   away even if we cannot propagate into all uses because of
2246 	   type issues.  */
2247 	if (lhs
2248 	    && TREE_CODE (lhs) == SSA_NAME
2249 	    && (val = get_constant_value (lhs))
2250 	    /* Don't optimize away calls that have side-effects.  */
2251 	    && (flags & (ECF_CONST|ECF_PURE)) != 0
2252 	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2253 	  {
2254 	    tree new_rhs = unshare_expr (val);
2255 	    bool res;
2256 	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
2257 					    TREE_TYPE (new_rhs)))
2258 	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2259 	    res = update_call_from_tree (gsi, new_rhs);
2260 	    gcc_assert (res);
2261 	    return true;
2262 	  }
2263 
2264 	/* Internal calls provide no argument types, so the extra laxity
2265 	   for normal calls does not apply.  */
2266 	if (gimple_call_internal_p (stmt))
2267 	  return false;
2268 
2269         /* The heuristic of fold_builtin_alloca_with_align differs before and
2270 	   after inlining, so we don't require the arg to be changed into a
2271 	   constant for folding, but just to be constant.  */
2272         if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)
2273 	    || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX))
2274           {
2275             tree new_rhs = fold_builtin_alloca_with_align (stmt);
2276             if (new_rhs)
2277 	      {
2278 		bool res = update_call_from_tree (gsi, new_rhs);
2279 		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
2280 		gcc_assert (res);
2281 		insert_clobbers_for_var (*gsi, var);
2282 		return true;
2283 	      }
2284           }
2285 
2286 	/* Propagate into the call arguments.  Compared to replace_uses_in
2287 	   this can use the argument slot types for type verification
2288 	   instead of the current argument type.  We also can safely
2289 	   drop qualifiers here as we are dealing with constants anyway.  */
2290 	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2291 	for (i = 0; i < gimple_call_num_args (stmt) && argt;
2292 	     ++i, argt = TREE_CHAIN (argt))
2293 	  {
2294 	    tree arg = gimple_call_arg (stmt, i);
2295 	    if (TREE_CODE (arg) == SSA_NAME
2296 		&& (val = get_constant_value (arg))
2297 		&& useless_type_conversion_p
2298 		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2299 		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2300 	      {
2301 		gimple_call_set_arg (stmt, i, unshare_expr (val));
2302 		changed = true;
2303 	      }
2304 	  }
2305 
2306 	return changed;
2307       }
2308 
2309     case GIMPLE_ASSIGN:
2310       {
2311 	tree lhs = gimple_assign_lhs (stmt);
2312 	tree val;
2313 
2314 	/* If we have a load that turned out to be constant replace it
2315 	   as we cannot propagate into all uses in all cases.  */
2316 	if (gimple_assign_single_p (stmt)
2317 	    && TREE_CODE (lhs) == SSA_NAME
2318 	    && (val = get_constant_value (lhs)))
2319 	  {
2320 	    tree rhs = unshare_expr (val);
2321 	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2322 	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2323 	    gimple_assign_set_rhs_from_tree (gsi, rhs);
2324 	    return true;
2325 	  }
2326 
2327 	return false;
2328       }
2329 
2330     default:
2331       return false;
2332     }
2333 }
2334 
2335 /* Visit the assignment statement STMT.  Set the value of its LHS to the
2336    value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
2337    creates virtual definitions, set the value of each new name to that
2338    of the RHS (if we can derive a constant out of the RHS).
2339    Value-returning call statements also perform an assignment, and
2340    are handled here.  */
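/* For example (illustrative): for a statement x_3 = y_1 + 1 where y_1
   is known to be the constant 4, evaluate_stmt yields CONSTANT 5; if
   that changes the lattice value of x_3, *OUTPUT_P is set to x_3 and
   SSA_PROP_INTERESTING is returned.  */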
2341 
2342 static enum ssa_prop_result
2343 visit_assignment (gimple *stmt, tree *output_p)
2344 {
2345   ccp_prop_value_t val;
2346   enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2347 
2348   tree lhs = gimple_get_lhs (stmt);
2349   if (TREE_CODE (lhs) == SSA_NAME)
2350     {
2351       /* Evaluate the statement, which could be
2352 	 either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
2353       val = evaluate_stmt (stmt);
2354 
2355       /* If STMT is an assignment to an SSA_NAME, we only have one
2356 	 value to set.  */
2357       if (set_lattice_value (lhs, &val))
2358 	{
2359 	  *output_p = lhs;
2360 	  if (val.lattice_val == VARYING)
2361 	    retval = SSA_PROP_VARYING;
2362 	  else
2363 	    retval = SSA_PROP_INTERESTING;
2364 	}
2365     }
2366 
2367   return retval;
2368 }
2369 
2370 
2371 /* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
2372    if it can determine which edge will be taken.  Otherwise, return
2373    SSA_PROP_VARYING.  */
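/* For example (illustrative): if the predicate of "if (x_1 == 0)" is
   known to be the constant 1, find_taken_edge returns the true edge
   of the block and SSA_PROP_INTERESTING is reported; otherwise all
   outgoing edges are handed to the propagation engine.  */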
2374 
2375 static enum ssa_prop_result
2376 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2377 {
2378   ccp_prop_value_t val;
2379   basic_block block;
2380 
2381   block = gimple_bb (stmt);
2382   val = evaluate_stmt (stmt);
2383   if (val.lattice_val != CONSTANT
2384       || val.mask != 0)
2385     return SSA_PROP_VARYING;
2386 
2387   /* Find which edge out of the conditional block will be taken and add it
2388      to the worklist.  If no single edge can be determined statically,
2389      return SSA_PROP_VARYING to feed all the outgoing edges to the
2390      propagation engine.  */
2391   *taken_edge_p = find_taken_edge (block, val.value);
2392   if (*taken_edge_p)
2393     return SSA_PROP_INTERESTING;
2394   else
2395     return SSA_PROP_VARYING;
2396 }
2397 
2398 
2399 /* Evaluate statement STMT.  If the statement produces an output value and
2400    its evaluation changes the lattice value of its output, return
2401    SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2402    output value.
2403 
2404    If STMT is a conditional branch and we can determine its truth
2405    value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
2406    value, return SSA_PROP_VARYING.  */
2407 
2408 enum ssa_prop_result
2409 ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2410 {
2411   tree def;
2412   ssa_op_iter iter;
2413 
2414   if (dump_file && (dump_flags & TDF_DETAILS))
2415     {
2416       fprintf (dump_file, "\nVisiting statement:\n");
2417       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2418     }
2419 
2420   switch (gimple_code (stmt))
2421     {
2422       case GIMPLE_ASSIGN:
2423         /* If the statement is an assignment that produces a single
2424            output value, evaluate its RHS to see if the lattice value of
2425            its output has changed.  */
2426         return visit_assignment (stmt, output_p);
2427 
2428       case GIMPLE_CALL:
2429         /* A value-returning call also performs an assignment.  */
2430         if (gimple_call_lhs (stmt) != NULL_TREE)
2431           return visit_assignment (stmt, output_p);
2432         break;
2433 
2434       case GIMPLE_COND:
2435       case GIMPLE_SWITCH:
2436         /* If STMT is a conditional branch, see if we can determine
2437            which branch will be taken.   */
2438         /* FIXME.  It appears that we should be able to optimize
2439            computed GOTOs here as well.  */
2440         return visit_cond_stmt (stmt, taken_edge_p);
2441 
2442       default:
2443         break;
2444     }
2445 
2446   /* Any other kind of statement is not interesting for constant
2447      propagation and, therefore, not worth simulating.  */
2448   if (dump_file && (dump_flags & TDF_DETAILS))
2449     fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");
2450 
2451   /* Definitions made by statements other than assignments to
2452      SSA_NAMEs represent unknown modifications to their outputs.
2453      Mark them VARYING.  */
2454   FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2455     set_value_varying (def);
2456 
2457   return SSA_PROP_VARYING;
2458 }
2459 
2460 
2461 /* Main entry point for SSA Conditional Constant Propagation.  If NONZERO_P,
2462    record nonzero bits.  */
2463 
2464 static unsigned int
2465 do_ssa_ccp (bool nonzero_p)
2466 {
2467   unsigned int todo = 0;
2468   calculate_dominance_info (CDI_DOMINATORS);
2469 
2470   ccp_initialize ();
2471   class ccp_propagate ccp_propagate;
2472   ccp_propagate.ssa_propagate ();
2473   if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
2474     {
2475       todo = (TODO_cleanup_cfg | TODO_update_ssa);
2476 
2477       /* ccp_finalize does not preserve loop-closed ssa.  */
2478       loops_state_clear (LOOP_CLOSED_SSA);
2479     }
2480 
2481   free_dominance_info (CDI_DOMINATORS);
2482   return todo;
2483 }
2484 
2485 
2486 namespace {
2487 
2488 const pass_data pass_data_ccp =
2489 {
2490   GIMPLE_PASS, /* type */
2491   "ccp", /* name */
2492   OPTGROUP_NONE, /* optinfo_flags */
2493   TV_TREE_CCP, /* tv_id */
2494   ( PROP_cfg | PROP_ssa ), /* properties_required */
2495   0, /* properties_provided */
2496   0, /* properties_destroyed */
2497   0, /* todo_flags_start */
2498   TODO_update_address_taken, /* todo_flags_finish */
2499 };
2500 
2501 class pass_ccp : public gimple_opt_pass
2502 {
2503 public:
2504   pass_ccp (gcc::context *ctxt)
2505     : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2506   {}
2507 
2508   /* opt_pass methods: */
2509   opt_pass * clone () { return new pass_ccp (m_ctxt); }
2510   void set_pass_param (unsigned int n, bool param)
2511     {
2512       gcc_assert (n == 0);
2513       nonzero_p = param;
2514     }
2515   virtual bool gate (function *) { return flag_tree_ccp != 0; }
2516   virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
2517 
2518  private:
2519   /* Determines whether the pass instance records nonzero bits.  */
2520   bool nonzero_p;
2521 }; // class pass_ccp
2522 
2523 } // anon namespace
2524 
2525 gimple_opt_pass *
2526 make_pass_ccp (gcc::context *ctxt)
2527 {
2528   return new pass_ccp (ctxt);
2529 }
2530 
2531 
2532 
2533 /* Try to optimize out __builtin_stack_restore.  Optimize it out
2534    if there is another __builtin_stack_restore in the same basic
2535    block and no calls or ASM_EXPRs are in between, or if this block's
2536    only outgoing edge is to EXIT_BLOCK and there are no calls or
2537    ASM_EXPRs after this __builtin_stack_restore.  */
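/* An illustrative sketch of the first case (hypothetical SSA names):
     __builtin_stack_restore (p_1);   <-- removable
     ...                              <-- no calls or asm statements
     __builtin_stack_restore (p_2);
   Because nothing in between can allocate or inspect stack memory, the
   first restore has no observable effect; it is folded to a zero
   constant so the statement gets deleted.  */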
2538 
2539 static tree
2540 optimize_stack_restore (gimple_stmt_iterator i)
2541 {
2542   tree callee;
2543   gimple *stmt;
2544 
2545   basic_block bb = gsi_bb (i);
2546   gimple *call = gsi_stmt (i);
2547 
2548   if (gimple_code (call) != GIMPLE_CALL
2549       || gimple_call_num_args (call) != 1
2550       || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2551       || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2552     return NULL_TREE;
2553 
2554   for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2555     {
2556       stmt = gsi_stmt (i);
2557       if (gimple_code (stmt) == GIMPLE_ASM)
2558 	return NULL_TREE;
2559       if (gimple_code (stmt) != GIMPLE_CALL)
2560 	continue;
2561 
2562       callee = gimple_call_fndecl (stmt);
2563       if (!callee
2564 	  || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2565 	  /* All regular builtins are ok, just obviously not alloca.  */
2566 	  || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
2567 	return NULL_TREE;
2568 
2569       if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2570 	goto second_stack_restore;
2571     }
2572 
2573   if (!gsi_end_p (i))
2574     return NULL_TREE;
2575 
2576   /* Allow a single successor that is the exit block, or no successors.  */
2577   switch (EDGE_COUNT (bb->succs))
2578     {
2579     case 0:
2580       break;
2581     case 1:
2582       if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2583 	return NULL_TREE;
2584       break;
2585     default:
2586       return NULL_TREE;
2587     }
2588  second_stack_restore:
2589 
2590   /* If there's exactly one use, then zap the call to __builtin_stack_save.
2591      If there are multiple uses, then the last one should remove the call.
2592      In any case, whether the call to __builtin_stack_save can be removed
2593      or not is irrelevant to removing the call to __builtin_stack_restore.  */
2594   if (has_single_use (gimple_call_arg (call, 0)))
2595     {
2596       gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2597       if (is_gimple_call (stack_save))
2598 	{
2599 	  callee = gimple_call_fndecl (stack_save);
2600 	  if (callee
2601 	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2602 	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2603 	    {
2604 	      gimple_stmt_iterator stack_save_gsi;
2605 	      tree rhs;
2606 
2607 	      stack_save_gsi = gsi_for_stmt (stack_save);
2608 	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2609 	      update_call_from_tree (&stack_save_gsi, rhs);
2610 	    }
2611 	}
2612     }
2613 
2614   /* No effect, so the statement will be deleted.  */
2615   return integer_zero_node;
2616 }
2617 
2618 /* If va_list type is a simple pointer and nothing special is needed,
2619    optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2620    __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
2621    pointer assignment.  */
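/* Concretely (illustrative, for a target whose va_list is a plain
   char or void pointer):
     __builtin_va_start (&ap, 0)  ->  ap = __builtin_next_arg (0)
     __builtin_va_copy (&d, s)    ->  d = s
     __builtin_va_end (&ap)       ->  removed (folded to 0)  */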
2622 
2623 static tree
2624 optimize_stdarg_builtin (gimple *call)
2625 {
2626   tree callee, lhs, rhs, cfun_va_list;
2627   bool va_list_simple_ptr;
2628   location_t loc = gimple_location (call);
2629 
2630   if (gimple_code (call) != GIMPLE_CALL)
2631     return NULL_TREE;
2632 
2633   callee = gimple_call_fndecl (call);
2634 
2635   cfun_va_list = targetm.fn_abi_va_list (callee);
2636   va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2637 		       && (TREE_TYPE (cfun_va_list) == void_type_node
2638 			   || TREE_TYPE (cfun_va_list) == char_type_node);
2639 
2640   switch (DECL_FUNCTION_CODE (callee))
2641     {
2642     case BUILT_IN_VA_START:
2643       if (!va_list_simple_ptr
2644 	  || targetm.expand_builtin_va_start != NULL
2645 	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2646 	return NULL_TREE;
2647 
2648       if (gimple_call_num_args (call) != 2)
2649 	return NULL_TREE;
2650 
2651       lhs = gimple_call_arg (call, 0);
2652       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2653 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2654 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2655 	return NULL_TREE;
2656 
2657       lhs = build_fold_indirect_ref_loc (loc, lhs);
2658       rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2659                              1, integer_zero_node);
2660       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2661       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2662 
2663     case BUILT_IN_VA_COPY:
2664       if (!va_list_simple_ptr)
2665 	return NULL_TREE;
2666 
2667       if (gimple_call_num_args (call) != 2)
2668 	return NULL_TREE;
2669 
2670       lhs = gimple_call_arg (call, 0);
2671       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2672 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2673 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2674 	return NULL_TREE;
2675 
2676       lhs = build_fold_indirect_ref_loc (loc, lhs);
2677       rhs = gimple_call_arg (call, 1);
2678       if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2679 	  != TYPE_MAIN_VARIANT (cfun_va_list))
2680 	return NULL_TREE;
2681 
2682       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2683       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2684 
2685     case BUILT_IN_VA_END:
2686       /* No effect, so the statement will be deleted.  */
2687       return integer_zero_node;
2688 
2689     default:
2690       gcc_unreachable ();
2691     }
2692 }
2693 
2694 /* Attempt to make the block of __builtin_unreachable I unreachable by changing
2695    the incoming jumps.  Return true if at least one jump was changed.  */
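/* A sketch of the transformation (illustrative): if the incoming edge
   of the __builtin_unreachable block is the true edge of
   "if (x_1 != 0)", that condition is forced to be always false (and
   conversely for a false edge), so the block loses its incoming jump
   and can be removed by a later CFG cleanup.  */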
2696 
2697 static bool
2698 optimize_unreachable (gimple_stmt_iterator i)
2699 {
2700   basic_block bb = gsi_bb (i);
2701   gimple_stmt_iterator gsi;
2702   gimple *stmt;
2703   edge_iterator ei;
2704   edge e;
2705   bool ret;
2706 
2707   if (flag_sanitize & SANITIZE_UNREACHABLE)
2708     return false;
2709 
2710   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2711     {
2712       stmt = gsi_stmt (gsi);
2713 
2714       if (is_gimple_debug (stmt))
2715        continue;
2716 
2717       if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2718 	{
2719 	  /* Verify we do not need to preserve the label.  */
2720 	  if (FORCED_LABEL (gimple_label_label (label_stmt)))
2721 	    return false;
2722 
2723 	  continue;
2724 	}
2725 
2726       /* Only handle the case that __builtin_unreachable is the first statement
2727 	 in the block.  We rely on DCE to remove stmts without side-effects
2728 	 before __builtin_unreachable.  */
2729       if (gsi_stmt (gsi) != gsi_stmt (i))
2730         return false;
2731     }
2732 
2733   ret = false;
2734   FOR_EACH_EDGE (e, ei, bb->preds)
2735     {
2736       gsi = gsi_last_bb (e->src);
2737       if (gsi_end_p (gsi))
2738 	continue;
2739 
2740       stmt = gsi_stmt (gsi);
2741       if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2742 	{
2743 	  if (e->flags & EDGE_TRUE_VALUE)
2744 	    gimple_cond_make_false (cond_stmt);
2745 	  else if (e->flags & EDGE_FALSE_VALUE)
2746 	    gimple_cond_make_true (cond_stmt);
2747 	  else
2748 	    gcc_unreachable ();
2749 	  update_stmt (cond_stmt);
2750 	}
2751       else
2752 	{
2753 	  /* Todo: handle other cases.  Note that unreachable switch case
2754 	     statements have already been removed.  */
2755 	  continue;
2756 	}
2757 
2758       ret = true;
2759     }
2760 
2761   return ret;
2762 }
2763 
2764 /* Optimize
2765      mask_2 = 1 << cnt_1;
2766      _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
2767      _5 = _4 & mask_2;
2768    to
2769      _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
2770      _5 = _4;
2771    If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
2772    is passed instead of 0, and the builtin just returns a zero
2773    or 1 value instead of the actual bit.
2774    Similarly for __sync_fetch_and_or_* (without the ", _3" part
2775    in there), and/or if mask_2 is a power of 2 constant.
2776    Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
2777    in that case.  And similarly for and instead of or, except that
2778    the second argument to the builtin needs to be one's complement
2779    of the mask instead of mask.  */
2780 
2781 static void
2782 optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
2783 			      enum internal_fn fn, bool has_model_arg,
2784 			      bool after)
2785 {
2786   gimple *call = gsi_stmt (*gsip);
2787   tree lhs = gimple_call_lhs (call);
2788   use_operand_p use_p;
2789   gimple *use_stmt;
2790   tree mask, bit;
2791   optab optab;
2792 
2793   if (!flag_inline_atomics
2794       || optimize_debug
2795       || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
2796       || !lhs
2797       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2798       || !single_imm_use (lhs, &use_p, &use_stmt)
2799       || !is_gimple_assign (use_stmt)
2800       || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
2801       || !gimple_vdef (call))
2802     return;
2803 
2804   switch (fn)
2805     {
2806     case IFN_ATOMIC_BIT_TEST_AND_SET:
2807       optab = atomic_bit_test_and_set_optab;
2808       break;
2809     case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
2810       optab = atomic_bit_test_and_complement_optab;
2811       break;
2812     case IFN_ATOMIC_BIT_TEST_AND_RESET:
2813       optab = atomic_bit_test_and_reset_optab;
2814       break;
2815     default:
2816       return;
2817     }
2818 
2819   if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
2820     return;
2821 
2822   mask = gimple_call_arg (call, 1);
2823   tree use_lhs = gimple_assign_lhs (use_stmt);
2824   if (!use_lhs)
2825     return;
2826 
2827   if (TREE_CODE (mask) == INTEGER_CST)
2828     {
2829       if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2830 	mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
2831       mask = fold_convert (TREE_TYPE (lhs), mask);
2832       int ibit = tree_log2 (mask);
2833       if (ibit < 0)
2834 	return;
2835       bit = build_int_cst (TREE_TYPE (lhs), ibit);
2836     }
2837   else if (TREE_CODE (mask) == SSA_NAME)
2838     {
2839       gimple *g = SSA_NAME_DEF_STMT (mask);
2840       if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2841 	{
2842 	  if (!is_gimple_assign (g)
2843 	      || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
2844 	    return;
2845 	  mask = gimple_assign_rhs1 (g);
2846 	  if (TREE_CODE (mask) != SSA_NAME)
2847 	    return;
2848 	  g = SSA_NAME_DEF_STMT (mask);
2849 	}
2850       if (!is_gimple_assign (g)
2851 	  || gimple_assign_rhs_code (g) != LSHIFT_EXPR
2852 	  || !integer_onep (gimple_assign_rhs1 (g)))
2853 	return;
2854       bit = gimple_assign_rhs2 (g);
2855     }
2856   else
2857     return;
2858 
2859   if (gimple_assign_rhs1 (use_stmt) == lhs)
2860     {
2861       if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
2862 	return;
2863     }
2864   else if (gimple_assign_rhs2 (use_stmt) != lhs
2865 	   || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
2866     return;
2867 
2868   bool use_bool = true;
2869   bool has_debug_uses = false;
2870   imm_use_iterator iter;
2871   gimple *g;
2872 
2873   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
2874     use_bool = false;
2875   FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2876     {
2877       enum tree_code code = ERROR_MARK;
2878       tree op0 = NULL_TREE, op1 = NULL_TREE;
2879       if (is_gimple_debug (g))
2880 	{
2881 	  has_debug_uses = true;
2882 	  continue;
2883 	}
2884       else if (is_gimple_assign (g))
2885 	switch (gimple_assign_rhs_code (g))
2886 	  {
2887 	  case COND_EXPR:
2888 	    op1 = gimple_assign_rhs1 (g);
2889 	    code = TREE_CODE (op1);
2890 	    op0 = TREE_OPERAND (op1, 0);
2891 	    op1 = TREE_OPERAND (op1, 1);
2892 	    break;
2893 	  case EQ_EXPR:
2894 	  case NE_EXPR:
2895 	    code = gimple_assign_rhs_code (g);
2896 	    op0 = gimple_assign_rhs1 (g);
2897 	    op1 = gimple_assign_rhs2 (g);
2898 	    break;
2899 	  default:
2900 	    break;
2901 	  }
2902       else if (gimple_code (g) == GIMPLE_COND)
2903 	{
2904 	  code = gimple_cond_code (g);
2905 	  op0 = gimple_cond_lhs (g);
2906 	  op1 = gimple_cond_rhs (g);
2907 	}
2908 
2909       if ((code == EQ_EXPR || code == NE_EXPR)
2910 	  && op0 == use_lhs
2911 	  && integer_zerop (op1))
2912 	{
2913 	  use_operand_p use_p;
2914 	  int n = 0;
2915 	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2916 	    n++;
2917 	  if (n == 1)
2918 	    continue;
2919 	}
2920 
2921       use_bool = false;
2922       BREAK_FROM_IMM_USE_STMT (iter);
2923     }
2924 
2925   tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
2926   tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
2927   if (has_model_arg)
2928     g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
2929 				    bit, flag, gimple_call_arg (call, 2));
2930   else
2931     g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
2932 				    bit, flag);
2933   gimple_call_set_lhs (g, new_lhs);
2934   gimple_set_location (g, gimple_location (call));
2935   gimple_set_vuse (g, gimple_vuse (call));
2936   gimple_set_vdef (g, gimple_vdef (call));
2937   bool throws = stmt_can_throw_internal (call);
2938   gimple_call_set_nothrow (as_a <gcall *> (g),
2939 			   gimple_call_nothrow_p (as_a <gcall *> (call)));
2940   SSA_NAME_DEF_STMT (gimple_vdef (call)) = g;
2941   gimple_stmt_iterator gsi = *gsip;
2942   gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2943   edge e = NULL;
2944   if (throws)
2945     {
2946       maybe_clean_or_replace_eh_stmt (call, g);
2947       if (after || (use_bool && has_debug_uses))
2948 	e = find_fallthru_edge (gsi_bb (gsi)->succs);
2949     }
2950   if (after)
2951     {
2952       /* The internal function returns the value of the specified bit
2953 	 before the atomic operation.  If we are interested in the value
2954 	 of the specified bit after the atomic operation (makes only sense
2955 	 for xor, otherwise the bit content is compile time known),
2956 	 we need to invert the bit.  */
2957       g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
2958 			       BIT_XOR_EXPR, new_lhs,
2959 			       use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
2960 					: mask);
2961       new_lhs = gimple_assign_lhs (g);
2962       if (throws)
2963 	{
2964 	  gsi_insert_on_edge_immediate (e, g);
2965 	  gsi = gsi_for_stmt (g);
2966 	}
2967       else
2968 	gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2969     }
2970   if (use_bool && has_debug_uses)
2971     {
2972       tree temp = NULL_TREE;
2973       if (!throws || after || single_pred_p (e->dest))
2974 	{
2975 	  temp = make_node (DEBUG_EXPR_DECL);
2976 	  DECL_ARTIFICIAL (temp) = 1;
2977 	  TREE_TYPE (temp) = TREE_TYPE (lhs);
2978 	  SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
2979 	  tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
2980 	  g = gimple_build_debug_bind (temp, t, g);
2981 	  if (throws && !after)
2982 	    {
2983 	      gsi = gsi_after_labels (e->dest);
2984 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2985 	    }
2986 	  else
2987 	    gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2988 	}
2989       FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2990 	if (is_gimple_debug (g))
2991 	  {
2992 	    use_operand_p use_p;
2993 	    if (temp == NULL_TREE)
2994 	      gimple_debug_bind_reset_value (g);
2995 	    else
2996 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2997 		SET_USE (use_p, temp);
2998 	    update_stmt (g);
2999 	  }
3000     }
3001   SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
3002     = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
3003   replace_uses_by (use_lhs, new_lhs);
3004   gsi = gsi_for_stmt (use_stmt);
3005   gsi_remove (&gsi, true);
3006   release_defs (use_stmt);
3007   gsi_remove (gsip, true);
3008   release_ssa_name (lhs);
3009 }
3010 
3011 /* Optimize
3012    a = {};
3013    b = a;
3014    into
3015    a = {};
3016    b = {};
3017    Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
3018    and/or memcpy (&b, &a, sizeof (a)); instead of b = a;  */
3019 
3020 static void
3021 optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
3022 {
3023   gimple *stmt = gsi_stmt (*gsip);
3024   if (gimple_has_volatile_ops (stmt))
3025     return;
3026 
3027   tree vuse = gimple_vuse (stmt);
3028   if (vuse == NULL)
3029     return;
3030 
3031   gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
3032   tree src2 = NULL_TREE, len2 = NULL_TREE;
3033   poly_int64 offset, offset2;
3034   tree val = integer_zero_node;
3035   if (gimple_store_p (defstmt)
3036       && gimple_assign_single_p (defstmt)
3037       && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
3038       && !gimple_clobber_p (defstmt))
3039     src2 = gimple_assign_lhs (defstmt);
3040   else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
3041 	   && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
3042 	   && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
3043     {
3044       src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
3045       len2 = gimple_call_arg (defstmt, 2);
3046       val = gimple_call_arg (defstmt, 1);
3047       /* For non-0 val, we'd have to transform stmt from assignment
3048 	 into memset (only if dest is addressable).  */
3049       if (!integer_zerop (val) && is_gimple_assign (stmt))
3050 	src2 = NULL_TREE;
3051     }
3052 
3053   if (src2 == NULL_TREE)
3054     return;
3055 
3056   if (len == NULL_TREE)
3057     len = (TREE_CODE (src) == COMPONENT_REF
3058 	   ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
3059 	   : TYPE_SIZE_UNIT (TREE_TYPE (src)));
3060   if (len2 == NULL_TREE)
3061     len2 = (TREE_CODE (src2) == COMPONENT_REF
3062 	    ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
3063 	    : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
3064   if (len == NULL_TREE
3065       || !poly_int_tree_p (len)
3066       || len2 == NULL_TREE
3067       || !poly_int_tree_p (len2))
3068     return;
3069 
3070   src = get_addr_base_and_unit_offset (src, &offset);
3071   src2 = get_addr_base_and_unit_offset (src2, &offset2);
3072   if (src == NULL_TREE
3073       || src2 == NULL_TREE
3074       || maybe_lt (offset, offset2))
3075     return;
3076 
3077   if (!operand_equal_p (src, src2, 0))
3078     return;
3079 
3080   /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
3081      Make sure that
3082      [ src + offset, src + offset + len - 1 ] is a subset of that.  */
3083   if (maybe_gt (wi::to_poly_offset (len) + (offset - offset2),
3084 		wi::to_poly_offset (len2)))
3085     return;
3086 
3087   if (dump_file && (dump_flags & TDF_DETAILS))
3088     {
3089       fprintf (dump_file, "Simplified\n  ");
3090       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3091       fprintf (dump_file, "after previous\n  ");
3092       print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
3093     }
3094 
3095   /* For simplicity, don't change the kind of the stmt: turn
3096      dest = src; into dest = {}; and memcpy (&dest, &src, len);
3097      into memset (&dest, val, len).
3098      In theory we could change dest = src into memset if dest
3099      is addressable (maybe beneficial if val is not 0), or
3100      memcpy (&dest, &src, len) into dest = {} if len is the size
3101      of dest and dest isn't volatile.  */
3102   if (is_gimple_assign (stmt))
3103     {
3104       tree ctor = build_constructor (TREE_TYPE (dest), NULL);
3105       gimple_assign_set_rhs_from_tree (gsip, ctor);
3106       update_stmt (stmt);
3107     }
3108   else /* If stmt is memcpy, transform it into memset.  */
3109     {
3110       gcall *call = as_a <gcall *> (stmt);
3111       tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
3112       gimple_call_set_fndecl (call, fndecl);
3113       gimple_call_set_fntype (call, TREE_TYPE (fndecl));
3114       gimple_call_set_arg (call, 1, val);
3115       update_stmt (stmt);
3116     }
3117 
3118   if (dump_file && (dump_flags & TDF_DETAILS))
3119     {
3120       fprintf (dump_file, "into\n  ");
3121       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3122     }
3123 }
3124 
3125 /* A simple pass that attempts to fold all builtin functions.  This pass
3126    is run after we've propagated as many constants as we can.  */
3127 
3128 namespace {
3129 
3130 const pass_data pass_data_fold_builtins =
3131 {
3132   GIMPLE_PASS, /* type */
3133   "fab", /* name */
3134   OPTGROUP_NONE, /* optinfo_flags */
3135   TV_NONE, /* tv_id */
3136   ( PROP_cfg | PROP_ssa ), /* properties_required */
3137   0, /* properties_provided */
3138   0, /* properties_destroyed */
3139   0, /* todo_flags_start */
3140   TODO_update_ssa, /* todo_flags_finish */
3141 };
3142 
3143 class pass_fold_builtins : public gimple_opt_pass
3144 {
3145 public:
3146   pass_fold_builtins (gcc::context *ctxt)
3147     : gimple_opt_pass (pass_data_fold_builtins, ctxt)
3148   {}
3149 
3150   /* opt_pass methods: */
3151   opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
3152   virtual unsigned int execute (function *);
3153 
3154 }; // class pass_fold_builtins
3155 
3156 unsigned int
3157 pass_fold_builtins::execute (function *fun)
3158 {
3159   bool cfg_changed = false;
3160   basic_block bb;
3161   unsigned int todoflags = 0;
3162 
3163   FOR_EACH_BB_FN (bb, fun)
3164     {
3165       gimple_stmt_iterator i;
3166       for (i = gsi_start_bb (bb); !gsi_end_p (i); )
3167 	{
3168 	  gimple *stmt, *old_stmt;
3169 	  tree callee;
3170 	  enum built_in_function fcode;
3171 
3172 	  stmt = gsi_stmt (i);
3173 
3174           if (gimple_code (stmt) != GIMPLE_CALL)
3175 	    {
3176 	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts;
3177 		 after the last GIMPLE DSE they aren't needed and might
3178 		 unnecessarily keep the SSA_NAMEs live.  */
3179 	      if (gimple_clobber_p (stmt))
3180 		{
3181 		  tree lhs = gimple_assign_lhs (stmt);
3182 		  if (TREE_CODE (lhs) == MEM_REF
3183 		      && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
3184 		    {
3185 		      unlink_stmt_vdef (stmt);
3186 		      gsi_remove (&i, true);
3187 		      release_defs (stmt);
3188 		      continue;
3189 		    }
3190 		}
3191 	      else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
3192 		optimize_memcpy (&i, gimple_assign_lhs (stmt),
3193 				 gimple_assign_rhs1 (stmt), NULL_TREE);
3194 	      gsi_next (&i);
3195 	      continue;
3196 	    }
3197 
3198 	  callee = gimple_call_fndecl (stmt);
3199 	  if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
3200 	    {
3201 	      gsi_next (&i);
3202 	      continue;
3203 	    }
3204 
3205 	  fcode = DECL_FUNCTION_CODE (callee);
3206 	  if (fold_stmt (&i))
3207 	    ;
3208 	  else
3209 	    {
3210 	      tree result = NULL_TREE;
3211 	      switch (DECL_FUNCTION_CODE (callee))
3212 		{
3213 		case BUILT_IN_CONSTANT_P:
3214 		  /* Resolve __builtin_constant_p.  If it hasn't been
3215 		     folded to integer_one_node by now, it's fairly
3216 		     certain that the value simply isn't constant.  */
3217 		  result = integer_zero_node;
3218 		  break;
3219 
3220 		case BUILT_IN_ASSUME_ALIGNED:
3221 		  /* Remove __builtin_assume_aligned.  */
3222 		  result = gimple_call_arg (stmt, 0);
3223 		  break;
3224 
3225 		case BUILT_IN_STACK_RESTORE:
3226 		  result = optimize_stack_restore (i);
3227 		  if (result)
3228 		    break;
3229 		  gsi_next (&i);
3230 		  continue;
3231 
3232 		case BUILT_IN_UNREACHABLE:
3233 		  if (optimize_unreachable (i))
3234 		    cfg_changed = true;
3235 		  break;
3236 
3237 		case BUILT_IN_ATOMIC_FETCH_OR_1:
3238 		case BUILT_IN_ATOMIC_FETCH_OR_2:
3239 		case BUILT_IN_ATOMIC_FETCH_OR_4:
3240 		case BUILT_IN_ATOMIC_FETCH_OR_8:
3241 		case BUILT_IN_ATOMIC_FETCH_OR_16:
3242 		  optimize_atomic_bit_test_and (&i,
3243 						IFN_ATOMIC_BIT_TEST_AND_SET,
3244 						true, false);
3245 		  break;
3246 		case BUILT_IN_SYNC_FETCH_AND_OR_1:
3247 		case BUILT_IN_SYNC_FETCH_AND_OR_2:
3248 		case BUILT_IN_SYNC_FETCH_AND_OR_4:
3249 		case BUILT_IN_SYNC_FETCH_AND_OR_8:
3250 		case BUILT_IN_SYNC_FETCH_AND_OR_16:
3251 		  optimize_atomic_bit_test_and (&i,
3252 						IFN_ATOMIC_BIT_TEST_AND_SET,
3253 						false, false);
3254 		  break;
3255 
3256 		case BUILT_IN_ATOMIC_FETCH_XOR_1:
3257 		case BUILT_IN_ATOMIC_FETCH_XOR_2:
3258 		case BUILT_IN_ATOMIC_FETCH_XOR_4:
3259 		case BUILT_IN_ATOMIC_FETCH_XOR_8:
3260 		case BUILT_IN_ATOMIC_FETCH_XOR_16:
3261 		  optimize_atomic_bit_test_and
3262 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
3263 		  break;
3264 		case BUILT_IN_SYNC_FETCH_AND_XOR_1:
3265 		case BUILT_IN_SYNC_FETCH_AND_XOR_2:
3266 		case BUILT_IN_SYNC_FETCH_AND_XOR_4:
3267 		case BUILT_IN_SYNC_FETCH_AND_XOR_8:
3268 		case BUILT_IN_SYNC_FETCH_AND_XOR_16:
3269 		  optimize_atomic_bit_test_and
3270 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
3271 		  break;
3272 
3273 		case BUILT_IN_ATOMIC_XOR_FETCH_1:
3274 		case BUILT_IN_ATOMIC_XOR_FETCH_2:
3275 		case BUILT_IN_ATOMIC_XOR_FETCH_4:
3276 		case BUILT_IN_ATOMIC_XOR_FETCH_8:
3277 		case BUILT_IN_ATOMIC_XOR_FETCH_16:
3278 		  optimize_atomic_bit_test_and
3279 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
3280 		  break;
3281 		case BUILT_IN_SYNC_XOR_AND_FETCH_1:
3282 		case BUILT_IN_SYNC_XOR_AND_FETCH_2:
3283 		case BUILT_IN_SYNC_XOR_AND_FETCH_4:
3284 		case BUILT_IN_SYNC_XOR_AND_FETCH_8:
3285 		case BUILT_IN_SYNC_XOR_AND_FETCH_16:
3286 		  optimize_atomic_bit_test_and
3287 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
3288 		  break;
3289 
		case BUILT_IN_ATOMIC_FETCH_AND_1:
		case BUILT_IN_ATOMIC_FETCH_AND_2:
		case BUILT_IN_ATOMIC_FETCH_AND_4:
		case BUILT_IN_ATOMIC_FETCH_AND_8:
		case BUILT_IN_ATOMIC_FETCH_AND_16:
		  optimize_atomic_bit_test_and (&i,
						IFN_ATOMIC_BIT_TEST_AND_RESET,
						true, false);
		  break;
		case BUILT_IN_SYNC_FETCH_AND_AND_1:
		case BUILT_IN_SYNC_FETCH_AND_AND_2:
		case BUILT_IN_SYNC_FETCH_AND_AND_4:
		case BUILT_IN_SYNC_FETCH_AND_AND_8:
		case BUILT_IN_SYNC_FETCH_AND_AND_16:
		  optimize_atomic_bit_test_and (&i,
						IFN_ATOMIC_BIT_TEST_AND_RESET,
						false, false);
		  break;

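		/* Only memcpy calls whose source, destination and length are
		   all compile-time known are handed to optimize_memcpy; that
		   helper may then, for instance, replace the call with a
		   plain aggregate copy or with the contents of a dominating
		   store.  The precise set of transformations is left to that
		   helper.  */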
		case BUILT_IN_MEMCPY:
		  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
		      && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
		      && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
		      && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
		    {
		      tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
		      tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
		      tree len = gimple_call_arg (stmt, 2);
		      optimize_memcpy (&i, dest, src, len);
		    }
		  break;

		case BUILT_IN_VA_START:
		case BUILT_IN_VA_END:
		case BUILT_IN_VA_COPY:
		  /* These shouldn't be folded before pass_stdarg.  */
		  result = optimize_stdarg_builtin (stmt);
		  break;

		default:;
		}

	      if (!result)
		{
		  gsi_next (&i);
		  continue;
		}

	      if (!update_call_from_tree (&i, result))
		gimplify_and_update_call_from_tree (&i, result);
	    }

	  todoflags |= TODO_update_address_taken;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Simplified\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	    }

	  old_stmt = stmt;
	  stmt = gsi_stmt (i);
	  update_stmt (stmt);

	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    cfg_changed = true;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "to\n  ");
	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	      fprintf (dump_file, "\n");
	    }

	  /* Retry the same statement if it changed into another
	     builtin; there might be new opportunities now.  */
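	  /* For instance, a strcpy whose length became known might have been
	     folded into a memcpy; since the function code changed, the new
	     call is worth another look, so the iterator is only advanced
	     when the statement is no longer a different normal builtin.  */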
	  if (gimple_code (stmt) != GIMPLE_CALL)
	    {
	      gsi_next (&i);
	      continue;
	    }
	  callee = gimple_call_fndecl (stmt);
	  if (!callee
	      || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
	      || DECL_FUNCTION_CODE (callee) == fcode)
	    gsi_next (&i);
	}
    }

  /* Delete unreachable blocks.  */
  if (cfg_changed)
    todoflags |= TODO_cleanup_cfg;

  return todoflags;
}

} // anon namespace

gimple_opt_pass *
make_pass_fold_builtins (gcc::context *ctxt)
{
  return new pass_fold_builtins (ctxt);
}

/* A simple pass that emits some warnings post IPA; currently it diagnoses
   null pointers passed to function arguments declared nonnull
   (-Wnonnull).  */

namespace {

const pass_data pass_data_post_ipa_warn =
{
  GIMPLE_PASS, /* type */
  "post_ipa_warn", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_post_ipa_warn : public gimple_opt_pass
{
public:
  pass_post_ipa_warn (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
  virtual bool gate (function *) { return warn_nonnull != 0; }
  virtual unsigned int execute (function *);

}; // class pass_post_ipa_warn

unsigned int
pass_post_ipa_warn::execute (function *fun)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
	    continue;

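	  /* As a rough illustration, a call like

	       extern void f (void *) __attribute__ ((nonnull));
	       f (NULL);

	     that survives until this point is diagnosed by the code below,
	     provided -Wnonnull is enabled and no warning has been suppressed
	     on the statement.  */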
	  if (warn_nonnull)
	    {
	      bitmap nonnullargs
		= get_nonnull_args (gimple_call_fntype (stmt));
	      if (nonnullargs)
		{
		  for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
		    {
		      tree arg = gimple_call_arg (stmt, i);
		      if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
			continue;
		      if (!integer_zerop (arg))
			continue;
		      if (!bitmap_empty_p (nonnullargs)
			  && !bitmap_bit_p (nonnullargs, i))
			continue;

		      location_t loc = gimple_location (stmt);
		      if (warning_at (loc, OPT_Wnonnull,
				      "%Gargument %u null where non-null "
				      "expected", as_a <gcall *>(stmt), i + 1))
			{
			  tree fndecl = gimple_call_fndecl (stmt);
			  if (fndecl && DECL_IS_BUILTIN (fndecl))
			    inform (loc, "in a call to built-in function %qD",
				    fndecl);
			  else if (fndecl)
			    inform (DECL_SOURCE_LOCATION (fndecl),
				    "in a call to function %qD declared here",
				    fndecl);
			}
		    }
		  BITMAP_FREE (nonnullargs);
		}
	    }
	}
    }
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_post_ipa_warn (gcc::context *ctxt)
{
  return new pass_post_ipa_warn (ctxt);
}