1 /* Conditional constant propagation pass for the GNU compiler.
2    Copyright (C) 2000-2019 Free Software Foundation, Inc.
3    Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4    Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 /* Conditional constant propagation (CCP) is based on the SSA
23    propagation engine (tree-ssa-propagate.c).  Constant assignments of
24    the form VAR = CST are propagated from the assignments into uses of
25    VAR, which in turn may generate new constants.  The simulation uses
26    a four level lattice to keep track of constant values associated
27    with SSA names.  Given an SSA name V_i, it may take one of the
28    following values:
29 
30 	UNINITIALIZED   ->  the initial state of the value.  This value
31 			    is replaced with a correct initial value
32 			    the first time the value is used, so the
33 			    rest of the pass does not need to care about
34 			    it.  Using this value simplifies initialization
35 			    of the pass, and prevents us from needlessly
36 			    scanning statements that are never reached.
37 
38 	UNDEFINED	->  V_i is a local variable whose definition
39 			    has not been processed yet.  Therefore we
40 			    don't yet know if its value is a constant
41 			    or not.
42 
43 	CONSTANT	->  V_i has been found to hold a constant
44 			    value C.
45 
46 	VARYING		->  V_i cannot take a constant value, or if it
47 			    does, it is not possible to determine it
48 			    at compile time.
49 
50    The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51 
52    1- In ccp_visit_stmt, we are interested in assignments whose RHS
53       evaluates to a constant and conditional jumps whose predicate
54       evaluates to a boolean true or false.  When an assignment of
55       the form V_i = CONST is found, V_i's lattice value is set to
56       CONSTANT and CONST is associated with it.  This causes the
57       propagation engine to add all the SSA edges coming out of the
58       assignment into the worklists, so that statements that use V_i
59       can be visited.
60 
61       If the statement is a conditional with a constant predicate, we
62       mark the outgoing edges as executable or not executable
63       depending on the predicate's value.  This is then used when
64       visiting PHI nodes to know when a PHI argument can be ignored.
65 
66 
67    2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68       same constant C, then the LHS of the PHI is set to C.  This
69       evaluation is known as the "meet operation".  Since one of the
70       goals of this evaluation is to optimistically return constant
71       values as often as possible, it uses two main short cuts:
72 
73       - If an argument is flowing in through a non-executable edge, it
74 	is ignored.  This is useful in cases like this:
75 
76 			if (PRED)
77 			  a_9 = 3;
78 			else
79 			  a_10 = 100;
80 			a_11 = PHI (a_9, a_10)
81 
82 	If PRED is known to always evaluate to false, then we can
83 	assume that a_11 will always take its value from a_10, meaning
84 	that instead of considering it VARYING (a_9 and a_10 have
85 	different values), we can consider it CONSTANT 100.
86 
87       - If an argument has an UNDEFINED value, then it does not affect
88 	the outcome of the meet operation.  If a variable V_i has an
89 	UNDEFINED value, it means that either its defining statement
90 	hasn't been visited yet or V_i has no defining statement, in
91 	which case the original symbol 'V' is being used
92 	uninitialized.  Since 'V' is a local variable, the compiler
93 	may assume any initial value for it.
94 
95 
96    After propagation, every variable V_i that ends up with a lattice
97    value of CONSTANT will have the associated constant value in the
98    array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
99    final substitution and folding.
100 
101    This algorithm uses wide-ints at the max precision of the target.
102    This means that, with one uninteresting exception, variables with
103    UNSIGNED types never go to VARYING because the bits above the
104    precision of the type of the variable are always zero.  The
105    uninteresting case is a variable of UNSIGNED type that has the
106    maximum precision of the target.  Such variables can go to VARYING,
107    but this causes no loss of information since these variables will
108    never be extended.
109 
110    References:
111 
112      Constant propagation with conditional branches,
113      Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114 
115      Building an Optimizing Compiler,
116      Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117 
118      Advanced Compiler Design and Implementation,
119      Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
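
/* As a small illustration (not from the original sources), consider

	x_1 = 4;
	y_2 = x_1 + 6;
	if (y_2 > 20)
	  goto bb_3;
	else
	  goto bb_4;

   CCP records x_1 as CONSTANT 4, folds the definition of y_2 to
   CONSTANT 10, evaluates the predicate 10 > 20 to false and therefore
   marks only the edge to bb_4 as executable.  PHI nodes reached through
   the dead edge then ignore the corresponding arguments.  */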
120 
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "ssa.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-fold.h"
133 #include "tree-eh.h"
134 #include "gimplify.h"
135 #include "gimple-iterator.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
138 #include "dbgcnt.h"
139 #include "params.h"
140 #include "builtins.h"
141 #include "cfgloop.h"
142 #include "stor-layout.h"
143 #include "optabs-query.h"
144 #include "tree-ssa-ccp.h"
145 #include "tree-dfa.h"
146 #include "diagnostic-core.h"
147 #include "stringpool.h"
148 #include "attribs.h"
149 #include "tree-vector-builder.h"
150 
151 /* Possible lattice values.  */
152 typedef enum
153 {
154   UNINITIALIZED,
155   UNDEFINED,
156   CONSTANT,
157   VARYING
158 } ccp_lattice_t;
159 
160 struct ccp_prop_value_t {
161     /* Lattice value.  */
162     ccp_lattice_t lattice_val;
163 
164     /* Propagated value.  */
165     tree value;
166 
167     /* Mask that applies to the propagated value during CCP.  For X
168        with a CONSTANT lattice value, X & ~mask == value & ~mask.  The
169        zero bits in the mask cover constant values.  The ones mean no
170        information.  */
171     widest_int mask;
172 };
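
/* For example (illustrative), a CONSTANT entry with value == 8 and
   mask == 7 states that all bits except the low three are known and
   match 8, i.e. the runtime value is in {8, ..., 15}.  A mask of zero
   means the value is fully known; a mask of all ones carries no bit
   information at all.  */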
173 
174 class ccp_propagate : public ssa_propagation_engine
175 {
176  public:
177   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
178   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
179 };
180 
181 /* Array of propagated constant values.  After propagation,
182    CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
183    the constant is held in an SSA name representing a memory store
184    (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
185    memory reference used to store (i.e., the LHS of the assignment
186    doing the store).  */
187 static ccp_prop_value_t *const_val;
188 static unsigned n_const_val;
189 
190 static void canonicalize_value (ccp_prop_value_t *);
191 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
192 
193 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */
194 
195 static void
196 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
197 {
198   switch (val.lattice_val)
199     {
200     case UNINITIALIZED:
201       fprintf (outf, "%sUNINITIALIZED", prefix);
202       break;
203     case UNDEFINED:
204       fprintf (outf, "%sUNDEFINED", prefix);
205       break;
206     case VARYING:
207       fprintf (outf, "%sVARYING", prefix);
208       break;
209     case CONSTANT:
210       if (TREE_CODE (val.value) != INTEGER_CST
211 	  || val.mask == 0)
212 	{
213 	  fprintf (outf, "%sCONSTANT ", prefix);
214 	  print_generic_expr (outf, val.value, dump_flags);
215 	}
216       else
217 	{
218 	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
219 					     val.mask);
220 	  fprintf (outf, "%sCONSTANT ", prefix);
221 	  print_hex (cval, outf);
222 	  fprintf (outf, " (");
223 	  print_hex (val.mask, outf);
224 	  fprintf (outf, ")");
225 	}
226       break;
227     default:
228       gcc_unreachable ();
229     }
230 }
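
/* For instance, the partially known value from the example above
   (value 8, mask 7) is dumped by dump_lattice_value as
   "CONSTANT 0x8 (0x7)", while fully known constants are printed via
   print_generic_expr.  */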
231 
232 
233 /* Print lattice value VAL to stderr.  */
234 
235 void debug_lattice_value (ccp_prop_value_t val);
236 
237 DEBUG_FUNCTION void
238 debug_lattice_value (ccp_prop_value_t val)
239 {
240   dump_lattice_value (stderr, "", val);
241   fprintf (stderr, "\n");
242 }
243 
244 /* Extend NONZERO_BITS to a full mask, based on SGN.  */
245 
246 static widest_int
247 extend_mask (const wide_int &nonzero_bits, signop sgn)
248 {
249   return widest_int::from (nonzero_bits, sgn);
250 }
251 
252 /* Compute a default value for variable VAR and store it in the
253    CONST_VAL array.  The following rules are used to get default
254    values:
255 
256    1- Global and static variables that are declared constant are
257       considered CONSTANT.
258 
259    2- Any other value is considered UNDEFINED.  This is useful when
260       considering PHI nodes.  PHI arguments that are undefined do not
261       change the constant value of the PHI node, which allows for more
262       constants to be propagated.
263 
264    3- Variables defined by statements other than assignments and PHI
265       nodes are considered VARYING.
266 
267    4- Initial values of variables that are not GIMPLE registers are
268       considered VARYING.  */
269 
270 static ccp_prop_value_t
271 get_default_value (tree var)
272 {
273   ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
274   gimple *stmt;
275 
276   stmt = SSA_NAME_DEF_STMT (var);
277 
278   if (gimple_nop_p (stmt))
279     {
280       /* Variables defined by an empty statement are those used
281 	 before being initialized.  If VAR is a local variable, we
282 	 can assume initially that it is UNDEFINED, otherwise we must
283 	 consider it VARYING.  */
284       if (!virtual_operand_p (var)
285 	  && SSA_NAME_VAR (var)
286 	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
287 	val.lattice_val = UNDEFINED;
288       else
289 	{
290 	  val.lattice_val = VARYING;
291 	  val.mask = -1;
292 	  if (flag_tree_bit_ccp)
293 	    {
294 	      wide_int nonzero_bits = get_nonzero_bits (var);
295 	      if (nonzero_bits != -1)
296 		{
297 		  val.lattice_val = CONSTANT;
298 		  val.value = build_zero_cst (TREE_TYPE (var));
299 		  val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (var)));
300 		}
301 	    }
302 	}
303     }
304   else if (is_gimple_assign (stmt))
305     {
306       tree cst;
307       if (gimple_assign_single_p (stmt)
308 	  && DECL_P (gimple_assign_rhs1 (stmt))
309 	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
310 	{
311 	  val.lattice_val = CONSTANT;
312 	  val.value = cst;
313 	}
314       else
315 	{
316 	  /* Any other variable defined by an assignment is considered
317 	     UNDEFINED.  */
318 	  val.lattice_val = UNDEFINED;
319 	}
320     }
321   else if ((is_gimple_call (stmt)
322 	    && gimple_call_lhs (stmt) != NULL_TREE)
323 	   || gimple_code (stmt) == GIMPLE_PHI)
324     {
325       /* A variable defined by a call or a PHI node is considered
326 	 UNDEFINED.  */
327       val.lattice_val = UNDEFINED;
328     }
329   else
330     {
331       /* Otherwise, VAR will never take on a constant value.  */
332       val.lattice_val = VARYING;
333       val.mask = -1;
334     }
335 
336   return val;
337 }
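
/* For example, the default definition of an uninitialized local
   variable starts out UNDEFINED, whereas the default definition of a
   parameter (or any other non-VAR_DECL) starts out VARYING, possibly
   refined to CONSTANT zero with a mask derived from get_nonzero_bits
   when flag_tree_bit_ccp is enabled.  */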
338 
339 
340 /* Get the constant value associated with variable VAR.  */
341 
342 static inline ccp_prop_value_t *
343 get_value (tree var)
344 {
345   ccp_prop_value_t *val;
346 
347   if (const_val == NULL
348       || SSA_NAME_VERSION (var) >= n_const_val)
349     return NULL;
350 
351   val = &const_val[SSA_NAME_VERSION (var)];
352   if (val->lattice_val == UNINITIALIZED)
353     *val = get_default_value (var);
354 
355   canonicalize_value (val);
356 
357   return val;
358 }
359 
360 /* Return the constant tree value associated with VAR.  */
361 
362 static inline tree
363 get_constant_value (tree var)
364 {
365   ccp_prop_value_t *val;
366   if (TREE_CODE (var) != SSA_NAME)
367     {
368       if (is_gimple_min_invariant (var))
369         return var;
370       return NULL_TREE;
371     }
372   val = get_value (var);
373   if (val
374       && val->lattice_val == CONSTANT
375       && (TREE_CODE (val->value) != INTEGER_CST
376 	  || val->mask == 0))
377     return val->value;
378   return NULL_TREE;
379 }
380 
381 /* Sets the value associated with VAR to VARYING.  */
382 
383 static inline void
384 set_value_varying (tree var)
385 {
386   ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
387 
388   val->lattice_val = VARYING;
389   val->value = NULL_TREE;
390   val->mask = -1;
391 }
392 
393 /* For integer constants, make sure to drop TREE_OVERFLOW.  */
394 
395 static void
396 canonicalize_value (ccp_prop_value_t *val)
397 {
398   if (val->lattice_val != CONSTANT)
399     return;
400 
401   if (TREE_OVERFLOW_P (val->value))
402     val->value = drop_tree_overflow (val->value);
403 }
404 
405 /* Return whether the lattice transition is valid.  */
406 
407 static bool
408 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
409 {
410   /* Lattice transitions must always be monotonically increasing in
411      value.  */
412   if (old_val.lattice_val < new_val.lattice_val)
413     return true;
414 
415   if (old_val.lattice_val != new_val.lattice_val)
416     return false;
417 
418   if (!old_val.value && !new_val.value)
419     return true;
420 
421   /* Now both lattice values are CONSTANT.  */
422 
423   /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
424      when only a single copy edge is executable.  */
425   if (TREE_CODE (old_val.value) == SSA_NAME
426       && TREE_CODE (new_val.value) == SSA_NAME)
427     return true;
428 
429   /* Allow transitioning from a constant to a copy.  */
430   if (is_gimple_min_invariant (old_val.value)
431       && TREE_CODE (new_val.value) == SSA_NAME)
432     return true;
433 
434   /* Allow transitioning from PHI <&x, not executable> == &x
435      to PHI <&x, &y> == common alignment.  */
436   if (TREE_CODE (old_val.value) != INTEGER_CST
437       && TREE_CODE (new_val.value) == INTEGER_CST)
438     return true;
439 
440   /* Bit-lattices have to agree in the still valid bits.  */
441   if (TREE_CODE (old_val.value) == INTEGER_CST
442       && TREE_CODE (new_val.value) == INTEGER_CST)
443     return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
444 	    == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
445 
446   /* Otherwise constant values have to agree.  */
447   if (operand_equal_p (old_val.value, new_val.value, 0))
448     return true;
449 
450   /* At least the kinds and types should agree now.  */
451   if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
452       || !types_compatible_p (TREE_TYPE (old_val.value),
453 			      TREE_TYPE (new_val.value)))
454     return false;
455 
456   /* For floats and !HONOR_NANS allow transitions from (partial) NaN
457      to non-NaN.  */
458   tree type = TREE_TYPE (new_val.value);
459   if (SCALAR_FLOAT_TYPE_P (type)
460       && !HONOR_NANS (type))
461     {
462       if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
463 	return true;
464     }
465   else if (VECTOR_FLOAT_TYPE_P (type)
466 	   && !HONOR_NANS (type))
467     {
468       unsigned int count
469 	= tree_vector_builder::binary_encoded_nelts (old_val.value,
470 						     new_val.value);
471       for (unsigned int i = 0; i < count; ++i)
472 	if (!REAL_VALUE_ISNAN
473 	       (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val.value, i)))
474 	    && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val.value, i),
475 				 VECTOR_CST_ENCODED_ELT (new_val.value, i), 0))
476 	  return false;
477       return true;
478     }
479   else if (COMPLEX_FLOAT_TYPE_P (type)
480 	   && !HONOR_NANS (type))
481     {
482       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
483 	  && !operand_equal_p (TREE_REALPART (old_val.value),
484 			       TREE_REALPART (new_val.value), 0))
485 	return false;
486       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
487 	  && !operand_equal_p (TREE_IMAGPART (old_val.value),
488 			       TREE_IMAGPART (new_val.value), 0))
489 	return false;
490       return true;
491     }
492   return false;
493 }
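
/* E.g. UNDEFINED -> CONSTANT and CONSTANT -> VARYING are valid
   transitions since the lattice value only increases, whereas
   VARYING -> CONSTANT is not.  Two INTEGER_CST values are considered
   compatible as long as they agree on the bits still marked as known
   by NEW_VAL's mask.  */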
494 
495 /* Set the value for variable VAR to NEW_VAL.  Return true if the new
496    value is different from VAR's previous value.  */
497 
498 static bool
499 set_lattice_value (tree var, ccp_prop_value_t *new_val)
500 {
501   /* We can deal with old UNINITIALIZED values just fine here.  */
502   ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
503 
504   canonicalize_value (new_val);
505 
506   /* We have to be careful to not go up the bitwise lattice
507      represented by the mask.  Instead of dropping to VARYING
508      use the meet operator to retain a conservative value.
509      Missed optimizations like PR65851 make this necessary.
510      It also ensures we converge to a stable lattice solution.  */
511   if (old_val->lattice_val != UNINITIALIZED)
512     ccp_lattice_meet (new_val, old_val);
513 
514   gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
515 
516   /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
517      caller that this was a non-transition.  */
518   if (old_val->lattice_val != new_val->lattice_val
519       || (new_val->lattice_val == CONSTANT
520 	  && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
521 	      || (TREE_CODE (new_val->value) == INTEGER_CST
522 		  && (new_val->mask != old_val->mask
523 		      || (wi::bit_and_not (wi::to_widest (old_val->value),
524 					   new_val->mask)
525 			  != wi::bit_and_not (wi::to_widest (new_val->value),
526 					      new_val->mask))))
527 	      || (TREE_CODE (new_val->value) != INTEGER_CST
528 		  && !operand_equal_p (new_val->value, old_val->value, 0)))))
529     {
530       /* ???  We would like to delay creation of INTEGER_CSTs from
531 	 partially constant values here.  */
532 
533       if (dump_file && (dump_flags & TDF_DETAILS))
534 	{
535 	  dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
536 	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
537 	}
538 
539       *old_val = *new_val;
540 
541       gcc_assert (new_val->lattice_val != UNINITIALIZED);
542       return true;
543     }
544 
545   return false;
546 }
547 
548 static ccp_prop_value_t get_value_for_expr (tree, bool);
549 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
550 void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
551 		      signop, int, const widest_int &, const widest_int &,
552 		      signop, int, const widest_int &, const widest_int &);
553 
554 /* Return a widest_int that can be used for bitwise simplifications
555    from VAL.  */
556 
557 static widest_int
558 value_to_wide_int (ccp_prop_value_t val)
559 {
560   if (val.value
561       && TREE_CODE (val.value) == INTEGER_CST)
562     return wi::to_widest (val.value);
563 
564   return 0;
565 }
566 
567 /* Return the value for the address expression EXPR based on alignment
568    information.  */
569 
570 static ccp_prop_value_t
571 get_value_from_alignment (tree expr)
572 {
573   tree type = TREE_TYPE (expr);
574   ccp_prop_value_t val;
575   unsigned HOST_WIDE_INT bitpos;
576   unsigned int align;
577 
578   gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
579 
580   get_pointer_alignment_1 (expr, &align, &bitpos);
581   val.mask = wi::bit_and_not
582     (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
583      ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
584      : -1,
585      align / BITS_PER_UNIT - 1);
586   val.lattice_val
587     = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
588   if (val.lattice_val == CONSTANT)
589     val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
590   else
591     val.value = NULL_TREE;
592 
593   return val;
594 }
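
/* As a concrete example (with made-up numbers), for an ADDR_EXPR known
   to be 16-byte aligned with a misalignment of 4 bytes the function
   above produces a mask with the low four bits clear and a value of 4,
   i.e. the pass knows the pointer modulo 16 equals 4 while the
   remaining bits stay unknown.  */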
595 
596 /* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
597    return constant bits extracted from alignment information for
598    invariant addresses.  */
599 
600 static ccp_prop_value_t
601 get_value_for_expr (tree expr, bool for_bits_p)
602 {
603   ccp_prop_value_t val;
604 
605   if (TREE_CODE (expr) == SSA_NAME)
606     {
607       ccp_prop_value_t *val_ = get_value (expr);
608       if (val_)
609 	val = *val_;
610       else
611 	{
612 	  val.lattice_val = VARYING;
613 	  val.value = NULL_TREE;
614 	  val.mask = -1;
615 	}
616       if (for_bits_p
617 	  && val.lattice_val == CONSTANT
618 	  && TREE_CODE (val.value) == ADDR_EXPR)
619 	val = get_value_from_alignment (val.value);
620       /* Fall back to a copy value.  */
621       if (!for_bits_p
622 	  && val.lattice_val == VARYING
623 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
624 	{
625 	  val.lattice_val = CONSTANT;
626 	  val.value = expr;
627 	  val.mask = -1;
628 	}
629     }
630   else if (is_gimple_min_invariant (expr)
631 	   && (!for_bits_p || TREE_CODE (expr) == INTEGER_CST))
632     {
633       val.lattice_val = CONSTANT;
634       val.value = expr;
635       val.mask = 0;
636       canonicalize_value (&val);
637     }
638   else if (TREE_CODE (expr) == ADDR_EXPR)
639     val = get_value_from_alignment (expr);
640   else
641     {
642       val.lattice_val = VARYING;
643       val.mask = -1;
644       val.value = NULL_TREE;
645     }
646 
647   if (val.lattice_val == VARYING
648       && TYPE_UNSIGNED (TREE_TYPE (expr)))
649     val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
650 
651   return val;
652 }
653 
654 /* Return the likely CCP lattice value for STMT.
655 
656    If STMT has no operands, then return CONSTANT.
657 
658    Else if undefinedness of operands of STMT cause its value to be
659    undefined, then return UNDEFINED.
660 
661    Else if any operands of STMT are constants, then return CONSTANT.
662 
663    Else return VARYING.  */
664 
665 static ccp_lattice_t
666 likely_value (gimple *stmt)
667 {
668   bool has_constant_operand, has_undefined_operand, all_undefined_operands;
669   bool has_nsa_operand;
670   tree use;
671   ssa_op_iter iter;
672   unsigned i;
673 
674   enum gimple_code code = gimple_code (stmt);
675 
676   /* This function appears to be called only for assignments, calls,
677      conditionals, and switches, due to the logic in visit_stmt.  */
678   gcc_assert (code == GIMPLE_ASSIGN
679               || code == GIMPLE_CALL
680               || code == GIMPLE_COND
681               || code == GIMPLE_SWITCH);
682 
683   /* If the statement has volatile operands, it won't fold to a
684      constant value.  */
685   if (gimple_has_volatile_ops (stmt))
686     return VARYING;
687 
688   /* Arrive here for more complex cases.  */
689   has_constant_operand = false;
690   has_undefined_operand = false;
691   all_undefined_operands = true;
692   has_nsa_operand = false;
693   FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
694     {
695       ccp_prop_value_t *val = get_value (use);
696 
697       if (val && val->lattice_val == UNDEFINED)
698 	has_undefined_operand = true;
699       else
700 	all_undefined_operands = false;
701 
702       if (val && val->lattice_val == CONSTANT)
703 	has_constant_operand = true;
704 
705       if (SSA_NAME_IS_DEFAULT_DEF (use)
706 	  || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
707 	has_nsa_operand = true;
708     }
709 
710   /* There may be constants in regular rhs operands.  For calls we
711      have to ignore lhs, fndecl and static chain, otherwise only
712      the lhs.  */
713   for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
714        i < gimple_num_ops (stmt); ++i)
715     {
716       tree op = gimple_op (stmt, i);
717       if (!op || TREE_CODE (op) == SSA_NAME)
718 	continue;
719       if (is_gimple_min_invariant (op))
720 	has_constant_operand = true;
721     }
722 
723   if (has_constant_operand)
724     all_undefined_operands = false;
725 
726   if (has_undefined_operand
727       && code == GIMPLE_CALL
728       && gimple_call_internal_p (stmt))
729     switch (gimple_call_internal_fn (stmt))
730       {
731 	/* These 3 builtins use the first argument just as a magic
732 	   way to find out a decl uid.  */
733       case IFN_GOMP_SIMD_LANE:
734       case IFN_GOMP_SIMD_VF:
735       case IFN_GOMP_SIMD_LAST_LANE:
736 	has_undefined_operand = false;
737 	break;
738       default:
739 	break;
740       }
741 
742   /* If the operation combines operands like COMPLEX_EXPR make sure to
743      not mark the result UNDEFINED if only one part of the result is
744      undefined.  */
745   if (has_undefined_operand && all_undefined_operands)
746     return UNDEFINED;
747   else if (code == GIMPLE_ASSIGN && has_undefined_operand)
748     {
749       switch (gimple_assign_rhs_code (stmt))
750 	{
751 	/* Unary operators are handled with all_undefined_operands.  */
752 	case PLUS_EXPR:
753 	case MINUS_EXPR:
754 	case POINTER_PLUS_EXPR:
755 	case BIT_XOR_EXPR:
756 	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
757 	     Not bitwise operators, one VARYING operand may specify the
758 	     result completely.
759 	     Not logical operators for the same reason, apart from XOR.
760 	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
761 	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
762 	     the undefined operand may be promoted.  */
763 	  return UNDEFINED;
764 
765 	case ADDR_EXPR:
766 	  /* If any part of an address is UNDEFINED, like the index
767 	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
768 	  return UNDEFINED;
769 
770 	default:
771 	  ;
772 	}
773     }
774   /* If there was an UNDEFINED operand but the result may not be UNDEFINED,
775      fall back to CONSTANT.  During iteration UNDEFINED may still drop
776      to CONSTANT.  */
777   if (has_undefined_operand)
778     return CONSTANT;
779 
780   /* We do not consider virtual operands here -- a load from read-only
781      memory may have only VARYING virtual operands, but still be
782      constant.  Also we can combine the stmt with definitions from
783      operands whose definitions are not simulated again.  */
784   if (has_constant_operand
785       || has_nsa_operand
786       || gimple_references_memory_p (stmt))
787     return CONSTANT;
788 
789   return VARYING;
790 }
791 
792 /* Returns true if STMT cannot be constant.  */
793 
794 static bool
795 surely_varying_stmt_p (gimple *stmt)
796 {
797   /* If the statement has operands that we cannot handle, it cannot be
798      constant.  */
799   if (gimple_has_volatile_ops (stmt))
800     return true;
801 
802   /* If it is a call that does not return a value, or a direct call
803      to a function that is neither a builtin nor has the
804      assume_aligned/alloc_align attribute, it is varying.  */
805   if (is_gimple_call (stmt))
806     {
807       tree fndecl, fntype = gimple_call_fntype (stmt);
808       if (!gimple_call_lhs (stmt)
809 	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
810 	      && !fndecl_built_in_p (fndecl)
811 	      && !lookup_attribute ("assume_aligned",
812 				    TYPE_ATTRIBUTES (fntype))
813 	      && !lookup_attribute ("alloc_align",
814 				    TYPE_ATTRIBUTES (fntype))))
815 	return true;
816     }
817 
818   /* Any other store operation is not interesting.  */
819   else if (gimple_vdef (stmt))
820     return true;
821 
822   /* Anything other than assignments, calls, conditional jumps and
823      switches is not interesting for CCP.  */
824   if (gimple_code (stmt) != GIMPLE_ASSIGN
825       && gimple_code (stmt) != GIMPLE_COND
826       && gimple_code (stmt) != GIMPLE_SWITCH
827       && gimple_code (stmt) != GIMPLE_CALL)
828     return true;
829 
830   return false;
831 }
832 
833 /* Initialize local data structures for CCP.  */
834 
835 static void
836 ccp_initialize (void)
837 {
838   basic_block bb;
839 
840   n_const_val = num_ssa_names;
841   const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
842 
843   /* Initialize simulation flags for PHI nodes and statements.  */
844   FOR_EACH_BB_FN (bb, cfun)
845     {
846       gimple_stmt_iterator i;
847 
848       for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
849         {
850 	  gimple *stmt = gsi_stmt (i);
851 	  bool is_varying;
852 
853 	  /* If the statement is a control insn, then we always want
854 	     to simulate it at least once.  Failing to do so means
855 	     that its outgoing edges will never get added.  */
856 	  if (stmt_ends_bb_p (stmt))
857 	    is_varying = false;
858 	  else
859 	    is_varying = surely_varying_stmt_p (stmt);
860 
861 	  if (is_varying)
862 	    {
863 	      tree def;
864 	      ssa_op_iter iter;
865 
866 	      /* If the statement will not produce a constant, mark
867 		 all its outputs VARYING.  */
868 	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
869 		set_value_varying (def);
870 	    }
871           prop_set_simulate_again (stmt, !is_varying);
872 	}
873     }
874 
875   /* Now process PHI nodes.  We never clear the simulate_again flag on
876      phi nodes, since we do not know which edges are executable yet,
877      except for phi nodes for virtual operands when we do not do store ccp.  */
878   FOR_EACH_BB_FN (bb, cfun)
879     {
880       gphi_iterator i;
881 
882       for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
883         {
884           gphi *phi = i.phi ();
885 
886 	  if (virtual_operand_p (gimple_phi_result (phi)))
887             prop_set_simulate_again (phi, false);
888 	  else
889             prop_set_simulate_again (phi, true);
890 	}
891     }
892 }
893 
894 /* Debug count support.  Reset the lattice values of SSA names to
895    VARYING once the total number of SSA names analyzed exceeds
896    the specified debug count.  */
897 
898 static void
899 do_dbg_cnt (void)
900 {
901   unsigned i;
902   for (i = 0; i < num_ssa_names; i++)
903     {
904       if (!dbg_cnt (ccp))
905         {
906           const_val[i].lattice_val = VARYING;
907 	  const_val[i].mask = -1;
908           const_val[i].value = NULL_TREE;
909         }
910     }
911 }
912 
913 
914 /* We want to provide our own GET_VALUE and FOLD_STMT virtual methods.  */
915 class ccp_folder : public substitute_and_fold_engine
916 {
917  public:
918   tree get_value (tree) FINAL OVERRIDE;
919   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
920 };
921 
922 /* This method just wraps GET_CONSTANT_VALUE for now.  Over time
923    naked calls to GET_CONSTANT_VALUE should be eliminated in favor
924    of calling member functions.  */
925 
926 tree
927 ccp_folder::get_value (tree op)
928 {
929   return get_constant_value (op);
930 }
931 
932 /* Do final substitution of propagated values, cleanup the flowgraph and
933    free allocated storage.  If NONZERO_P, record nonzero bits.
934 
935    Return TRUE when something was optimized.  */
936 
937 static bool
938 ccp_finalize (bool nonzero_p)
939 {
940   bool something_changed;
941   unsigned i;
942   tree name;
943 
944   do_dbg_cnt ();
945 
946   /* Derive alignment and misalignment information from partially
947      constant pointers in the lattice or nonzero bits from partially
948      constant integers.  */
949   FOR_EACH_SSA_NAME (i, name, cfun)
950     {
951       ccp_prop_value_t *val;
952       unsigned int tem, align;
953 
954       if (!POINTER_TYPE_P (TREE_TYPE (name))
955 	  && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
956 	      /* Don't record nonzero bits before IPA to avoid
957 		 using too much memory.  */
958 	      || !nonzero_p))
959 	continue;
960 
961       val = get_value (name);
962       if (val->lattice_val != CONSTANT
963 	  || TREE_CODE (val->value) != INTEGER_CST
964 	  || val->mask == 0)
965 	continue;
966 
967       if (POINTER_TYPE_P (TREE_TYPE (name)))
968 	{
969 	  /* Trailing mask bits specify the alignment, trailing value
970 	     bits the misalignment.  */
971 	  tem = val->mask.to_uhwi ();
972 	  align = least_bit_hwi (tem);
973 	  if (align > 1)
974 	    set_ptr_info_alignment (get_ptr_info (name), align,
975 				    (TREE_INT_CST_LOW (val->value)
976 				     & (align - 1)));
977 	}
978       else
979 	{
980 	  unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
981 	  wide_int nonzero_bits
982 	    = (wide_int::from (val->mask, precision, UNSIGNED)
983 	       | wi::to_wide (val->value));
984 	  nonzero_bits &= get_nonzero_bits (name);
985 	  set_nonzero_bits (name, nonzero_bits);
986 	}
987     }
988 
989   /* Perform substitutions based on the known constant values.  */
990   class ccp_folder ccp_folder;
991   something_changed = ccp_folder.substitute_and_fold ();
992 
993   free (const_val);
994   const_val = NULL;
995   return something_changed;
996 }
997 
998 
999 /* Compute the meet operator between *VAL1 and *VAL2.  Store the result
1000    in VAL1.
1001 
1002    		any  M UNDEFINED   = any
1003 		any  M VARYING     = VARYING
1004 		Ci   M Cj	   = Ci		if (i == j)
1005 		Ci   M Cj	   = VARYING	if (i != j)
1006    */
1007 
1008 static void
1009 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
1010 {
1011   if (val1->lattice_val == UNDEFINED
1012       /* For UNDEFINED M SSA we can't always return the SSA name because
1013          its definition may not dominate the PHI node.  Doing optimistic
1014 	 copy propagation also causes a lot of gcc.dg/uninit-pred*.c FAILs.  */
1015       && (val2->lattice_val != CONSTANT
1016 	  || TREE_CODE (val2->value) != SSA_NAME))
1017     {
1018       /* UNDEFINED M any = any   */
1019       *val1 = *val2;
1020     }
1021   else if (val2->lattice_val == UNDEFINED
1022 	   /* See above.  */
1023 	   && (val1->lattice_val != CONSTANT
1024 	       || TREE_CODE (val1->value) != SSA_NAME))
1025     {
1026       /* any M UNDEFINED = any
1027          Nothing to do.  VAL1 already contains the value we want.  */
1028       ;
1029     }
1030   else if (val1->lattice_val == VARYING
1031            || val2->lattice_val == VARYING)
1032     {
1033       /* any M VARYING = VARYING.  */
1034       val1->lattice_val = VARYING;
1035       val1->mask = -1;
1036       val1->value = NULL_TREE;
1037     }
1038   else if (val1->lattice_val == CONSTANT
1039 	   && val2->lattice_val == CONSTANT
1040 	   && TREE_CODE (val1->value) == INTEGER_CST
1041 	   && TREE_CODE (val2->value) == INTEGER_CST)
1042     {
1043       /* Ci M Cj = Ci		if (i == j)
1044 	 Ci M Cj = VARYING	if (i != j)
1045 
1046          For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
1047 	 drop to varying.  */
1048       val1->mask = (val1->mask | val2->mask
1049 		    | (wi::to_widest (val1->value)
1050 		       ^ wi::to_widest (val2->value)));
1051       if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1052 	{
1053 	  val1->lattice_val = VARYING;
1054 	  val1->value = NULL_TREE;
1055 	}
1056     }
1057   else if (val1->lattice_val == CONSTANT
1058 	   && val2->lattice_val == CONSTANT
1059 	   && operand_equal_p (val1->value, val2->value, 0))
1060     {
1061       /* Ci M Cj = Ci		if (i == j)
1062 	 Ci M Cj = VARYING	if (i != j)
1063 
1064          VAL1 already contains the value we want for equivalent values.  */
1065     }
1066   else if (val1->lattice_val == CONSTANT
1067 	   && val2->lattice_val == CONSTANT
1068 	   && (TREE_CODE (val1->value) == ADDR_EXPR
1069 	       || TREE_CODE (val2->value) == ADDR_EXPR))
1070     {
1071       /* When unequal addresses are involved, try meeting for
1072 	 alignment.  */
1073       ccp_prop_value_t tem = *val2;
1074       if (TREE_CODE (val1->value) == ADDR_EXPR)
1075 	*val1 = get_value_for_expr (val1->value, true);
1076       if (TREE_CODE (val2->value) == ADDR_EXPR)
1077 	tem = get_value_for_expr (val2->value, true);
1078       ccp_lattice_meet (val1, &tem);
1079     }
1080   else
1081     {
1082       /* Any other combination is VARYING.  */
1083       val1->lattice_val = VARYING;
1084       val1->mask = -1;
1085       val1->value = NULL_TREE;
1086     }
1087 }
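
/* For example (illustrative), meeting CONSTANT 12 (mask 0) with
   CONSTANT 4 (mask 0) ORs the differing bits 12 ^ 4 = 8 into the mask;
   the result is a CONSTANT whose known bits say the value is either 4
   or 12.  If no known bits remain after the merge, the result drops to
   VARYING.  */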
1088 
1089 
1090 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1091    lattice values to determine PHI_NODE's lattice value.  The value of a
1092    PHI node is determined by calling ccp_lattice_meet on all the arguments
1093    of the PHI node that are incoming via executable edges.  */
1094 
1095 enum ssa_prop_result
1096 ccp_propagate::visit_phi (gphi *phi)
1097 {
1098   unsigned i;
1099   ccp_prop_value_t new_val;
1100 
1101   if (dump_file && (dump_flags & TDF_DETAILS))
1102     {
1103       fprintf (dump_file, "\nVisiting PHI node: ");
1104       print_gimple_stmt (dump_file, phi, 0, dump_flags);
1105     }
1106 
1107   new_val.lattice_val = UNDEFINED;
1108   new_val.value = NULL_TREE;
1109   new_val.mask = 0;
1110 
1111   bool first = true;
1112   bool non_exec_edge = false;
1113   for (i = 0; i < gimple_phi_num_args (phi); i++)
1114     {
1115       /* Compute the meet operator over all the PHI arguments flowing
1116 	 through executable edges.  */
1117       edge e = gimple_phi_arg_edge (phi, i);
1118 
1119       if (dump_file && (dump_flags & TDF_DETAILS))
1120 	{
1121 	  fprintf (dump_file,
1122 	      "\tArgument #%d (%d -> %d %sexecutable)\n",
1123 	      i, e->src->index, e->dest->index,
1124 	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1125 	}
1126 
1127       /* If the incoming edge is executable, compute the meet operator for
1128 	 the existing value of the PHI node and the current PHI argument.  */
1129       if (e->flags & EDGE_EXECUTABLE)
1130 	{
1131 	  tree arg = gimple_phi_arg (phi, i)->def;
1132 	  ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1133 
1134 	  if (first)
1135 	    {
1136 	      new_val = arg_val;
1137 	      first = false;
1138 	    }
1139 	  else
1140 	    ccp_lattice_meet (&new_val, &arg_val);
1141 
1142 	  if (dump_file && (dump_flags & TDF_DETAILS))
1143 	    {
1144 	      fprintf (dump_file, "\t");
1145 	      print_generic_expr (dump_file, arg, dump_flags);
1146 	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
1147 	      fprintf (dump_file, "\n");
1148 	    }
1149 
1150 	  if (new_val.lattice_val == VARYING)
1151 	    break;
1152 	}
1153       else
1154 	non_exec_edge = true;
1155     }
1156 
1157   /* In case there were non-executable edges and the value is a copy,
1158      make sure its definition dominates the PHI node.  */
1159   if (non_exec_edge
1160       && new_val.lattice_val == CONSTANT
1161       && TREE_CODE (new_val.value) == SSA_NAME
1162       && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1163       && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1164 			   gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1165     {
1166       new_val.lattice_val = VARYING;
1167       new_val.value = NULL_TREE;
1168       new_val.mask = -1;
1169     }
1170 
1171   if (dump_file && (dump_flags & TDF_DETAILS))
1172     {
1173       dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
1174       fprintf (dump_file, "\n\n");
1175     }
1176 
1177   /* Make the transition to the new value.  */
1178   if (set_lattice_value (gimple_phi_result (phi), &new_val))
1179     {
1180       if (new_val.lattice_val == VARYING)
1181 	return SSA_PROP_VARYING;
1182       else
1183 	return SSA_PROP_INTERESTING;
1184     }
1185   else
1186     return SSA_PROP_NOT_INTERESTING;
1187 }
1188 
1189 /* Return the constant value for OP, or OP itself otherwise.  */
1190 
1191 static tree
1192 valueize_op (tree op)
1193 {
1194   if (TREE_CODE (op) == SSA_NAME)
1195     {
1196       tree tem = get_constant_value (op);
1197       if (tem)
1198 	return tem;
1199     }
1200   return op;
1201 }
1202 
1203 /* Return the constant value for OP, but signal to not follow SSA
1204    edges if the definition may be simulated again.  */
1205 
1206 static tree
1207 valueize_op_1 (tree op)
1208 {
1209   if (TREE_CODE (op) == SSA_NAME)
1210     {
1211       /* If the definition may be simulated again we cannot follow
1212          this SSA edge as the SSA propagator does not necessarily
1213 	 re-visit the use.  */
1214       gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1215       if (!gimple_nop_p (def_stmt)
1216 	  && prop_simulate_again_p (def_stmt))
1217 	return NULL_TREE;
1218       tree tem = get_constant_value (op);
1219       if (tem)
1220 	return tem;
1221     }
1222   return op;
1223 }
1224 
1225 /* CCP specific front-end to the non-destructive constant folding
1226    routines.
1227 
1228    Attempt to simplify the RHS of STMT knowing that one or more
1229    operands are constants.
1230 
1231    If simplification is possible, return the simplified RHS,
1232    otherwise return the original RHS or NULL_TREE.  */
1233 
1234 static tree
1235 ccp_fold (gimple *stmt)
1236 {
1237   location_t loc = gimple_location (stmt);
1238   switch (gimple_code (stmt))
1239     {
1240     case GIMPLE_COND:
1241       {
1242         /* Handle comparison operators that can appear in GIMPLE form.  */
1243         tree op0 = valueize_op (gimple_cond_lhs (stmt));
1244         tree op1 = valueize_op (gimple_cond_rhs (stmt));
1245         enum tree_code code = gimple_cond_code (stmt);
1246         return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1247       }
1248 
1249     case GIMPLE_SWITCH:
1250       {
1251 	/* Return the constant switch index.  */
1252         return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1253       }
1254 
1255     case GIMPLE_ASSIGN:
1256     case GIMPLE_CALL:
1257       return gimple_fold_stmt_to_constant_1 (stmt,
1258 					     valueize_op, valueize_op_1);
1259 
1260     default:
1261       gcc_unreachable ();
1262     }
1263 }
1264 
1265 /* Apply the operation CODE in type TYPE to the value, mask pair
1266    RVAL and RMASK representing a value of type RTYPE and set
1267    the value, mask pair *VAL and *MASK to the result.  */
1268 
1269 void
1270 bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
1271 		widest_int *val, widest_int *mask,
1272 		signop rtype_sgn, int rtype_precision,
1273 		const widest_int &rval, const widest_int &rmask)
1274 {
1275   switch (code)
1276     {
1277     case BIT_NOT_EXPR:
1278       *mask = rmask;
1279       *val = ~rval;
1280       break;
1281 
1282     case NEGATE_EXPR:
1283       {
1284 	widest_int temv, temm;
1285 	/* Return ~rval + 1.  */
1286 	bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
1287 			type_sgn, type_precision, rval, rmask);
1288 	bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
1289 			 type_sgn, type_precision, temv, temm,
1290 			 type_sgn, type_precision, 1, 0);
1291 	break;
1292       }
1293 
1294     CASE_CONVERT:
1295       {
1296 	/* First extend mask and value according to the original type.  */
1297 	*mask = wi::ext (rmask, rtype_precision, rtype_sgn);
1298 	*val = wi::ext (rval, rtype_precision, rtype_sgn);
1299 
1300 	/* Then extend mask and value according to the target type.  */
1301 	*mask = wi::ext (*mask, type_precision, type_sgn);
1302 	*val = wi::ext (*val, type_precision, type_sgn);
1303 	break;
1304       }
1305 
1306     default:
1307       *mask = -1;
1308       break;
1309     }
1310 }
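
/* E.g. for BIT_NOT_EXPR the unknown bits stay unknown and the known
   bits are simply inverted: a value/mask pair of 8/7 yields ~8 with
   the same mask 7.  */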
1311 
1312 /* Apply the operation CODE in type TYPE to the value, mask pairs
1313    R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1314    and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
1315 
1316 void
1317 bit_value_binop (enum tree_code code, signop sgn, int width,
1318 		 widest_int *val, widest_int *mask,
1319 		 signop r1type_sgn, int r1type_precision,
1320 		 const widest_int &r1val, const widest_int &r1mask,
1321 		 signop r2type_sgn, int r2type_precision,
1322 		 const widest_int &r2val, const widest_int &r2mask)
1323 {
1324   bool swap_p = false;
1325 
1326   /* Assume we'll get a constant result.  Use an initial non-varying
1327      value; we fall back to varying in the end if necessary.  */
1328   *mask = -1;
1329 
1330   switch (code)
1331     {
1332     case BIT_AND_EXPR:
1333       /* The mask is constant where there is a known not
1334 	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1335       *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1336       *val = r1val & r2val;
1337       break;
1338 
1339     case BIT_IOR_EXPR:
1340       /* The mask is constant where there is a known
1341 	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
1342       *mask = wi::bit_and_not (r1mask | r2mask,
1343 			       wi::bit_and_not (r1val, r1mask)
1344 			       | wi::bit_and_not (r2val, r2mask));
1345       *val = r1val | r2val;
1346       break;
1347 
1348     case BIT_XOR_EXPR:
1349       /* m1 | m2  */
1350       *mask = r1mask | r2mask;
1351       *val = r1val ^ r2val;
1352       break;
1353 
1354     case LROTATE_EXPR:
1355     case RROTATE_EXPR:
1356       if (r2mask == 0)
1357 	{
1358 	  widest_int shift = r2val;
1359 	  if (shift == 0)
1360 	    {
1361 	      *mask = r1mask;
1362 	      *val = r1val;
1363 	    }
1364 	  else
1365 	    {
1366 	      if (wi::neg_p (shift))
1367 		{
1368 		  shift = -shift;
1369 		  if (code == RROTATE_EXPR)
1370 		    code = LROTATE_EXPR;
1371 		  else
1372 		    code = RROTATE_EXPR;
1373 		}
1374 	      if (code == RROTATE_EXPR)
1375 		{
1376 		  *mask = wi::rrotate (r1mask, shift, width);
1377 		  *val = wi::rrotate (r1val, shift, width);
1378 		}
1379 	      else
1380 		{
1381 		  *mask = wi::lrotate (r1mask, shift, width);
1382 		  *val = wi::lrotate (r1val, shift, width);
1383 		}
1384 	    }
1385 	}
1386       break;
1387 
1388     case LSHIFT_EXPR:
1389     case RSHIFT_EXPR:
1390       /* ???  We can handle a partially known shift count if we know
1391 	 its sign.  That way we can tell that (x << (y | 8)) & 255
1392 	 is zero.  */
1393       if (r2mask == 0)
1394 	{
1395 	  widest_int shift = r2val;
1396 	  if (shift == 0)
1397 	    {
1398 	      *mask = r1mask;
1399 	      *val = r1val;
1400 	    }
1401 	  else
1402 	    {
1403 	      if (wi::neg_p (shift))
1404 		{
1405 		  shift = -shift;
1406 		  if (code == RSHIFT_EXPR)
1407 		    code = LSHIFT_EXPR;
1408 		  else
1409 		    code = RSHIFT_EXPR;
1410 		}
1411 	      if (code == RSHIFT_EXPR)
1412 		{
1413 		  *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1414 		  *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1415 		}
1416 	      else
1417 		{
1418 		  *mask = wi::ext (r1mask << shift, width, sgn);
1419 		  *val = wi::ext (r1val << shift, width, sgn);
1420 		}
1421 	    }
1422 	}
1423       break;
1424 
1425     case PLUS_EXPR:
1426     case POINTER_PLUS_EXPR:
1427       {
1428 	/* Do the addition with unknown bits set to zero, to give carry-ins of
1429 	   zero wherever possible.  */
1430 	widest_int lo = (wi::bit_and_not (r1val, r1mask)
1431 			 + wi::bit_and_not (r2val, r2mask));
1432 	lo = wi::ext (lo, width, sgn);
1433 	/* Do the addition with unknown bits set to one, to give carry-ins of
1434 	   one wherever possible.  */
1435 	widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1436 	hi = wi::ext (hi, width, sgn);
1437 	/* Each bit in the result is known if (a) the corresponding bits in
1438 	   both inputs are known, and (b) the carry-in to that bit position
1439 	   is known.  We can check condition (b) by seeing if we got the same
1440 	   result with minimised carries as with maximised carries.  */
1441 	*mask = r1mask | r2mask | (lo ^ hi);
1442 	*mask = wi::ext (*mask, width, sgn);
1443 	/* It shouldn't matter whether we choose lo or hi here.  */
1444 	*val = lo;
1445 	break;
1446       }
1447 
1448     case MINUS_EXPR:
1449       {
1450 	widest_int temv, temm;
1451 	bit_value_unop (NEGATE_EXPR, r2type_sgn, r2type_precision, &temv, &temm,
1452 			  r2type_sgn, r2type_precision, r2val, r2mask);
1453 	bit_value_binop (PLUS_EXPR, sgn, width, val, mask,
1454 			 r1type_sgn, r1type_precision, r1val, r1mask,
1455 			 r2type_sgn, r2type_precision, temv, temm);
1456 	break;
1457       }
1458 
1459     case MULT_EXPR:
1460       {
1461 	/* Just track trailing zeros in both operands; the product has
1462 	   at least the sum of both counts as trailing zeros.  */
1463 	int r1tz = wi::ctz (r1val | r1mask);
1464 	int r2tz = wi::ctz (r2val | r2mask);
1465 	if (r1tz + r2tz >= width)
1466 	  {
1467 	    *mask = 0;
1468 	    *val = 0;
1469 	  }
1470 	else if (r1tz + r2tz > 0)
1471 	  {
1472 	    *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1473 			     width, sgn);
1474 	    *val = 0;
1475 	  }
1476 	break;
1477       }
1478 
1479     case EQ_EXPR:
1480     case NE_EXPR:
1481       {
1482 	widest_int m = r1mask | r2mask;
1483 	if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m))
1484 	  {
1485 	    *mask = 0;
1486 	    *val = ((code == EQ_EXPR) ? 0 : 1);
1487 	  }
1488 	else
1489 	  {
1490 	    /* We know the result of a comparison is always one or zero.  */
1491 	    *mask = 1;
1492 	    *val = 0;
1493 	  }
1494 	break;
1495       }
1496 
1497     case GE_EXPR:
1498     case GT_EXPR:
1499       swap_p = true;
1500       code = swap_tree_comparison (code);
1501       /* Fall through.  */
1502     case LT_EXPR:
1503     case LE_EXPR:
1504       {
1505 	int minmax, maxmin;
1506 
1507 	const widest_int &o1val = swap_p ? r2val : r1val;
1508 	const widest_int &o1mask = swap_p ? r2mask : r1mask;
1509 	const widest_int &o2val = swap_p ? r1val : r2val;
1510 	const widest_int &o2mask = swap_p ? r1mask : r2mask;
1511 
1512 	/* If the most significant bits are not known we know nothing.  */
1513 	if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1514 	  break;
1515 
1516 	/* For comparisons the signedness is in the comparison operands.  */
1517 	sgn = r1type_sgn;
1518 
1519 	/* If we know the most significant bits we know the value
1520 	   ranges by means of treating varying bits as zero
1521 	   or one.  Do a cross comparison of the max/min pairs.  */
1522 	maxmin = wi::cmp (o1val | o1mask,
1523 			  wi::bit_and_not (o2val, o2mask), sgn);
1524 	minmax = wi::cmp (wi::bit_and_not (o1val, o1mask),
1525 			  o2val | o2mask, sgn);
1526 	if (maxmin < 0)  /* o1 is less than o2.  */
1527 	  {
1528 	    *mask = 0;
1529 	    *val = 1;
1530 	  }
1531 	else if (minmax > 0)  /* o1 is not less or equal to o2.  */
1532 	  {
1533 	    *mask = 0;
1534 	    *val = 0;
1535 	  }
1536 	else if (maxmin == minmax)  /* o1 and o2 are equal.  */
1537 	  {
1538 	    /* This probably should never happen as we'd have
1539 	       folded the thing during fully constant value folding.  */
1540 	    *mask = 0;
1541 	    *val = (code == LE_EXPR ? 1 : 0);
1542 	  }
1543 	else
1544 	  {
1545 	    /* We know the result of a comparison is always one or zero.  */
1546 	    *mask = 1;
1547 	    *val = 0;
1548 	  }
1549 	break;
1550       }
1551 
1552     default:;
1553     }
1554 }
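
/* A small worked example for the PLUS_EXPR case above: adding the
   value/mask pair 4/3 (i.e. some value in 4..7) to the constant 8
   gives lo = 4 + 8 = 12 and hi = 7 + 8 = 15, so the result mask is
   3 | 0 | (12 ^ 15) = 3 and the value is 12: the sum is known to lie
   in 12..15.  */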
1555 
1556 /* Return the propagation value when applying the operation CODE to
1557    the value RHS yielding type TYPE.  */
1558 
1559 static ccp_prop_value_t
1560 bit_value_unop (enum tree_code code, tree type, tree rhs)
1561 {
1562   ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1563   widest_int value, mask;
1564   ccp_prop_value_t val;
1565 
1566   if (rval.lattice_val == UNDEFINED)
1567     return rval;
1568 
1569   gcc_assert ((rval.lattice_val == CONSTANT
1570 	       && TREE_CODE (rval.value) == INTEGER_CST)
1571 	      || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1572   bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1573 		  TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
1574 		  value_to_wide_int (rval), rval.mask);
1575   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1576     {
1577       val.lattice_val = CONSTANT;
1578       val.mask = mask;
1579       /* ???  Delay building trees here.  */
1580       val.value = wide_int_to_tree (type, value);
1581     }
1582   else
1583     {
1584       val.lattice_val = VARYING;
1585       val.value = NULL_TREE;
1586       val.mask = -1;
1587     }
1588   return val;
1589 }
1590 
1591 /* Return the propagation value when applying the operation CODE to
1592    the values RHS1 and RHS2 yielding type TYPE.  */
1593 
1594 static ccp_prop_value_t
1595 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1596 {
1597   ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1598   ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1599   widest_int value, mask;
1600   ccp_prop_value_t val;
1601 
1602   if (r1val.lattice_val == UNDEFINED
1603       || r2val.lattice_val == UNDEFINED)
1604     {
1605       val.lattice_val = VARYING;
1606       val.value = NULL_TREE;
1607       val.mask = -1;
1608       return val;
1609     }
1610 
1611   gcc_assert ((r1val.lattice_val == CONSTANT
1612 	       && TREE_CODE (r1val.value) == INTEGER_CST)
1613 	      || wi::sext (r1val.mask,
1614 			   TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1615   gcc_assert ((r2val.lattice_val == CONSTANT
1616 	       && TREE_CODE (r2val.value) == INTEGER_CST)
1617 	      || wi::sext (r2val.mask,
1618 			   TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1619   bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1620 		   TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
1621 		   value_to_wide_int (r1val), r1val.mask,
1622 		   TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
1623 		   value_to_wide_int (r2val), r2val.mask);
1624 
1625   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1626     {
1627       val.lattice_val = CONSTANT;
1628       val.mask = mask;
1629       /* ???  Delay building trees here.  */
1630       val.value = wide_int_to_tree (type, value);
1631     }
1632   else
1633     {
1634       val.lattice_val = VARYING;
1635       val.value = NULL_TREE;
1636       val.mask = -1;
1637     }
1638   return val;
1639 }
1640 
1641 /* Return the propagation value for __builtin_assume_aligned
1642    and functions with assume_aligned or alloc_aligned attribute.
1643    For __builtin_assume_aligned, ATTR is NULL_TREE,
1644    for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1645    is false, for alloc_aligned attribute ATTR is non-NULL and
1646    ALLOC_ALIGNED is true.  */
1647 
1648 static ccp_prop_value_t
1649 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1650 			  bool alloc_aligned)
1651 {
1652   tree align, misalign = NULL_TREE, type;
1653   unsigned HOST_WIDE_INT aligni, misaligni = 0;
1654   ccp_prop_value_t alignval;
1655   widest_int value, mask;
1656   ccp_prop_value_t val;
1657 
1658   if (attr == NULL_TREE)
1659     {
1660       tree ptr = gimple_call_arg (stmt, 0);
1661       type = TREE_TYPE (ptr);
1662       ptrval = get_value_for_expr (ptr, true);
1663     }
1664   else
1665     {
1666       tree lhs = gimple_call_lhs (stmt);
1667       type = TREE_TYPE (lhs);
1668     }
1669 
1670   if (ptrval.lattice_val == UNDEFINED)
1671     return ptrval;
1672   gcc_assert ((ptrval.lattice_val == CONSTANT
1673 	       && TREE_CODE (ptrval.value) == INTEGER_CST)
1674 	      || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1675   if (attr == NULL_TREE)
1676     {
1677       /* Get aligni and misaligni from __builtin_assume_aligned.  */
1678       align = gimple_call_arg (stmt, 1);
1679       if (!tree_fits_uhwi_p (align))
1680 	return ptrval;
1681       aligni = tree_to_uhwi (align);
1682       if (gimple_call_num_args (stmt) > 2)
1683 	{
1684 	  misalign = gimple_call_arg (stmt, 2);
1685 	  if (!tree_fits_uhwi_p (misalign))
1686 	    return ptrval;
1687 	  misaligni = tree_to_uhwi (misalign);
1688 	}
1689     }
1690   else
1691     {
1692       /* Get aligni and misaligni from assume_aligned or
1693 	 alloc_align attributes.  */
1694       if (TREE_VALUE (attr) == NULL_TREE)
1695 	return ptrval;
1696       attr = TREE_VALUE (attr);
1697       align = TREE_VALUE (attr);
1698       if (!tree_fits_uhwi_p (align))
1699 	return ptrval;
1700       aligni = tree_to_uhwi (align);
1701       if (alloc_aligned)
1702 	{
1703 	  if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1704 	    return ptrval;
1705 	  align = gimple_call_arg (stmt, aligni - 1);
1706 	  if (!tree_fits_uhwi_p (align))
1707 	    return ptrval;
1708 	  aligni = tree_to_uhwi (align);
1709 	}
1710       else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1711 	{
1712 	  misalign = TREE_VALUE (TREE_CHAIN (attr));
1713 	  if (!tree_fits_uhwi_p (misalign))
1714 	    return ptrval;
1715 	  misaligni = tree_to_uhwi (misalign);
1716 	}
1717     }
1718   if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1719     return ptrval;
1720 
1721   align = build_int_cst_type (type, -aligni);
1722   alignval = get_value_for_expr (align, true);
1723   bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1724 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (ptrval), ptrval.mask,
1725 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (alignval), alignval.mask);
1726 
1727   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1728     {
1729       val.lattice_val = CONSTANT;
1730       val.mask = mask;
1731       gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1732       gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1733       value |= misaligni;
1734       /* ???  Delay building trees here.  */
1735       val.value = wide_int_to_tree (type, value);
1736     }
1737   else
1738     {
1739       val.lattice_val = VARYING;
1740       val.value = NULL_TREE;
1741       val.mask = -1;
1742     }
1743   return val;
1744 }
1745 
1746 /* Evaluate statement STMT.
1747    Valid only for assignments, calls, conditionals, and switches. */
1748 
1749 static ccp_prop_value_t
1750 evaluate_stmt (gimple *stmt)
1751 {
1752   ccp_prop_value_t val;
1753   tree simplified = NULL_TREE;
1754   ccp_lattice_t likelyvalue = likely_value (stmt);
1755   bool is_constant = false;
1756   unsigned int align;
1757 
1758   if (dump_file && (dump_flags & TDF_DETAILS))
1759     {
1760       fprintf (dump_file, "which is likely ");
1761       switch (likelyvalue)
1762 	{
1763 	case CONSTANT:
1764 	  fprintf (dump_file, "CONSTANT");
1765 	  break;
1766 	case UNDEFINED:
1767 	  fprintf (dump_file, "UNDEFINED");
1768 	  break;
1769 	case VARYING:
1770 	  fprintf (dump_file, "VARYING");
1771 	  break;
1772 	default:;
1773 	}
1774       fprintf (dump_file, "\n");
1775     }
1776 
1777   /* If the statement is likely to have a CONSTANT result, then try
1778      to fold the statement to determine the constant value.  */
1779   /* FIXME.  This is the only place that we call ccp_fold.
1780      Since likely_value never returns CONSTANT for calls, we will
1781      not attempt to fold them, including builtins that may profit.  */
1782   if (likelyvalue == CONSTANT)
1783     {
1784       fold_defer_overflow_warnings ();
1785       simplified = ccp_fold (stmt);
1786       if (simplified
1787 	  && TREE_CODE (simplified) == SSA_NAME)
1788 	{
1789 	  /* We may not use values of something that may be simulated again,
1790 	     see valueize_op_1.  */
1791 	  if (SSA_NAME_IS_DEFAULT_DEF (simplified)
1792 	      || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
1793 	    {
1794 	      ccp_prop_value_t *val = get_value (simplified);
1795 	      if (val && val->lattice_val != VARYING)
1796 		{
1797 		  fold_undefer_overflow_warnings (true, stmt, 0);
1798 		  return *val;
1799 		}
1800 	    }
1801 	  else
1802 	    /* We may also not place a non-valueized copy in the lattice
1803 	       as that might become stale if we never re-visit this stmt.  */
1804 	    simplified = NULL_TREE;
1805 	}
1806       is_constant = simplified && is_gimple_min_invariant (simplified);
1807       fold_undefer_overflow_warnings (is_constant, stmt, 0);
1808       if (is_constant)
1809 	{
1810 	  /* The statement produced a constant value.  */
1811 	  val.lattice_val = CONSTANT;
1812 	  val.value = simplified;
1813 	  val.mask = 0;
1814 	  return val;
1815 	}
1816     }
1817   /* If the statement is likely to have a VARYING result, then do not
1818      bother folding the statement.  */
1819   else if (likelyvalue == VARYING)
1820     {
1821       enum gimple_code code = gimple_code (stmt);
1822       if (code == GIMPLE_ASSIGN)
1823         {
1824           enum tree_code subcode = gimple_assign_rhs_code (stmt);
1825 
1826           /* Other cases cannot satisfy is_gimple_min_invariant
1827              without folding.  */
1828           if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1829             simplified = gimple_assign_rhs1 (stmt);
1830         }
1831       else if (code == GIMPLE_SWITCH)
1832         simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1833       else
1834 	/* These cannot satisfy is_gimple_min_invariant without folding.  */
1835 	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1836       is_constant = simplified && is_gimple_min_invariant (simplified);
1837       if (is_constant)
1838 	{
1839 	  /* The statement produced a constant value.  */
1840 	  val.lattice_val = CONSTANT;
1841 	  val.value = simplified;
1842 	  val.mask = 0;
1843 	}
1844     }
1845   /* If the statement result is likely UNDEFINED, make it so.  */
1846   else if (likelyvalue == UNDEFINED)
1847     {
1848       val.lattice_val = UNDEFINED;
1849       val.value = NULL_TREE;
1850       val.mask = 0;
1851       return val;
1852     }
1853 
1854   /* Resort to simplification for bitwise tracking.  */
1855   if (flag_tree_bit_ccp
1856       && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1857 	  || (gimple_assign_single_p (stmt)
1858 	      && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1859       && !is_constant)
1860     {
1861       enum gimple_code code = gimple_code (stmt);
1862       val.lattice_val = VARYING;
1863       val.value = NULL_TREE;
1864       val.mask = -1;
1865       if (code == GIMPLE_ASSIGN)
1866 	{
1867 	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
1868 	  tree rhs1 = gimple_assign_rhs1 (stmt);
1869 	  tree lhs = gimple_assign_lhs (stmt);
1870 	  if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1871 	       || POINTER_TYPE_P (TREE_TYPE (lhs)))
1872 	      && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1873 		  || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1874 	    switch (get_gimple_rhs_class (subcode))
1875 	      {
1876 	      case GIMPLE_SINGLE_RHS:
1877 	        val = get_value_for_expr (rhs1, true);
1878 		break;
1879 
1880 	      case GIMPLE_UNARY_RHS:
1881 		val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1882 		break;
1883 
1884 	      case GIMPLE_BINARY_RHS:
1885 		val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1886 				       gimple_assign_rhs2 (stmt));
1887 		break;
1888 
1889 	      default:;
1890 	      }
1891 	}
1892       else if (code == GIMPLE_COND)
1893 	{
1894 	  enum tree_code code = gimple_cond_code (stmt);
1895 	  tree rhs1 = gimple_cond_lhs (stmt);
1896 	  tree rhs2 = gimple_cond_rhs (stmt);
1897 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1898 	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1899 	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1900 	}
1901       else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1902 	{
1903 	  tree fndecl = gimple_call_fndecl (stmt);
1904 	  switch (DECL_FUNCTION_CODE (fndecl))
1905 	    {
1906 	    case BUILT_IN_MALLOC:
1907 	    case BUILT_IN_REALLOC:
1908 	    case BUILT_IN_CALLOC:
1909 	    case BUILT_IN_STRDUP:
1910 	    case BUILT_IN_STRNDUP:
1911 	      val.lattice_val = CONSTANT;
1912 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1913 	      val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1914 			   / BITS_PER_UNIT - 1);
1915 	      break;
1916 
1917 	    CASE_BUILT_IN_ALLOCA:
1918 	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA
1919 		       ? BIGGEST_ALIGNMENT
1920 		       : TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
1921 	      val.lattice_val = CONSTANT;
1922 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1923 	      val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1924 	      break;
1925 
1926 	    /* These builtins return their first argument, unmodified.  */
1927 	    case BUILT_IN_MEMCPY:
1928 	    case BUILT_IN_MEMMOVE:
1929 	    case BUILT_IN_MEMSET:
1930 	    case BUILT_IN_STRCPY:
1931 	    case BUILT_IN_STRNCPY:
1932 	    case BUILT_IN_MEMCPY_CHK:
1933 	    case BUILT_IN_MEMMOVE_CHK:
1934 	    case BUILT_IN_MEMSET_CHK:
1935 	    case BUILT_IN_STRCPY_CHK:
1936 	    case BUILT_IN_STRNCPY_CHK:
1937 	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1938 	      break;
1939 
1940 	    case BUILT_IN_ASSUME_ALIGNED:
1941 	      val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1942 	      break;
1943 
1944 	    case BUILT_IN_ALIGNED_ALLOC:
1945 	      {
1946 		tree align = get_constant_value (gimple_call_arg (stmt, 0));
1947 		if (align
1948 		    && tree_fits_uhwi_p (align))
1949 		  {
1950 		    unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1951 		    if (aligni > 1
1952 			/* align must be power-of-two */
1953 			&& (aligni & (aligni - 1)) == 0)
1954 		      {
1955 			val.lattice_val = CONSTANT;
1956 			val.value = build_int_cst (ptr_type_node, 0);
1957 			val.mask = -aligni;
1958 		      }
1959 		  }
1960 		break;
1961 	      }
1962 
1963 	    default:;
1964 	    }
1965 	}
1966       if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
1967 	{
1968 	  tree fntype = gimple_call_fntype (stmt);
1969 	  if (fntype)
1970 	    {
1971 	      tree attrs = lookup_attribute ("assume_aligned",
1972 					     TYPE_ATTRIBUTES (fntype));
1973 	      if (attrs)
1974 		val = bit_value_assume_aligned (stmt, attrs, val, false);
1975 	      attrs = lookup_attribute ("alloc_align",
1976 					TYPE_ATTRIBUTES (fntype));
1977 	      if (attrs)
1978 		val = bit_value_assume_aligned (stmt, attrs, val, true);
1979 	    }
1980 	}
1981       is_constant = (val.lattice_val == CONSTANT);
1982     }
1983 
1984   if (flag_tree_bit_ccp
1985       && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
1986 	  || !is_constant)
1987       && gimple_get_lhs (stmt)
1988       && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
1989     {
1990       tree lhs = gimple_get_lhs (stmt);
1991       wide_int nonzero_bits = get_nonzero_bits (lhs);
1992       if (nonzero_bits != -1)
1993 	{
1994 	  if (!is_constant)
1995 	    {
1996 	      val.lattice_val = CONSTANT;
1997 	      val.value = build_zero_cst (TREE_TYPE (lhs));
1998 	      val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
1999 	      is_constant = true;
2000 	    }
2001 	  else
2002 	    {
2003 	      if (wi::bit_and_not (wi::to_wide (val.value), nonzero_bits) != 0)
2004 		val.value = wide_int_to_tree (TREE_TYPE (lhs),
2005 					      nonzero_bits
2006 					      & wi::to_wide (val.value));
2007 	      if (nonzero_bits == 0)
2008 		val.mask = 0;
2009 	      else
2010 		val.mask = val.mask & extend_mask (nonzero_bits,
2011 						   TYPE_SIGN (TREE_TYPE (lhs)));
2012 	    }
2013 	}
2014     }
2015 
2016   /* The statement produced a nonconstant value.  */
2017   if (!is_constant)
2018     {
2019       /* The statement produced a copy.  */
2020       if (simplified && TREE_CODE (simplified) == SSA_NAME
2021 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
2022 	{
2023 	  val.lattice_val = CONSTANT;
2024 	  val.value = simplified;
2025 	  val.mask = -1;
2026 	}
2027       /* The statement is VARYING.  */
2028       else
2029 	{
2030 	  val.lattice_val = VARYING;
2031 	  val.value = NULL_TREE;
2032 	  val.mask = -1;
2033 	}
2034     }
2035 
2036   return val;
2037 }
2038 
2039 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
2040 
2041 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2042    each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */
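/* For illustration only (hypothetical GIMPLE, not from the sources):

     saved_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (saved_1);

   becomes

     saved_1 = __builtin_stack_save ();
     ...
     var ={v} {CLOBBER};
     __builtin_stack_restore (saved_1);

   marking the end of VAR's lifetime at the point where the saved stack
   pointer is restored.  Copies and PHIs of SAVED_VAL are followed
   transitively.  */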
2043 
2044 static void
2045 insert_clobber_before_stack_restore (tree saved_val, tree var,
2046 				     gimple_htab **visited)
2047 {
2048   gimple *stmt;
2049   gassign *clobber_stmt;
2050   tree clobber;
2051   imm_use_iterator iter;
2052   gimple_stmt_iterator i;
2053   gimple **slot;
2054 
2055   FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
2056     if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
2057       {
2058 	clobber = build_constructor (TREE_TYPE (var),
2059 				     NULL);
2060 	TREE_THIS_VOLATILE (clobber) = 1;
2061 	clobber_stmt = gimple_build_assign (var, clobber);
2062 
2063 	i = gsi_for_stmt (stmt);
2064 	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2065       }
2066     else if (gimple_code (stmt) == GIMPLE_PHI)
2067       {
2068 	if (!*visited)
2069 	  *visited = new gimple_htab (10);
2070 
2071 	slot = (*visited)->find_slot (stmt, INSERT);
2072 	if (*slot != NULL)
2073 	  continue;
2074 
2075 	*slot = stmt;
2076 	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2077 					     visited);
2078       }
2079     else if (gimple_assign_ssa_name_copy_p (stmt))
2080       insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2081 					   visited);
2082 }
2083 
2084 /* Advance the iterator to the previous non-debug gimple statement in the same
2085    or dominating basic block.  */
2086 
2087 static inline void
2088 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2089 {
2090   basic_block dom;
2091 
2092   gsi_prev_nondebug (i);
2093   while (gsi_end_p (*i))
2094     {
2095       dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2096       if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2097 	return;
2098 
2099       *i = gsi_last_bb (dom);
2100     }
2101 }
2102 
2103 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2104    a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2105 
2106    It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
2107    a previous pass (such as DOM) duplicated it along multiple paths to a BB.
2108    In that case the function gives up without inserting the clobbers.  */
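/* Hypothetical layout, purely for illustration: with the folded alloca in
   BB4 and

     BB2:  saved_1 = __builtin_stack_save ();
     BB3:  ...
     BB4:  <folded __builtin_alloca_with_align, now array VAR>

   the search walks backwards from the folding point through BB3 and BB2 via
   the dominator tree until the stack-save is found, and then clobbers of VAR
   are inserted before each matching __builtin_stack_restore.  */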
2109 
2110 static void
2111 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2112 {
2113   gimple *stmt;
2114   tree saved_val;
2115   gimple_htab *visited = NULL;
2116 
2117   for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2118     {
2119       stmt = gsi_stmt (i);
2120 
2121       if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2122 	continue;
2123 
2124       saved_val = gimple_call_lhs (stmt);
2125       if (saved_val == NULL_TREE)
2126 	continue;
2127 
2128       insert_clobber_before_stack_restore (saved_val, var, &visited);
2129       break;
2130     }
2131 
2132   delete visited;
2133 }
2134 
2135 /* Detect a __builtin_alloca_with_align call with a constant size argument.
2136    If found, declare a fixed-size array and return its address; otherwise
2137    return NULL_TREE.  */
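/* A sketch of the source-level effect (hypothetical example):

     char *p = __builtin_alloca_with_align (32, 64);

   is folded, when the size is below the threshold, into the address of a
   compiler-generated local array of 32 bytes whose DECL_ALIGN is taken from
   the second argument (in bits), roughly

     unsigned char D.1234[32];
     p = (char *) &D.1234;

   where D.1234 is just a made-up temporary name.  */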
2138 
2139 static tree
2140 fold_builtin_alloca_with_align (gimple *stmt)
2141 {
2142   unsigned HOST_WIDE_INT size, threshold, n_elem;
2143   tree lhs, arg, block, var, elem_type, array_type;
2144 
2145   /* Get lhs.  */
2146   lhs = gimple_call_lhs (stmt);
2147   if (lhs == NULL_TREE)
2148     return NULL_TREE;
2149 
2150   /* Detect constant argument.  */
2151   arg = get_constant_value (gimple_call_arg (stmt, 0));
2152   if (arg == NULL_TREE
2153       || TREE_CODE (arg) != INTEGER_CST
2154       || !tree_fits_uhwi_p (arg))
2155     return NULL_TREE;
2156 
2157   size = tree_to_uhwi (arg);
2158 
2159   /* Heuristic: don't fold large allocas.  */
2160   threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2161   /* In case the alloca is located at function entry, it has the same lifetime
2162      as a declared array, so we allow a larger size.  */
2163   block = gimple_block (stmt);
2164   if (!(cfun->after_inlining
2165 	&& block
2166         && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2167     threshold /= 10;
2168   if (size > threshold)
2169     return NULL_TREE;
2170 
2171   /* We have to be able to move points-to info.  We used to assert
2172      that we can but IPA PTA might end up with two UIDs here
2173      as it might need to handle more than one instance being
2174      live at the same time.  Instead of trying to detect this case
2175      (using the first UID would be OK) just give up for now.  */
2176   struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2177   unsigned uid = 0;
2178   if (pi != NULL
2179       && !pi->pt.anything
2180       && !pt_solution_singleton_or_null_p (&pi->pt, &uid))
2181     return NULL_TREE;
2182 
2183   /* Declare array.  */
2184   elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2185   n_elem = size * 8 / BITS_PER_UNIT;
2186   array_type = build_array_type_nelts (elem_type, n_elem);
2187   var = create_tmp_var (array_type);
2188   SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2189   if (uid != 0)
2190     SET_DECL_PT_UID (var, uid);
2191 
2192   /* Fold alloca to the address of the array.  */
2193   return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2194 }
2195 
2196 /* Fold the stmt at *GSI with CCP specific information that propagating
2197    and regular folding does not catch.  */
2198 
2199 bool
2200 ccp_folder::fold_stmt (gimple_stmt_iterator *gsi)
2201 {
2202   gimple *stmt = gsi_stmt (*gsi);
2203 
2204   switch (gimple_code (stmt))
2205     {
2206     case GIMPLE_COND:
2207       {
2208 	gcond *cond_stmt = as_a <gcond *> (stmt);
2209 	ccp_prop_value_t val;
2210 	/* Statement evaluation will handle type mismatches in constants
2211 	   more gracefully than the final propagation.  This allows us to
2212 	   fold more conditionals here.  */
2213 	val = evaluate_stmt (stmt);
2214 	if (val.lattice_val != CONSTANT
2215 	    || val.mask != 0)
2216 	  return false;
2217 
2218 	if (dump_file)
2219 	  {
2220 	    fprintf (dump_file, "Folding predicate ");
2221 	    print_gimple_expr (dump_file, stmt, 0);
2222 	    fprintf (dump_file, " to ");
2223 	    print_generic_expr (dump_file, val.value);
2224 	    fprintf (dump_file, "\n");
2225 	  }
2226 
2227 	if (integer_zerop (val.value))
2228 	  gimple_cond_make_false (cond_stmt);
2229 	else
2230 	  gimple_cond_make_true (cond_stmt);
2231 
2232 	return true;
2233       }
2234 
2235     case GIMPLE_CALL:
2236       {
2237 	tree lhs = gimple_call_lhs (stmt);
2238 	int flags = gimple_call_flags (stmt);
2239 	tree val;
2240 	tree argt;
2241 	bool changed = false;
2242 	unsigned i;
2243 
2244 	/* If the call was folded into a constant make sure it goes
2245 	   away even if we cannot propagate into all uses because of
2246 	   type issues.  */
2247 	if (lhs
2248 	    && TREE_CODE (lhs) == SSA_NAME
2249 	    && (val = get_constant_value (lhs))
2250 	    /* Don't optimize away calls that have side-effects.  */
2251 	    && (flags & (ECF_CONST|ECF_PURE)) != 0
2252 	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2253 	  {
2254 	    tree new_rhs = unshare_expr (val);
2255 	    bool res;
2256 	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
2257 					    TREE_TYPE (new_rhs)))
2258 	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2259 	    res = update_call_from_tree (gsi, new_rhs);
2260 	    gcc_assert (res);
2261 	    return true;
2262 	  }
2263 
2264 	/* Internal calls provide no argument types, so the extra laxity
2265 	   for normal calls does not apply.  */
2266 	if (gimple_call_internal_p (stmt))
2267 	  return false;
2268 
2269         /* The heuristic of fold_builtin_alloca_with_align differs before and
2270 	   after inlining, so we don't require the arg to be changed into a
2271 	   constant for folding, but just to be constant.  */
2272         if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)
2273 	    || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX))
2274           {
2275             tree new_rhs = fold_builtin_alloca_with_align (stmt);
2276             if (new_rhs)
2277 	      {
2278 		bool res = update_call_from_tree (gsi, new_rhs);
2279 		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
2280 		gcc_assert (res);
2281 		insert_clobbers_for_var (*gsi, var);
2282 		return true;
2283 	      }
2284           }
2285 
2286 	/* Propagate into the call arguments.  Compared to replace_uses_in
2287 	   this can use the argument slot types for type verification
2288 	   instead of the current argument type.  We also can safely
2289 	   drop qualifiers here as we are dealing with constants anyway.  */
2290 	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2291 	for (i = 0; i < gimple_call_num_args (stmt) && argt;
2292 	     ++i, argt = TREE_CHAIN (argt))
2293 	  {
2294 	    tree arg = gimple_call_arg (stmt, i);
2295 	    if (TREE_CODE (arg) == SSA_NAME
2296 		&& (val = get_constant_value (arg))
2297 		&& useless_type_conversion_p
2298 		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2299 		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2300 	      {
2301 		gimple_call_set_arg (stmt, i, unshare_expr (val));
2302 		changed = true;
2303 	      }
2304 	  }
2305 
2306 	return changed;
2307       }
2308 
2309     case GIMPLE_ASSIGN:
2310       {
2311 	tree lhs = gimple_assign_lhs (stmt);
2312 	tree val;
2313 
2314 	/* If we have a load that turned out to be constant replace it
2315 	   as we cannot propagate into all uses in all cases.  */
2316 	if (gimple_assign_single_p (stmt)
2317 	    && TREE_CODE (lhs) == SSA_NAME
2318 	    && (val = get_constant_value (lhs)))
2319 	  {
2320 	    tree rhs = unshare_expr (val);
2321 	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2322 	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2323 	    gimple_assign_set_rhs_from_tree (gsi, rhs);
2324 	    return true;
2325 	  }
2326 
2327 	return false;
2328       }
2329 
2330     default:
2331       return false;
2332     }
2333 }
2334 
2335 /* Visit the assignment statement STMT.  Set the value of its LHS to the
2336    value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
2337    creates virtual definitions, set the value of each new name to that
2338    of the RHS (if we can derive a constant out of the RHS).
2339    Value-returning call statements also perform an assignment, and
2340    are handled here.  */
2341 
2342 static enum ssa_prop_result
2343 visit_assignment (gimple *stmt, tree *output_p)
2344 {
2345   ccp_prop_value_t val;
2346   enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2347 
2348   tree lhs = gimple_get_lhs (stmt);
2349   if (TREE_CODE (lhs) == SSA_NAME)
2350     {
2351       /* Evaluate the statement, which could be
2352 	 either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
2353       val = evaluate_stmt (stmt);
2354 
2355       /* If STMT is an assignment to an SSA_NAME, we only have one
2356 	 value to set.  */
2357       if (set_lattice_value (lhs, &val))
2358 	{
2359 	  *output_p = lhs;
2360 	  if (val.lattice_val == VARYING)
2361 	    retval = SSA_PROP_VARYING;
2362 	  else
2363 	    retval = SSA_PROP_INTERESTING;
2364 	}
2365     }
2366 
2367   return retval;
2368 }
2369 
2370 
2371 /* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
2372    if it can determine which edge will be taken.  Otherwise, return
2373    SSA_PROP_VARYING.  */
2374 
2375 static enum ssa_prop_result
2376 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2377 {
2378   ccp_prop_value_t val;
2379   basic_block block;
2380 
2381   block = gimple_bb (stmt);
2382   val = evaluate_stmt (stmt);
2383   if (val.lattice_val != CONSTANT
2384       || val.mask != 0)
2385     return SSA_PROP_VARYING;
2386 
2387   /* Find which edge out of the conditional block will be taken and add it
2388      to the worklist.  If no single edge can be determined statically,
2389      return SSA_PROP_VARYING to feed all the outgoing edges to the
2390      propagation engine.  */
2391   *taken_edge_p = find_taken_edge (block, val.value);
2392   if (*taken_edge_p)
2393     return SSA_PROP_INTERESTING;
2394   else
2395     return SSA_PROP_VARYING;
2396 }
2397 
2398 
2399 /* Evaluate statement STMT.  If the statement produces an output value and
2400    its evaluation changes the lattice value of its output, return
2401    SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2402    output value.
2403 
2404    If STMT is a conditional branch and we can determine its truth
2405    value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
2406    value, return SSA_PROP_VARYING.  */
2407 
2408 enum ssa_prop_result
2409 ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2410 {
2411   tree def;
2412   ssa_op_iter iter;
2413 
2414   if (dump_file && (dump_flags & TDF_DETAILS))
2415     {
2416       fprintf (dump_file, "\nVisiting statement:\n");
2417       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2418     }
2419 
2420   switch (gimple_code (stmt))
2421     {
2422       case GIMPLE_ASSIGN:
2423         /* If the statement is an assignment that produces a single
2424            output value, evaluate its RHS to see if the lattice value of
2425            its output has changed.  */
2426         return visit_assignment (stmt, output_p);
2427 
2428       case GIMPLE_CALL:
2429         /* A value-returning call also performs an assignment.  */
2430         if (gimple_call_lhs (stmt) != NULL_TREE)
2431           return visit_assignment (stmt, output_p);
2432         break;
2433 
2434       case GIMPLE_COND:
2435       case GIMPLE_SWITCH:
2436         /* If STMT is a conditional branch, see if we can determine
2437            which branch will be taken.   */
2438         /* FIXME.  It appears that we should be able to optimize
2439            computed GOTOs here as well.  */
2440         return visit_cond_stmt (stmt, taken_edge_p);
2441 
2442       default:
2443         break;
2444     }
2445 
2446   /* Any other kind of statement is not interesting for constant
2447      propagation and, therefore, not worth simulating.  */
2448   if (dump_file && (dump_flags & TDF_DETAILS))
2449     fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");
2450 
2451   /* Definitions made by statements other than assignments to
2452      SSA_NAMEs represent unknown modifications to their outputs.
2453      Mark them VARYING.  */
2454   FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2455     set_value_varying (def);
2456 
2457   return SSA_PROP_VARYING;
2458 }
2459 
2460 
2461 /* Main entry point for SSA Conditional Constant Propagation.  If NONZERO_P,
2462    record nonzero bits.  */
2463 
2464 static unsigned int
2465 do_ssa_ccp (bool nonzero_p)
2466 {
2467   unsigned int todo = 0;
2468   calculate_dominance_info (CDI_DOMINATORS);
2469 
2470   ccp_initialize ();
2471   class ccp_propagate ccp_propagate;
2472   ccp_propagate.ssa_propagate ();
2473   if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
2474     {
2475       todo = (TODO_cleanup_cfg | TODO_update_ssa);
2476 
2477       /* ccp_finalize does not preserve loop-closed ssa.  */
2478       loops_state_clear (LOOP_CLOSED_SSA);
2479     }
2480 
2481   free_dominance_info (CDI_DOMINATORS);
2482   return todo;
2483 }
2484 
2485 
2486 namespace {
2487 
2488 const pass_data pass_data_ccp =
2489 {
2490   GIMPLE_PASS, /* type */
2491   "ccp", /* name */
2492   OPTGROUP_NONE, /* optinfo_flags */
2493   TV_TREE_CCP, /* tv_id */
2494   ( PROP_cfg | PROP_ssa ), /* properties_required */
2495   0, /* properties_provided */
2496   0, /* properties_destroyed */
2497   0, /* todo_flags_start */
2498   TODO_update_address_taken, /* todo_flags_finish */
2499 };
2500 
2501 class pass_ccp : public gimple_opt_pass
2502 {
2503 public:
2504   pass_ccp (gcc::context *ctxt)
2505     : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2506   {}
2507 
2508   /* opt_pass methods: */
2509   opt_pass * clone () { return new pass_ccp (m_ctxt); }
2510   void set_pass_param (unsigned int n, bool param)
2511     {
2512       gcc_assert (n == 0);
2513       nonzero_p = param;
2514     }
2515   virtual bool gate (function *) { return flag_tree_ccp != 0; }
2516   virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
2517 
2518  private:
2519   /* Determines whether the pass instance records nonzero bits.  */
2520   bool nonzero_p;
2521 }; // class pass_ccp
2522 
2523 } // anon namespace
2524 
2525 gimple_opt_pass *
2526 make_pass_ccp (gcc::context *ctxt)
2527 {
2528   return new pass_ccp (ctxt);
2529 }
2530 
2531 
2532 
2533 /* Try to optimize out __builtin_stack_restore.  Optimize it out
2534    if there is another __builtin_stack_restore in the same basic
2535    block and no calls or ASM_EXPRs are in between, or if this block's
2536    only outgoing edge is to EXIT_BLOCK and there are no calls or
2537    ASM_EXPRs after this __builtin_stack_restore.  */
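/* Sketch of the pattern this handles (hypothetical GIMPLE):

     saved_1 = __builtin_stack_save ();
     ...                                  <- no calls or asm in between
     __builtin_stack_restore (saved_1);
     ...
     __builtin_stack_restore (saved_2);   <- later restore in the same block

   The first restore has no observable effect and is deleted; if its argument
   was the only use of saved_1, the matching __builtin_stack_save call is
   replaced by a constant zero so it can be removed as well.  */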
2538 
2539 static tree
2540 optimize_stack_restore (gimple_stmt_iterator i)
2541 {
2542   tree callee;
2543   gimple *stmt;
2544 
2545   basic_block bb = gsi_bb (i);
2546   gimple *call = gsi_stmt (i);
2547 
2548   if (gimple_code (call) != GIMPLE_CALL
2549       || gimple_call_num_args (call) != 1
2550       || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2551       || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2552     return NULL_TREE;
2553 
2554   for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2555     {
2556       stmt = gsi_stmt (i);
2557       if (gimple_code (stmt) == GIMPLE_ASM)
2558 	return NULL_TREE;
2559       if (gimple_code (stmt) != GIMPLE_CALL)
2560 	continue;
2561 
2562       callee = gimple_call_fndecl (stmt);
2563       if (!callee
2564 	  || !fndecl_built_in_p (callee, BUILT_IN_NORMAL)
2565 	  /* All regular builtins are ok, just obviously not alloca.  */
2566 	  || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
2567 	return NULL_TREE;
2568 
2569       if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2570 	goto second_stack_restore;
2571     }
2572 
2573   if (!gsi_end_p (i))
2574     return NULL_TREE;
2575 
2576   /* Allow a single successor that is the exit block, or zero successors.  */
2577   switch (EDGE_COUNT (bb->succs))
2578     {
2579     case 0:
2580       break;
2581     case 1:
2582       if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2583 	return NULL_TREE;
2584       break;
2585     default:
2586       return NULL_TREE;
2587     }
2588  second_stack_restore:
2589 
2590   /* If there's exactly one use, then zap the call to __builtin_stack_save.
2591      If there are multiple uses, then the last one should remove the call.
2592      In any case, whether the call to __builtin_stack_save can be removed
2593      or not is irrelevant to removing the call to __builtin_stack_restore.  */
2594   if (has_single_use (gimple_call_arg (call, 0)))
2595     {
2596       gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2597       if (is_gimple_call (stack_save))
2598 	{
2599 	  callee = gimple_call_fndecl (stack_save);
2600 	  if (callee && fndecl_built_in_p (callee, BUILT_IN_STACK_SAVE))
2601 	    {
2602 	      gimple_stmt_iterator stack_save_gsi;
2603 	      tree rhs;
2604 
2605 	      stack_save_gsi = gsi_for_stmt (stack_save);
2606 	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2607 	      update_call_from_tree (&stack_save_gsi, rhs);
2608 	    }
2609 	}
2610     }
2611 
2612   /* No effect, so the statement will be deleted.  */
2613   return integer_zero_node;
2614 }
2615 
2616 /* If the va_list type is a simple pointer and nothing special is needed,
2617    optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2618    optimize __builtin_va_end (&ap) away as a no-op, and turn
2619    __builtin_va_copy into a simple pointer assignment.  */
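/* Illustrative sketch (hypothetical source), valid only when the target
   ABI's va_list is a plain char/void pointer:

     __builtin_va_start (&ap, 0);   ->  ap = __builtin_next_arg (0);
     __builtin_va_copy (&d, s);     ->  d = s;
     __builtin_va_end (&ap);        ->  (removed)  */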
2620 
2621 static tree
2622 optimize_stdarg_builtin (gimple *call)
2623 {
2624   tree callee, lhs, rhs, cfun_va_list;
2625   bool va_list_simple_ptr;
2626   location_t loc = gimple_location (call);
2627 
2628   if (gimple_code (call) != GIMPLE_CALL)
2629     return NULL_TREE;
2630 
2631   callee = gimple_call_fndecl (call);
2632 
2633   cfun_va_list = targetm.fn_abi_va_list (callee);
2634   va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2635 		       && (TREE_TYPE (cfun_va_list) == void_type_node
2636 			   || TREE_TYPE (cfun_va_list) == char_type_node);
2637 
2638   switch (DECL_FUNCTION_CODE (callee))
2639     {
2640     case BUILT_IN_VA_START:
2641       if (!va_list_simple_ptr
2642 	  || targetm.expand_builtin_va_start != NULL
2643 	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2644 	return NULL_TREE;
2645 
2646       if (gimple_call_num_args (call) != 2)
2647 	return NULL_TREE;
2648 
2649       lhs = gimple_call_arg (call, 0);
2650       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2651 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2652 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2653 	return NULL_TREE;
2654 
2655       lhs = build_fold_indirect_ref_loc (loc, lhs);
2656       rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2657                              1, integer_zero_node);
2658       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2659       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2660 
2661     case BUILT_IN_VA_COPY:
2662       if (!va_list_simple_ptr)
2663 	return NULL_TREE;
2664 
2665       if (gimple_call_num_args (call) != 2)
2666 	return NULL_TREE;
2667 
2668       lhs = gimple_call_arg (call, 0);
2669       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2670 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2671 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2672 	return NULL_TREE;
2673 
2674       lhs = build_fold_indirect_ref_loc (loc, lhs);
2675       rhs = gimple_call_arg (call, 1);
2676       if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2677 	  != TYPE_MAIN_VARIANT (cfun_va_list))
2678 	return NULL_TREE;
2679 
2680       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2681       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2682 
2683     case BUILT_IN_VA_END:
2684       /* No effect, so the statement will be deleted.  */
2685       return integer_zero_node;
2686 
2687     default:
2688       gcc_unreachable ();
2689     }
2690 }
2691 
2692 /* Attempt to make the block containing __builtin_unreachable at I unreachable
2693    by changing the incoming jumps.  Return true if at least one jump was changed.  */
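/* Sketch of the idea (hypothetical source): given

     if (cond_1)
       __builtin_unreachable ();

   the edge into the __builtin_unreachable block can never be taken in a
   valid execution, so the guarding GIMPLE_COND is rewritten to a constant
   false (or true, for the mirrored layout), letting CFG cleanup delete the
   dead block.  */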
2694 
2695 static bool
2696 optimize_unreachable (gimple_stmt_iterator i)
2697 {
2698   basic_block bb = gsi_bb (i);
2699   gimple_stmt_iterator gsi;
2700   gimple *stmt;
2701   edge_iterator ei;
2702   edge e;
2703   bool ret;
2704 
2705   if (flag_sanitize & SANITIZE_UNREACHABLE)
2706     return false;
2707 
2708   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2709     {
2710       stmt = gsi_stmt (gsi);
2711 
2712       if (is_gimple_debug (stmt))
2713        continue;
2714 
2715       if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2716 	{
2717 	  /* Verify we do not need to preserve the label.  */
2718 	  if (FORCED_LABEL (gimple_label_label (label_stmt)))
2719 	    return false;
2720 
2721 	  continue;
2722 	}
2723 
2724       /* Only handle the case that __builtin_unreachable is the first statement
2725 	 in the block.  We rely on DCE to remove stmts without side-effects
2726 	 before __builtin_unreachable.  */
2727       if (gsi_stmt (gsi) != gsi_stmt (i))
2728         return false;
2729     }
2730 
2731   ret = false;
2732   FOR_EACH_EDGE (e, ei, bb->preds)
2733     {
2734       gsi = gsi_last_bb (e->src);
2735       if (gsi_end_p (gsi))
2736 	continue;
2737 
2738       stmt = gsi_stmt (gsi);
2739       if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2740 	{
2741 	  if (e->flags & EDGE_TRUE_VALUE)
2742 	    gimple_cond_make_false (cond_stmt);
2743 	  else if (e->flags & EDGE_FALSE_VALUE)
2744 	    gimple_cond_make_true (cond_stmt);
2745 	  else
2746 	    gcc_unreachable ();
2747 	  update_stmt (cond_stmt);
2748 	}
2749       else
2750 	{
2751 	  /* Todo: handle other cases.  Note that unreachable switch case
2752 	     statements have already been removed.  */
2753 	  continue;
2754 	}
2755 
2756       ret = true;
2757     }
2758 
2759   return ret;
2760 }
2761 
2762 /* Optimize
2763      mask_2 = 1 << cnt_1;
2764      _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
2765      _5 = _4 & mask_2;
2766    to
2767      _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
2768      _5 = _4;
2769    If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
2770    is passed instead of 0, and the builtin just returns a zero
2771    or 1 value instead of the actual bit.
2772    Similarly for __sync_fetch_and_or_* (without the ", _3" part
2773    in there), and/or if mask_2 is a power of 2 constant.
2774    Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
2775    in that case.  And similarly for and instead of or, except that
2776    the second argument to the builtin needs to be one's complement
2777    of the mask instead of mask.  */
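/* At the source level the canonical beneficiary looks like (hypothetical):

     if (__atomic_fetch_or (&word, 1u << bit, __ATOMIC_SEQ_CST) & (1u << bit))
       ...

   which, on targets that implement the corresponding optab, can be emitted
   as a single bit-test-and-set style instruction instead of a full
   fetch-and-or followed by a mask and compare.  */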
2778 
2779 static void
2780 optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
2781 			      enum internal_fn fn, bool has_model_arg,
2782 			      bool after)
2783 {
2784   gimple *call = gsi_stmt (*gsip);
2785   tree lhs = gimple_call_lhs (call);
2786   use_operand_p use_p;
2787   gimple *use_stmt;
2788   tree mask, bit;
2789   optab optab;
2790 
2791   if (!flag_inline_atomics
2792       || optimize_debug
2793       || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
2794       || !lhs
2795       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2796       || !single_imm_use (lhs, &use_p, &use_stmt)
2797       || !is_gimple_assign (use_stmt)
2798       || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
2799       || !gimple_vdef (call))
2800     return;
2801 
2802   switch (fn)
2803     {
2804     case IFN_ATOMIC_BIT_TEST_AND_SET:
2805       optab = atomic_bit_test_and_set_optab;
2806       break;
2807     case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
2808       optab = atomic_bit_test_and_complement_optab;
2809       break;
2810     case IFN_ATOMIC_BIT_TEST_AND_RESET:
2811       optab = atomic_bit_test_and_reset_optab;
2812       break;
2813     default:
2814       return;
2815     }
2816 
2817   if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
2818     return;
2819 
2820   mask = gimple_call_arg (call, 1);
2821   tree use_lhs = gimple_assign_lhs (use_stmt);
2822   if (!use_lhs)
2823     return;
2824 
2825   if (TREE_CODE (mask) == INTEGER_CST)
2826     {
2827       if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2828 	mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
2829       mask = fold_convert (TREE_TYPE (lhs), mask);
2830       int ibit = tree_log2 (mask);
2831       if (ibit < 0)
2832 	return;
2833       bit = build_int_cst (TREE_TYPE (lhs), ibit);
2834     }
2835   else if (TREE_CODE (mask) == SSA_NAME)
2836     {
2837       gimple *g = SSA_NAME_DEF_STMT (mask);
2838       if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2839 	{
2840 	  if (!is_gimple_assign (g)
2841 	      || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
2842 	    return;
2843 	  mask = gimple_assign_rhs1 (g);
2844 	  if (TREE_CODE (mask) != SSA_NAME)
2845 	    return;
2846 	  g = SSA_NAME_DEF_STMT (mask);
2847 	}
2848       if (!is_gimple_assign (g)
2849 	  || gimple_assign_rhs_code (g) != LSHIFT_EXPR
2850 	  || !integer_onep (gimple_assign_rhs1 (g)))
2851 	return;
2852       bit = gimple_assign_rhs2 (g);
2853     }
2854   else
2855     return;
2856 
2857   if (gimple_assign_rhs1 (use_stmt) == lhs)
2858     {
2859       if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
2860 	return;
2861     }
2862   else if (gimple_assign_rhs2 (use_stmt) != lhs
2863 	   || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
2864     return;
2865 
2866   bool use_bool = true;
2867   bool has_debug_uses = false;
2868   imm_use_iterator iter;
2869   gimple *g;
2870 
2871   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
2872     use_bool = false;
2873   FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2874     {
2875       enum tree_code code = ERROR_MARK;
2876       tree op0 = NULL_TREE, op1 = NULL_TREE;
2877       if (is_gimple_debug (g))
2878 	{
2879 	  has_debug_uses = true;
2880 	  continue;
2881 	}
2882       else if (is_gimple_assign (g))
2883 	switch (gimple_assign_rhs_code (g))
2884 	  {
2885 	  case COND_EXPR:
2886 	    op1 = gimple_assign_rhs1 (g);
2887 	    code = TREE_CODE (op1);
2888 	    op0 = TREE_OPERAND (op1, 0);
2889 	    op1 = TREE_OPERAND (op1, 1);
2890 	    break;
2891 	  case EQ_EXPR:
2892 	  case NE_EXPR:
2893 	    code = gimple_assign_rhs_code (g);
2894 	    op0 = gimple_assign_rhs1 (g);
2895 	    op1 = gimple_assign_rhs2 (g);
2896 	    break;
2897 	  default:
2898 	    break;
2899 	  }
2900       else if (gimple_code (g) == GIMPLE_COND)
2901 	{
2902 	  code = gimple_cond_code (g);
2903 	  op0 = gimple_cond_lhs (g);
2904 	  op1 = gimple_cond_rhs (g);
2905 	}
2906 
2907       if ((code == EQ_EXPR || code == NE_EXPR)
2908 	  && op0 == use_lhs
2909 	  && integer_zerop (op1))
2910 	{
2911 	  use_operand_p use_p;
2912 	  int n = 0;
2913 	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2914 	    n++;
2915 	  if (n == 1)
2916 	    continue;
2917 	}
2918 
2919       use_bool = false;
2920       BREAK_FROM_IMM_USE_STMT (iter);
2921     }
2922 
2923   tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
2924   tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
2925   if (has_model_arg)
2926     g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
2927 				    bit, flag, gimple_call_arg (call, 2));
2928   else
2929     g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
2930 				    bit, flag);
2931   gimple_call_set_lhs (g, new_lhs);
2932   gimple_set_location (g, gimple_location (call));
2933   gimple_set_vuse (g, gimple_vuse (call));
2934   gimple_set_vdef (g, gimple_vdef (call));
2935   bool throws = stmt_can_throw_internal (cfun, call);
2936   gimple_call_set_nothrow (as_a <gcall *> (g),
2937 			   gimple_call_nothrow_p (as_a <gcall *> (call)));
2938   SSA_NAME_DEF_STMT (gimple_vdef (call)) = g;
2939   gimple_stmt_iterator gsi = *gsip;
2940   gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2941   edge e = NULL;
2942   if (throws)
2943     {
2944       maybe_clean_or_replace_eh_stmt (call, g);
2945       if (after || (use_bool && has_debug_uses))
2946 	e = find_fallthru_edge (gsi_bb (gsi)->succs);
2947     }
2948   if (after)
2949     {
2950       /* The internal function returns the value of the specified bit
2951 	 before the atomic operation.  If we are interested in the value
2952 	 of the specified bit after the atomic operation (makes only sense
2953 	 for xor, otherwise the bit content is compile time known),
2954 	 we need to invert the bit.  */
2955       g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
2956 			       BIT_XOR_EXPR, new_lhs,
2957 			       use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
2958 					: mask);
2959       new_lhs = gimple_assign_lhs (g);
2960       if (throws)
2961 	{
2962 	  gsi_insert_on_edge_immediate (e, g);
2963 	  gsi = gsi_for_stmt (g);
2964 	}
2965       else
2966 	gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2967     }
2968   if (use_bool && has_debug_uses)
2969     {
2970       tree temp = NULL_TREE;
2971       if (!throws || after || single_pred_p (e->dest))
2972 	{
2973 	  temp = make_node (DEBUG_EXPR_DECL);
2974 	  DECL_ARTIFICIAL (temp) = 1;
2975 	  TREE_TYPE (temp) = TREE_TYPE (lhs);
2976 	  SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
2977 	  tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
2978 	  g = gimple_build_debug_bind (temp, t, g);
2979 	  if (throws && !after)
2980 	    {
2981 	      gsi = gsi_after_labels (e->dest);
2982 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2983 	    }
2984 	  else
2985 	    gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2986 	}
2987       FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2988 	if (is_gimple_debug (g))
2989 	  {
2990 	    use_operand_p use_p;
2991 	    if (temp == NULL_TREE)
2992 	      gimple_debug_bind_reset_value (g);
2993 	    else
2994 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2995 		SET_USE (use_p, temp);
2996 	    update_stmt (g);
2997 	  }
2998     }
2999   SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
3000     = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
3001   replace_uses_by (use_lhs, new_lhs);
3002   gsi = gsi_for_stmt (use_stmt);
3003   gsi_remove (&gsi, true);
3004   release_defs (use_stmt);
3005   gsi_remove (gsip, true);
3006   release_ssa_name (lhs);
3007 }
3008 
3009 /* Optimize
3010    a = {};
3011    b = a;
3012    into
3013    a = {};
3014    b = {};
3015    Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
3016    and/or memcpy (&b, &a, sizeof (a)); instead of b = a;  */
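/* A hypothetical source-level example of the second form:

     struct S a, b;
     memset (&a, 0, sizeof (a));
     memcpy (&b, &a, sizeof (a));   ->  memset (&b, 0, sizeof (a));

   The copy is rewritten only when the copied range is proven to be a subset
   of the range the earlier store initialized.  */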
3017 
3018 static void
3019 optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
3020 {
3021   gimple *stmt = gsi_stmt (*gsip);
3022   if (gimple_has_volatile_ops (stmt))
3023     return;
3024 
3025   tree vuse = gimple_vuse (stmt);
3026   if (vuse == NULL)
3027     return;
3028 
3029   gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
3030   tree src2 = NULL_TREE, len2 = NULL_TREE;
3031   poly_int64 offset, offset2;
3032   tree val = integer_zero_node;
3033   if (gimple_store_p (defstmt)
3034       && gimple_assign_single_p (defstmt)
3035       && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
3036       && !gimple_clobber_p (defstmt))
3037     src2 = gimple_assign_lhs (defstmt);
3038   else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
3039 	   && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
3040 	   && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
3041     {
3042       src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
3043       len2 = gimple_call_arg (defstmt, 2);
3044       val = gimple_call_arg (defstmt, 1);
3045       /* For non-0 val, we'd have to transform stmt from assignment
3046 	 into memset (only if dest is addressable).  */
3047       if (!integer_zerop (val) && is_gimple_assign (stmt))
3048 	src2 = NULL_TREE;
3049     }
3050 
3051   if (src2 == NULL_TREE)
3052     return;
3053 
3054   if (len == NULL_TREE)
3055     len = (TREE_CODE (src) == COMPONENT_REF
3056 	   ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
3057 	   : TYPE_SIZE_UNIT (TREE_TYPE (src)));
3058   if (len2 == NULL_TREE)
3059     len2 = (TREE_CODE (src2) == COMPONENT_REF
3060 	    ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
3061 	    : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
3062   if (len == NULL_TREE
3063       || !poly_int_tree_p (len)
3064       || len2 == NULL_TREE
3065       || !poly_int_tree_p (len2))
3066     return;
3067 
3068   src = get_addr_base_and_unit_offset (src, &offset);
3069   src2 = get_addr_base_and_unit_offset (src2, &offset2);
3070   if (src == NULL_TREE
3071       || src2 == NULL_TREE
3072       || maybe_lt (offset, offset2))
3073     return;
3074 
3075   if (!operand_equal_p (src, src2, 0))
3076     return;
3077 
3078   /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
3079      Make sure that
3080      [ src + offset, src + offset + len - 1 ] is a subset of that.  */
3081   if (maybe_gt (wi::to_poly_offset (len) + (offset - offset2),
3082 		wi::to_poly_offset (len2)))
3083     return;
3084 
3085   if (dump_file && (dump_flags & TDF_DETAILS))
3086     {
3087       fprintf (dump_file, "Simplified\n  ");
3088       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3089       fprintf (dump_file, "after previous\n  ");
3090       print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
3091     }
3092 
3093   /* For simplicity, don't change the kind of the stmt,
3094      turn dest = src; into dest = {}; and memcpy (&dest, &src, len);
3095      into memset (&dest, val, len);
3096      In theory we could change dest = src into memset if dest
3097      is addressable (maybe beneficial if val is not 0), or
3098      memcpy (&dest, &src, len) into dest = {} if len is the size
3099      of dest and dest isn't volatile.  */
3100   if (is_gimple_assign (stmt))
3101     {
3102       tree ctor = build_constructor (TREE_TYPE (dest), NULL);
3103       gimple_assign_set_rhs_from_tree (gsip, ctor);
3104       update_stmt (stmt);
3105     }
3106   else /* If stmt is memcpy, transform it into memset.  */
3107     {
3108       gcall *call = as_a <gcall *> (stmt);
3109       tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
3110       gimple_call_set_fndecl (call, fndecl);
3111       gimple_call_set_fntype (call, TREE_TYPE (fndecl));
3112       gimple_call_set_arg (call, 1, val);
3113       update_stmt (stmt);
3114     }
3115 
3116   if (dump_file && (dump_flags & TDF_DETAILS))
3117     {
3118       fprintf (dump_file, "into\n  ");
3119       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3120     }
3121 }
3122 
3123 /* A simple pass that attempts to fold all builtin functions.  This pass
3124    is run after we've propagated as many constants as we can.  */
3125 
3126 namespace {
3127 
3128 const pass_data pass_data_fold_builtins =
3129 {
3130   GIMPLE_PASS, /* type */
3131   "fab", /* name */
3132   OPTGROUP_NONE, /* optinfo_flags */
3133   TV_NONE, /* tv_id */
3134   ( PROP_cfg | PROP_ssa ), /* properties_required */
3135   0, /* properties_provided */
3136   0, /* properties_destroyed */
3137   0, /* todo_flags_start */
3138   TODO_update_ssa, /* todo_flags_finish */
3139 };
3140 
3141 class pass_fold_builtins : public gimple_opt_pass
3142 {
3143 public:
3144   pass_fold_builtins (gcc::context *ctxt)
3145     : gimple_opt_pass (pass_data_fold_builtins, ctxt)
3146   {}
3147 
3148   /* opt_pass methods: */
3149   opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
3150   virtual unsigned int execute (function *);
3151 
3152 }; // class pass_fold_builtins
3153 
3154 unsigned int
3155 pass_fold_builtins::execute (function *fun)
3156 {
3157   bool cfg_changed = false;
3158   basic_block bb;
3159   unsigned int todoflags = 0;
3160 
3161   FOR_EACH_BB_FN (bb, fun)
3162     {
3163       gimple_stmt_iterator i;
3164       for (i = gsi_start_bb (bb); !gsi_end_p (i); )
3165 	{
3166 	  gimple *stmt, *old_stmt;
3167 	  tree callee;
3168 	  enum built_in_function fcode;
3169 
3170 	  stmt = gsi_stmt (i);
3171 
3172           if (gimple_code (stmt) != GIMPLE_CALL)
3173 	    {
3174 	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts; after the
3175 		 last GIMPLE DSE they aren't needed and might
3176 		 unnecessarily keep the SSA_NAMEs live.  */
3177 	      if (gimple_clobber_p (stmt))
3178 		{
3179 		  tree lhs = gimple_assign_lhs (stmt);
3180 		  if (TREE_CODE (lhs) == MEM_REF
3181 		      && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
3182 		    {
3183 		      unlink_stmt_vdef (stmt);
3184 		      gsi_remove (&i, true);
3185 		      release_defs (stmt);
3186 		      continue;
3187 		    }
3188 		}
3189 	      else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
3190 		optimize_memcpy (&i, gimple_assign_lhs (stmt),
3191 				 gimple_assign_rhs1 (stmt), NULL_TREE);
3192 	      gsi_next (&i);
3193 	      continue;
3194 	    }
3195 
3196 	  callee = gimple_call_fndecl (stmt);
3197 	  if (!callee || !fndecl_built_in_p (callee, BUILT_IN_NORMAL))
3198 	    {
3199 	      gsi_next (&i);
3200 	      continue;
3201 	    }
3202 
3203 	  fcode = DECL_FUNCTION_CODE (callee);
3204 	  if (fold_stmt (&i))
3205 	    ;
3206 	  else
3207 	    {
3208 	      tree result = NULL_TREE;
3209 	      switch (DECL_FUNCTION_CODE (callee))
3210 		{
3211 		case BUILT_IN_CONSTANT_P:
3212 		  /* Resolve __builtin_constant_p.  If it hasn't been
3213 		     folded to integer_one_node by now, it's fairly
3214 		     certain that the value simply isn't constant.  */
3215 		  result = integer_zero_node;
3216 		  break;
3217 
3218 		case BUILT_IN_ASSUME_ALIGNED:
3219 		  /* Remove __builtin_assume_aligned.  */
3220 		  result = gimple_call_arg (stmt, 0);
3221 		  break;
3222 
3223 		case BUILT_IN_STACK_RESTORE:
3224 		  result = optimize_stack_restore (i);
3225 		  if (result)
3226 		    break;
3227 		  gsi_next (&i);
3228 		  continue;
3229 
3230 		case BUILT_IN_UNREACHABLE:
3231 		  if (optimize_unreachable (i))
3232 		    cfg_changed = true;
3233 		  break;
3234 
3235 		case BUILT_IN_ATOMIC_FETCH_OR_1:
3236 		case BUILT_IN_ATOMIC_FETCH_OR_2:
3237 		case BUILT_IN_ATOMIC_FETCH_OR_4:
3238 		case BUILT_IN_ATOMIC_FETCH_OR_8:
3239 		case BUILT_IN_ATOMIC_FETCH_OR_16:
3240 		  optimize_atomic_bit_test_and (&i,
3241 						IFN_ATOMIC_BIT_TEST_AND_SET,
3242 						true, false);
3243 		  break;
3244 		case BUILT_IN_SYNC_FETCH_AND_OR_1:
3245 		case BUILT_IN_SYNC_FETCH_AND_OR_2:
3246 		case BUILT_IN_SYNC_FETCH_AND_OR_4:
3247 		case BUILT_IN_SYNC_FETCH_AND_OR_8:
3248 		case BUILT_IN_SYNC_FETCH_AND_OR_16:
3249 		  optimize_atomic_bit_test_and (&i,
3250 						IFN_ATOMIC_BIT_TEST_AND_SET,
3251 						false, false);
3252 		  break;
3253 
3254 		case BUILT_IN_ATOMIC_FETCH_XOR_1:
3255 		case BUILT_IN_ATOMIC_FETCH_XOR_2:
3256 		case BUILT_IN_ATOMIC_FETCH_XOR_4:
3257 		case BUILT_IN_ATOMIC_FETCH_XOR_8:
3258 		case BUILT_IN_ATOMIC_FETCH_XOR_16:
3259 		  optimize_atomic_bit_test_and
3260 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
3261 		  break;
3262 		case BUILT_IN_SYNC_FETCH_AND_XOR_1:
3263 		case BUILT_IN_SYNC_FETCH_AND_XOR_2:
3264 		case BUILT_IN_SYNC_FETCH_AND_XOR_4:
3265 		case BUILT_IN_SYNC_FETCH_AND_XOR_8:
3266 		case BUILT_IN_SYNC_FETCH_AND_XOR_16:
3267 		  optimize_atomic_bit_test_and
3268 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
3269 		  break;
3270 
3271 		case BUILT_IN_ATOMIC_XOR_FETCH_1:
3272 		case BUILT_IN_ATOMIC_XOR_FETCH_2:
3273 		case BUILT_IN_ATOMIC_XOR_FETCH_4:
3274 		case BUILT_IN_ATOMIC_XOR_FETCH_8:
3275 		case BUILT_IN_ATOMIC_XOR_FETCH_16:
3276 		  optimize_atomic_bit_test_and
3277 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
3278 		  break;
3279 		case BUILT_IN_SYNC_XOR_AND_FETCH_1:
3280 		case BUILT_IN_SYNC_XOR_AND_FETCH_2:
3281 		case BUILT_IN_SYNC_XOR_AND_FETCH_4:
3282 		case BUILT_IN_SYNC_XOR_AND_FETCH_8:
3283 		case BUILT_IN_SYNC_XOR_AND_FETCH_16:
3284 		  optimize_atomic_bit_test_and
3285 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
3286 		  break;
3287 
3288 		case BUILT_IN_ATOMIC_FETCH_AND_1:
3289 		case BUILT_IN_ATOMIC_FETCH_AND_2:
3290 		case BUILT_IN_ATOMIC_FETCH_AND_4:
3291 		case BUILT_IN_ATOMIC_FETCH_AND_8:
3292 		case BUILT_IN_ATOMIC_FETCH_AND_16:
3293 		  optimize_atomic_bit_test_and (&i,
3294 						IFN_ATOMIC_BIT_TEST_AND_RESET,
3295 						true, false);
3296 		  break;
3297 		case BUILT_IN_SYNC_FETCH_AND_AND_1:
3298 		case BUILT_IN_SYNC_FETCH_AND_AND_2:
3299 		case BUILT_IN_SYNC_FETCH_AND_AND_4:
3300 		case BUILT_IN_SYNC_FETCH_AND_AND_8:
3301 		case BUILT_IN_SYNC_FETCH_AND_AND_16:
3302 		  optimize_atomic_bit_test_and (&i,
3303 						IFN_ATOMIC_BIT_TEST_AND_RESET,
3304 						false, false);
3305 		  break;
3306 
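		/* A memcpy with known source and destination objects and a
		   constant length is handed to the same optimize_memcpy
		   helper used for aggregate load/store assignments at the
		   top of this loop, which may simplify the copy in place.  */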
3307 		case BUILT_IN_MEMCPY:
3308 		  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
3309 		      && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
3310 		      && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
3311 		      && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
3312 		    {
3313 		      tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
3314 		      tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
3315 		      tree len = gimple_call_arg (stmt, 2);
3316 		      optimize_memcpy (&i, dest, src, len);
3317 		    }
3318 		  break;
3319 
3320 		case BUILT_IN_VA_START:
3321 		case BUILT_IN_VA_END:
3322 		case BUILT_IN_VA_COPY:
3323 		  /* These shouldn't be folded before pass_stdarg.  */
3324 		  result = optimize_stdarg_builtin (stmt);
3325 		  break;
3326 
3327 		default:;
3328 		}
3329 
3330 	      if (!result)
3331 		{
3332 		  gsi_next (&i);
3333 		  continue;
3334 		}
3335 
3336 	      if (!update_call_from_tree (&i, result))
3337 		gimplify_and_update_call_from_tree (&i, result);
3338 	    }
3339 
3340 	  todoflags |= TODO_update_address_taken;
3341 
3342 	  if (dump_file && (dump_flags & TDF_DETAILS))
3343 	    {
3344 	      fprintf (dump_file, "Simplified\n  ");
3345 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3346 	    }
3347 
3348 	  old_stmt = stmt;
3349 	  stmt = gsi_stmt (i);
3350 	  update_stmt (stmt);
3351 
3352 	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
3353 	      && gimple_purge_dead_eh_edges (bb))
3354 	    cfg_changed = true;
3355 
3356 	  if (dump_file && (dump_flags & TDF_DETAILS))
3357 	    {
3358 	      fprintf (dump_file, "to\n  ");
3359 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3360 	      fprintf (dump_file, "\n");
3361 	    }
3362 
3363 	  /* Retry the same statement if it changed into another
3364 	     builtin; there might be new opportunities now.  */
3365 	  if (gimple_code (stmt) != GIMPLE_CALL)
3366 	    {
3367 	      gsi_next (&i);
3368 	      continue;
3369 	    }
3370 	  callee = gimple_call_fndecl (stmt);
3371 	  if (!callee
3372 	      || !fndecl_built_in_p (callee, fcode))
3373 	    gsi_next (&i);
3374 	}
3375     }
3376 
3377   /* Delete unreachable blocks.  */
3378   if (cfg_changed)
3379     todoflags |= TODO_cleanup_cfg;
3380 
3381   return todoflags;
3382 }
3383 
3384 } // anon namespace
3385 
3386 gimple_opt_pass *
3387 make_pass_fold_builtins (gcc::context *ctxt)
3388 {
3389   return new pass_fold_builtins (ctxt);
3390 }
3391 
3392 /* A simple pass that emits some warnings post IPA.  */
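/* For example, once IPA inlining or constant propagation has exposed a
   literal null argument for a parameter declared nonnull, this pass issues
   the -Wnonnull diagnostic.  A hypothetical illustration (the function
   names are made up, not taken from the testsuite):

     extern void use (void *) __attribute__ ((nonnull));

     static inline void wrap (void *p) { use (p); }

     void f (void) { wrap (0); }

   After wrap is inlined, use is called with a constant null argument and
   is diagnosed here: "argument 1 null where non-null expected".  */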
3393 
3394 namespace {
3395 
3396 const pass_data pass_data_post_ipa_warn =
3397 {
3398   GIMPLE_PASS, /* type */
3399   "post_ipa_warn", /* name */
3400   OPTGROUP_NONE, /* optinfo_flags */
3401   TV_NONE, /* tv_id */
3402   ( PROP_cfg | PROP_ssa ), /* properties_required */
3403   0, /* properties_provided */
3404   0, /* properties_destroyed */
3405   0, /* todo_flags_start */
3406   0, /* todo_flags_finish */
3407 };
3408 
3409 class pass_post_ipa_warn : public gimple_opt_pass
3410 {
3411 public:
3412   pass_post_ipa_warn (gcc::context *ctxt)
3413     : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
3414   {}
3415 
3416   /* opt_pass methods: */
3417   opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
3418   virtual bool gate (function *) { return warn_nonnull != 0; }
3419   virtual unsigned int execute (function *);
3420 
3421 }; // class pass_post_ipa_warn
3422 
3423 unsigned int
3424 pass_post_ipa_warn::execute (function *fun)
3425 {
3426   basic_block bb;
3427 
3428   FOR_EACH_BB_FN (bb, fun)
3429     {
3430       gimple_stmt_iterator gsi;
3431       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3432 	{
3433 	  gimple *stmt = gsi_stmt (gsi);
3434 	  if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
3435 	    continue;
3436 
3437 	  if (warn_nonnull)
3438 	    {
3439 	      bitmap nonnullargs
3440 		= get_nonnull_args (gimple_call_fntype (stmt));
3441 	      if (nonnullargs)
3442 		{
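		  /* An empty bitmap from get_nonnull_args means the
		     attribute covers every pointer argument; otherwise
		     only the argument positions whose bit is set are
		     checked below.  */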
3443 		  for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
3444 		    {
3445 		      tree arg = gimple_call_arg (stmt, i);
3446 		      if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
3447 			continue;
3448 		      if (!integer_zerop (arg))
3449 			continue;
3450 		      if (!bitmap_empty_p (nonnullargs)
3451 			  && !bitmap_bit_p (nonnullargs, i))
3452 			continue;
3453 
3454 		      location_t loc = gimple_location (stmt);
3455 		      auto_diagnostic_group d;
3456 		      if (warning_at (loc, OPT_Wnonnull,
3457 				      "%Gargument %u null where non-null "
3458 				      "expected", stmt, i + 1))
3459 			{
3460 			  tree fndecl = gimple_call_fndecl (stmt);
3461 			  if (fndecl && DECL_IS_BUILTIN (fndecl))
3462 			    inform (loc, "in a call to built-in function %qD",
3463 				    fndecl);
3464 			  else if (fndecl)
3465 			    inform (DECL_SOURCE_LOCATION (fndecl),
3466 				    "in a call to function %qD declared here",
3467 				    fndecl);
3468 
3469 			}
3470 		    }
3471 		  BITMAP_FREE (nonnullargs);
3472 		}
3473 	    }
3474 	}
3475     }
3476   return 0;
3477 }
3478 
3479 } // anon namespace
3480 
3481 gimple_opt_pass *
3482 make_pass_post_ipa_warn (gcc::context *ctxt)
3483 {
3484   return new pass_post_ipa_warn (ctxt);
3485 }
3486