1 /* Conditional constant propagation pass for the GNU compiler.
2    Copyright (C) 2000-2021 Free Software Foundation, Inc.
3    Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4    Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 /* Conditional constant propagation (CCP) is based on the SSA
23    propagation engine (tree-ssa-propagate.c).  Constant assignments of
24    the form VAR = CST are propagated from the assignments into uses of
25    VAR, which in turn may generate new constants.  The simulation uses
26    a four level lattice to keep track of constant values associated
27    with SSA names.  Given an SSA name V_i, it may take one of the
28    following values:
29 
30 	UNINITIALIZED   ->  the initial state of the value.  This value
31 			    is replaced with a correct initial value
32 			    the first time the value is used, so the
33 			    rest of the pass does not need to care about
34 			    it.  Using this value simplifies initialization
35 			    of the pass, and prevents us from needlessly
36 			    scanning statements that are never reached.
37 
38 	UNDEFINED	->  V_i is a local variable whose definition
39 			    has not been processed yet.  Therefore we
40 			    don't yet know if its value is a constant
41 			    or not.
42 
43 	CONSTANT	->  V_i has been found to hold a constant
44 			    value C.
45 
46 	VARYING		->  V_i cannot take a constant value, or if it
47 			    does, it is not possible to determine it
48 			    at compile time.
49 
50    The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51 
52    1- In ccp_visit_stmt, we are interested in assignments whose RHS
53       evaluates into a constant and conditional jumps whose predicate
54       evaluates into a boolean true or false.  When an assignment of
55       the form V_i = CONST is found, V_i's lattice value is set to
56       CONSTANT and CONST is associated with it.  This causes the
57       propagation engine to add all the SSA edges coming out of the
58       assignment into the worklists, so that statements that use V_i
59       can be visited.
60 
61       If the statement is a conditional with a constant predicate, we
62       mark the outgoing edges as executable or not executable
63       depending on the predicate's value.  This is then used when
64       visiting PHI nodes to know when a PHI argument can be ignored.
65 
66 
67    2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68       same constant C, then the LHS of the PHI is set to C.  This
69       evaluation is known as the "meet operation".  Since one of the
70       goals of this evaluation is to optimistically return constant
71       values as often as possible, it uses two main shortcuts:
72 
73       - If an argument is flowing in through a non-executable edge, it
74 	is ignored.  This is useful in cases like this:
75 
76 			if (PRED)
77 			  a_9 = 3;
78 			else
79 			  a_10 = 100;
80 			a_11 = PHI (a_9, a_10)
81 
82 	If PRED is known to always evaluate to false, then we can
83 	assume that a_11 will always take its value from a_10, meaning
84 	that instead of considering it VARYING (a_9 and a_10 have
85 	different values), we can consider it CONSTANT 100.
86 
87       - If an argument has an UNDEFINED value, then it does not affect
88 	the outcome of the meet operation.  If a variable V_i has an
89 	UNDEFINED value, it means that either its defining statement
90 	hasn't been visited yet or V_i has no defining statement, in
91 	which case the original symbol 'V' is being used
92 	uninitialized.  Since 'V' is a local variable, the compiler
93 	may assume any initial value for it.
94 
95 
96    After propagation, every variable V_i that ends up with a lattice
97    value of CONSTANT will have the associated constant value in the
98    array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
99    final substitution and folding.
100 
101    This algorithm uses wide-ints at the max precision of the target.
102    This means that, with one uninteresting exception, variables with
103    UNSIGNED types never go to VARYING because the bits above the
104    precision of the type of the variable are always zero.  The
105    uninteresting case is a variable of UNSIGNED type that has the
106    maximum precision of the target.  Such variables can go to VARYING,
107    but this causes no loss of information since these variables will
108    never be extended.
109 
110    References:
111 
112      Constant propagation with conditional branches,
113      Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114 
115      Building an Optimizing Compiler,
116      Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117 
118      Advanced Compiler Design and Implementation,
119      Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
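
/* For illustration only (compiled out): a minimal standalone sketch of
   the meet rules described above, using a toy lattice over plain
   integers instead of trees and widest_ints.  The names toy_lattice,
   toy_value and toy_meet are invented for this sketch and are not part
   of GCC.  */
#if 0
#include <cassert>

enum toy_lattice { TOY_UNDEFINED, TOY_CONSTANT, TOY_VARYING };

struct toy_value
{
  toy_lattice kind;
  long long cst;	/* Meaningful only when kind == TOY_CONSTANT.  */
};

/* Meet of A and B:	any M UNDEFINED = any
			any M VARYING	= VARYING
			Ci  M Cj	= Ci if i == j, else VARYING.  */
static toy_value
toy_meet (toy_value a, toy_value b)
{
  if (a.kind == TOY_UNDEFINED)
    return b;
  if (b.kind == TOY_UNDEFINED)
    return a;
  if (a.kind == TOY_VARYING || b.kind == TOY_VARYING)
    return { TOY_VARYING, 0 };
  if (a.cst == b.cst)
    return a;
  return { TOY_VARYING, 0 };
}

int
main ()
{
  toy_value c3 = { TOY_CONSTANT, 3 };
  toy_value c100 = { TOY_CONSTANT, 100 };
  toy_value undef = { TOY_UNDEFINED, 0 };

  /* An UNDEFINED argument does not affect the outcome, so a PHI whose
     other argument is CONSTANT 100 stays CONSTANT 100.  */
  assert (toy_meet (undef, c100).kind == TOY_CONSTANT
	  && toy_meet (undef, c100).cst == 100);
  /* Two different constants meet to VARYING.  */
  assert (toy_meet (c3, c100).kind == TOY_VARYING);
  return 0;
}
#endif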
120 
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "ssa.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-fold.h"
133 #include "tree-eh.h"
134 #include "gimplify.h"
135 #include "gimple-iterator.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
138 #include "dbgcnt.h"
139 #include "builtins.h"
140 #include "cfgloop.h"
141 #include "stor-layout.h"
142 #include "optabs-query.h"
143 #include "tree-ssa-ccp.h"
144 #include "tree-dfa.h"
145 #include "diagnostic-core.h"
146 #include "stringpool.h"
147 #include "attribs.h"
148 #include "tree-vector-builder.h"
149 #include "cgraph.h"
150 #include "alloc-pool.h"
151 #include "symbol-summary.h"
152 #include "ipa-utils.h"
153 #include "ipa-prop.h"
154 
155 /* Possible lattice values.  */
156 typedef enum
157 {
158   UNINITIALIZED,
159   UNDEFINED,
160   CONSTANT,
161   VARYING
162 } ccp_lattice_t;
163 
164 class ccp_prop_value_t {
165 public:
166     /* Lattice value.  */
167     ccp_lattice_t lattice_val;
168 
169     /* Propagated value.  */
170     tree value;
171 
172     /* Mask that applies to the propagated value during CCP.  For X
173        with a CONSTANT lattice value, X & ~mask == value & ~mask.  The
174        zero bits in the mask cover constant values.  The one bits mean
175        no information.  */
176     widest_int mask;
177 };
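
/* For illustration only (compiled out): what the value/mask pair above
   encodes, using plain 64-bit integers instead of tree and widest_int.
   A bit is known iff the corresponding mask bit is zero, and then it
   equals the corresponding bit of value.  The helper name "matches" is
   invented for this sketch.  */
#if 0
#include <cassert>
#include <cstdint>

/* Return true if the concrete value X is compatible with the partially
   known constant described by VALUE and MASK.  */
static bool
matches (uint64_t x, uint64_t value, uint64_t mask)
{
  return (x & ~mask) == (value & ~mask);
}

int
main ()
{
  /* value = 0x10, mask = 0x0f: the low four bits are unknown, so this
     describes exactly the sixteen values 0x10 .. 0x1f.  */
  assert (matches (0x10, 0x10, 0x0f));
  assert (matches (0x1f, 0x10, 0x0f));
  assert (!matches (0x20, 0x10, 0x0f));
  /* mask == 0 means fully known; mask == ~0 means nothing is known.  */
  assert (matches (42, 42, 0) && !matches (43, 42, 0));
  return 0;
}
#endif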
178 
179 class ccp_propagate : public ssa_propagation_engine
180 {
181  public:
182   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
183   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
184 };
185 
186 /* Array of propagated constant values.  After propagation,
187    CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
188    the constant is held in an SSA name representing a memory store
189    (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
190    memory reference used to store (i.e., the LHS of the assignment
191    doing the store).  */
192 static ccp_prop_value_t *const_val;
193 static unsigned n_const_val;
194 
195 static void canonicalize_value (ccp_prop_value_t *);
196 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
197 
198 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */
199 
200 static void
201 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
202 {
203   switch (val.lattice_val)
204     {
205     case UNINITIALIZED:
206       fprintf (outf, "%sUNINITIALIZED", prefix);
207       break;
208     case UNDEFINED:
209       fprintf (outf, "%sUNDEFINED", prefix);
210       break;
211     case VARYING:
212       fprintf (outf, "%sVARYING", prefix);
213       break;
214     case CONSTANT:
215       if (TREE_CODE (val.value) != INTEGER_CST
216 	  || val.mask == 0)
217 	{
218 	  fprintf (outf, "%sCONSTANT ", prefix);
219 	  print_generic_expr (outf, val.value, dump_flags);
220 	}
221       else
222 	{
223 	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
224 					     val.mask);
225 	  fprintf (outf, "%sCONSTANT ", prefix);
226 	  print_hex (cval, outf);
227 	  fprintf (outf, " (");
228 	  print_hex (val.mask, outf);
229 	  fprintf (outf, ")");
230 	}
231       break;
232     default:
233       gcc_unreachable ();
234     }
235 }
236 
237 
238 /* Print lattice value VAL to stderr.  */
239 
240 void debug_lattice_value (ccp_prop_value_t val);
241 
242 DEBUG_FUNCTION void
243 debug_lattice_value (ccp_prop_value_t val)
244 {
245   dump_lattice_value (stderr, "", val);
246   fprintf (stderr, "\n");
247 }
248 
249 /* Extend NONZERO_BITS to a full mask, based on SGN.  */
250 
251 static widest_int
252 extend_mask (const wide_int &nonzero_bits, signop sgn)
253 {
254   return widest_int::from (nonzero_bits, sgn);
255 }
256 
257 /* Compute a default value for variable VAR and store it in the
258    CONST_VAL array.  The following rules are used to get default
259    values:
260 
261    1- Global and static variables that are declared constant are
262       considered CONSTANT.
263 
264    2- Any other value is considered UNDEFINED.  This is useful when
265       considering PHI nodes.  PHI arguments that are undefined do not
266       change the constant value of the PHI node, which allows for more
267       constants to be propagated.
268 
269    3- Variables defined by statements other than assignments and PHI
270       nodes are considered VARYING.
271 
272    4- Initial values of variables that are not GIMPLE registers are
273       considered VARYING.  */
274 
275 static ccp_prop_value_t
276 get_default_value (tree var)
277 {
278   ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
279   gimple *stmt;
280 
281   stmt = SSA_NAME_DEF_STMT (var);
282 
283   if (gimple_nop_p (stmt))
284     {
285       /* Variables defined by an empty statement are those used
286 	 before being initialized.  If VAR is a local variable, we
287 	 can assume initially that it is UNDEFINED, otherwise we must
288 	 consider it VARYING.  */
289       if (!virtual_operand_p (var)
290 	  && SSA_NAME_VAR (var)
291 	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
292 	val.lattice_val = UNDEFINED;
293       else
294 	{
295 	  val.lattice_val = VARYING;
296 	  val.mask = -1;
297 	  if (flag_tree_bit_ccp)
298 	    {
299 	      wide_int nonzero_bits = get_nonzero_bits (var);
300 	      tree value;
301 	      widest_int mask;
302 
303 	      if (SSA_NAME_VAR (var)
304 		  && TREE_CODE (SSA_NAME_VAR (var)) == PARM_DECL
305 		  && ipcp_get_parm_bits (SSA_NAME_VAR (var), &value, &mask))
306 		{
307 		  val.lattice_val = CONSTANT;
308 		  val.value = value;
309 		  widest_int ipa_value = wi::to_widest (value);
310 		  /* Unknown bits from IPA CP must be equal to zero.  */
311 		  gcc_assert (wi::bit_and (ipa_value, mask) == 0);
312 		  val.mask = mask;
313 		  if (nonzero_bits != -1)
314 		    val.mask &= extend_mask (nonzero_bits,
315 					     TYPE_SIGN (TREE_TYPE (var)));
316 		}
317 	      else if (nonzero_bits != -1)
318 		{
319 		  val.lattice_val = CONSTANT;
320 		  val.value = build_zero_cst (TREE_TYPE (var));
321 		  val.mask = extend_mask (nonzero_bits,
322 					  TYPE_SIGN (TREE_TYPE (var)));
323 		}
324 	    }
325 	}
326     }
327   else if (is_gimple_assign (stmt))
328     {
329       tree cst;
330       if (gimple_assign_single_p (stmt)
331 	  && DECL_P (gimple_assign_rhs1 (stmt))
332 	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
333 	{
334 	  val.lattice_val = CONSTANT;
335 	  val.value = cst;
336 	}
337       else
338 	{
339 	  /* Any other variable defined by an assignment is considered
340 	     UNDEFINED.  */
341 	  val.lattice_val = UNDEFINED;
342 	}
343     }
344   else if ((is_gimple_call (stmt)
345 	    && gimple_call_lhs (stmt) != NULL_TREE)
346 	   || gimple_code (stmt) == GIMPLE_PHI)
347     {
348       /* A variable defined by a call or a PHI node is considered
349 	 UNDEFINED.  */
350       val.lattice_val = UNDEFINED;
351     }
352   else
353     {
354       /* Otherwise, VAR will never take on a constant value.  */
355       val.lattice_val = VARYING;
356       val.mask = -1;
357     }
358 
359   return val;
360 }
361 
362 
363 /* Get the constant value associated with variable VAR.  */
364 
365 static inline ccp_prop_value_t *
366 get_value (tree var)
367 {
368   ccp_prop_value_t *val;
369 
370   if (const_val == NULL
371       || SSA_NAME_VERSION (var) >= n_const_val)
372     return NULL;
373 
374   val = &const_val[SSA_NAME_VERSION (var)];
375   if (val->lattice_val == UNINITIALIZED)
376     *val = get_default_value (var);
377 
378   canonicalize_value (val);
379 
380   return val;
381 }
382 
383 /* Return the constant tree value associated with VAR.  */
384 
385 static inline tree
386 get_constant_value (tree var)
387 {
388   ccp_prop_value_t *val;
389   if (TREE_CODE (var) != SSA_NAME)
390     {
391       if (is_gimple_min_invariant (var))
392         return var;
393       return NULL_TREE;
394     }
395   val = get_value (var);
396   if (val
397       && val->lattice_val == CONSTANT
398       && (TREE_CODE (val->value) != INTEGER_CST
399 	  || val->mask == 0))
400     return val->value;
401   return NULL_TREE;
402 }
403 
404 /* Sets the value associated with VAR to VARYING.  */
405 
406 static inline void
407 set_value_varying (tree var)
408 {
409   ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
410 
411   val->lattice_val = VARYING;
412   val->value = NULL_TREE;
413   val->mask = -1;
414 }
415 
416 /* For integer constants, make sure to drop TREE_OVERFLOW.  */
417 
418 static void
419 canonicalize_value (ccp_prop_value_t *val)
420 {
421   if (val->lattice_val != CONSTANT)
422     return;
423 
424   if (TREE_OVERFLOW_P (val->value))
425     val->value = drop_tree_overflow (val->value);
426 }
427 
428 /* Return whether the lattice transition is valid.  */
429 
430 static bool
431 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
432 {
433   /* Lattice transitions must always be monotonically increasing in
434      value.  */
435   if (old_val.lattice_val < new_val.lattice_val)
436     return true;
437 
438   if (old_val.lattice_val != new_val.lattice_val)
439     return false;
440 
441   if (!old_val.value && !new_val.value)
442     return true;
443 
444   /* Now both lattice values are CONSTANT.  */
445 
446   /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
447      when only a single copy edge is executable.  */
448   if (TREE_CODE (old_val.value) == SSA_NAME
449       && TREE_CODE (new_val.value) == SSA_NAME)
450     return true;
451 
452   /* Allow transitioning from a constant to a copy.  */
453   if (is_gimple_min_invariant (old_val.value)
454       && TREE_CODE (new_val.value) == SSA_NAME)
455     return true;
456 
457   /* Allow transitioning from PHI <&x, not executable> == &x
458      to PHI <&x, &y> == common alignment.  */
459   if (TREE_CODE (old_val.value) != INTEGER_CST
460       && TREE_CODE (new_val.value) == INTEGER_CST)
461     return true;
462 
463   /* Bit-lattices have to agree in the still valid bits.  */
464   if (TREE_CODE (old_val.value) == INTEGER_CST
465       && TREE_CODE (new_val.value) == INTEGER_CST)
466     return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
467 	    == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
468 
469   /* Otherwise constant values have to agree.  */
470   if (operand_equal_p (old_val.value, new_val.value, 0))
471     return true;
472 
473   /* At least the kinds and types should agree now.  */
474   if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
475       || !types_compatible_p (TREE_TYPE (old_val.value),
476 			      TREE_TYPE (new_val.value)))
477     return false;
478 
479   /* For floats and !HONOR_NANS allow transitions from (partial) NaN
480      to non-NaN.  */
481   tree type = TREE_TYPE (new_val.value);
482   if (SCALAR_FLOAT_TYPE_P (type)
483       && !HONOR_NANS (type))
484     {
485       if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
486 	return true;
487     }
488   else if (VECTOR_FLOAT_TYPE_P (type)
489 	   && !HONOR_NANS (type))
490     {
491       unsigned int count
492 	= tree_vector_builder::binary_encoded_nelts (old_val.value,
493 						     new_val.value);
494       for (unsigned int i = 0; i < count; ++i)
495 	if (!REAL_VALUE_ISNAN
496 	       (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val.value, i)))
497 	    && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val.value, i),
498 				 VECTOR_CST_ENCODED_ELT (new_val.value, i), 0))
499 	  return false;
500       return true;
501     }
502   else if (COMPLEX_FLOAT_TYPE_P (type)
503 	   && !HONOR_NANS (type))
504     {
505       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
506 	  && !operand_equal_p (TREE_REALPART (old_val.value),
507 			       TREE_REALPART (new_val.value), 0))
508 	return false;
509       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
510 	  && !operand_equal_p (TREE_IMAGPART (old_val.value),
511 			       TREE_IMAGPART (new_val.value), 0))
512 	return false;
513       return true;
514     }
515   return false;
516 }
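
/* For illustration only (compiled out): the bit-lattice part of the
   transition check above, on plain 64-bit integers.  A transition
   between two CONSTANT integers is valid when the old and new values
   agree on every bit the new mask still claims to know.  The helper
   name valid_bits_transition is invented for this sketch.  */
#if 0
#include <cassert>
#include <cstdint>

static bool
valid_bits_transition (uint64_t old_val, uint64_t new_val, uint64_t new_mask)
{
  return (old_val & ~new_mask) == (new_val & ~new_mask);
}

int
main ()
{
  /* Only the low nibble is unknown in the new value, and old and new
     agree everywhere else, so the transition is valid.  */
  assert (valid_bits_transition (0x1234, 0x1235, 0x0f));
  /* Here bit 8 differs even though the new mask claims to know it.  */
  assert (!valid_bits_transition (0x1234, 0x1334, 0x0f));
  return 0;
}
#endif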
517 
518 /* Set the value for variable VAR to NEW_VAL.  Return true if the new
519    value is different from VAR's previous value.  */
520 
521 static bool
522 set_lattice_value (tree var, ccp_prop_value_t *new_val)
523 {
524   /* We can deal with old UNINITIALIZED values just fine here.  */
525   ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
526 
527   canonicalize_value (new_val);
528 
529   /* We have to be careful to not go up the bitwise lattice
530      represented by the mask.  Instead of dropping to VARYING
531      use the meet operator to retain a conservative value.
532      Missed optimizations like PR65851 make this necessary.
533      It also ensures we converge to a stable lattice solution.  */
534   if (old_val->lattice_val != UNINITIALIZED)
535     ccp_lattice_meet (new_val, old_val);
536 
537   gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
538 
539   /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
540      caller that this was a non-transition.  */
541   if (old_val->lattice_val != new_val->lattice_val
542       || (new_val->lattice_val == CONSTANT
543 	  && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
544 	      || (TREE_CODE (new_val->value) == INTEGER_CST
545 		  && (new_val->mask != old_val->mask
546 		      || (wi::bit_and_not (wi::to_widest (old_val->value),
547 					   new_val->mask)
548 			  != wi::bit_and_not (wi::to_widest (new_val->value),
549 					      new_val->mask))))
550 	      || (TREE_CODE (new_val->value) != INTEGER_CST
551 		  && !operand_equal_p (new_val->value, old_val->value, 0)))))
552     {
553       /* ???  We would like to delay creation of INTEGER_CSTs from
554 	 partially constant values here.  */
555 
556       if (dump_file && (dump_flags & TDF_DETAILS))
557 	{
558 	  dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
559 	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
560 	}
561 
562       *old_val = *new_val;
563 
564       gcc_assert (new_val->lattice_val != UNINITIALIZED);
565       return true;
566     }
567 
568   return false;
569 }
570 
571 static ccp_prop_value_t get_value_for_expr (tree, bool);
572 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
573 void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
574 		      signop, int, const widest_int &, const widest_int &,
575 		      signop, int, const widest_int &, const widest_int &);
576 
577 /* Return a widest_int that can be used for bitwise simplifications
578    from VAL.  */
579 
580 static widest_int
581 value_to_wide_int (ccp_prop_value_t val)
582 {
583   if (val.value
584       && TREE_CODE (val.value) == INTEGER_CST)
585     return wi::to_widest (val.value);
586 
587   return 0;
588 }
589 
590 /* Return the value for the address expression EXPR based on alignment
591    information.  */
592 
593 static ccp_prop_value_t
594 get_value_from_alignment (tree expr)
595 {
596   tree type = TREE_TYPE (expr);
597   ccp_prop_value_t val;
598   unsigned HOST_WIDE_INT bitpos;
599   unsigned int align;
600 
601   gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
602 
603   get_pointer_alignment_1 (expr, &align, &bitpos);
604   val.mask = wi::bit_and_not
605     (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
606      ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
607      : -1,
608      align / BITS_PER_UNIT - 1);
609   val.lattice_val
610     = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
611   if (val.lattice_val == CONSTANT)
612     val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
613   else
614     val.value = NULL_TREE;
615 
616   return val;
617 }
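
/* For illustration only (compiled out): how "ALIGN-byte aligned with
   byte misalignment MISALIGN" becomes a value/mask pair as above, on
   plain 64-bit integers with ALIGN a power of two given in bytes.  The
   names bits_value and bits_from_alignment are invented for this
   sketch.  */
#if 0
#include <cassert>
#include <cstdint>

struct bits_value { uint64_t value, mask; };

static bits_value
bits_from_alignment (uint64_t align, uint64_t misalign)
{
  /* The low log2(ALIGN) bits are known (mask 0) and hold the
     misalignment; all higher bits are unknown (mask 1).  */
  return { misalign, ~(align - 1) };
}

int
main ()
{
  /* A 16-byte aligned pointer offset by 4 bytes: the low four bits are
     known to be 0b0100, everything above them is unknown.  */
  bits_value v = bits_from_alignment (16, 4);
  assert (v.mask == ~UINT64_C (15));
  assert ((v.value & ~v.mask) == 4);
  return 0;
}
#endif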
618 
619 /* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
620    return constant bits extracted from alignment information for
621    invariant addresses.  */
622 
623 static ccp_prop_value_t
624 get_value_for_expr (tree expr, bool for_bits_p)
625 {
626   ccp_prop_value_t val;
627 
628   if (TREE_CODE (expr) == SSA_NAME)
629     {
630       ccp_prop_value_t *val_ = get_value (expr);
631       if (val_)
632 	val = *val_;
633       else
634 	{
635 	  val.lattice_val = VARYING;
636 	  val.value = NULL_TREE;
637 	  val.mask = -1;
638 	}
639       if (for_bits_p
640 	  && val.lattice_val == CONSTANT)
641 	{
642 	  if (TREE_CODE (val.value) == ADDR_EXPR)
643 	    val = get_value_from_alignment (val.value);
644 	  else if (TREE_CODE (val.value) != INTEGER_CST)
645 	    {
646 	      val.lattice_val = VARYING;
647 	      val.value = NULL_TREE;
648 	      val.mask = -1;
649 	    }
650 	}
651       /* Fall back to a copy value.  */
652       if (!for_bits_p
653 	  && val.lattice_val == VARYING
654 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
655 	{
656 	  val.lattice_val = CONSTANT;
657 	  val.value = expr;
658 	  val.mask = -1;
659 	}
660     }
661   else if (is_gimple_min_invariant (expr)
662 	   && (!for_bits_p || TREE_CODE (expr) == INTEGER_CST))
663     {
664       val.lattice_val = CONSTANT;
665       val.value = expr;
666       val.mask = 0;
667       canonicalize_value (&val);
668     }
669   else if (TREE_CODE (expr) == ADDR_EXPR)
670     val = get_value_from_alignment (expr);
671   else
672     {
673       val.lattice_val = VARYING;
674       val.mask = -1;
675       val.value = NULL_TREE;
676     }
677 
678   if (val.lattice_val == VARYING
679       && TYPE_UNSIGNED (TREE_TYPE (expr)))
680     val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
681 
682   return val;
683 }
684 
685 /* Return the likely CCP lattice value for STMT.
686 
687    If STMT has no operands, then return CONSTANT.
688 
689    Else if undefinedness of operands of STMT cause its value to be
690    undefined, then return UNDEFINED.
691 
692    Else if any operands of STMT are constants, then return CONSTANT.
693 
694    Else return VARYING.  */
695 
696 static ccp_lattice_t
697 likely_value (gimple *stmt)
698 {
699   bool has_constant_operand, has_undefined_operand, all_undefined_operands;
700   bool has_nsa_operand;
701   tree use;
702   ssa_op_iter iter;
703   unsigned i;
704 
705   enum gimple_code code = gimple_code (stmt);
706 
707   /* This function appears to be called only for assignments, calls,
708      conditionals, and switches, due to the logic in visit_stmt.  */
709   gcc_assert (code == GIMPLE_ASSIGN
710               || code == GIMPLE_CALL
711               || code == GIMPLE_COND
712               || code == GIMPLE_SWITCH);
713 
714   /* If the statement has volatile operands, it won't fold to a
715      constant value.  */
716   if (gimple_has_volatile_ops (stmt))
717     return VARYING;
718 
719   /* Arrive here for more complex cases.  */
720   has_constant_operand = false;
721   has_undefined_operand = false;
722   all_undefined_operands = true;
723   has_nsa_operand = false;
724   FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
725     {
726       ccp_prop_value_t *val = get_value (use);
727 
728       if (val && val->lattice_val == UNDEFINED)
729 	has_undefined_operand = true;
730       else
731 	all_undefined_operands = false;
732 
733       if (val && val->lattice_val == CONSTANT)
734 	has_constant_operand = true;
735 
736       if (SSA_NAME_IS_DEFAULT_DEF (use)
737 	  || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
738 	has_nsa_operand = true;
739     }
740 
741   /* There may be constants in regular rhs operands.  For calls we
742      have to ignore lhs, fndecl and static chain, otherwise only
743      the lhs.  */
744   for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
745        i < gimple_num_ops (stmt); ++i)
746     {
747       tree op = gimple_op (stmt, i);
748       if (!op || TREE_CODE (op) == SSA_NAME)
749 	continue;
750       if (is_gimple_min_invariant (op))
751 	has_constant_operand = true;
752     }
753 
754   if (has_constant_operand)
755     all_undefined_operands = false;
756 
757   if (has_undefined_operand
758       && code == GIMPLE_CALL
759       && gimple_call_internal_p (stmt))
760     switch (gimple_call_internal_fn (stmt))
761       {
762 	/* These 3 builtins use the first argument just as a magic
763 	   way to find out a decl uid.  */
764       case IFN_GOMP_SIMD_LANE:
765       case IFN_GOMP_SIMD_VF:
766       case IFN_GOMP_SIMD_LAST_LANE:
767 	has_undefined_operand = false;
768 	break;
769       default:
770 	break;
771       }
772 
773   /* If the operation combines operands like COMPLEX_EXPR make sure to
774      not mark the result UNDEFINED if only one part of the result is
775      undefined.  */
776   if (has_undefined_operand && all_undefined_operands)
777     return UNDEFINED;
778   else if (code == GIMPLE_ASSIGN && has_undefined_operand)
779     {
780       switch (gimple_assign_rhs_code (stmt))
781 	{
782 	/* Unary operators are handled with all_undefined_operands.  */
783 	case PLUS_EXPR:
784 	case MINUS_EXPR:
785 	case POINTER_PLUS_EXPR:
786 	case BIT_XOR_EXPR:
787 	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
788 	     Not bitwise operators, one VARYING operand may specify the
789 	     result completely.
790 	     Not logical operators for the same reason, apart from XOR.
791 	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
792 	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
793 	     the undefined operand may be promoted.  */
794 	  return UNDEFINED;
795 
796 	case ADDR_EXPR:
797 	  /* If any part of an address is UNDEFINED, like the index
798 	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
799 	  return UNDEFINED;
800 
801 	default:
802 	  ;
803 	}
804     }
805   /* If there was an UNDEFINED operand but the result may not be UNDEFINED,
806      fall back to CONSTANT.  During iteration UNDEFINED may still drop
807      to CONSTANT.  */
808   if (has_undefined_operand)
809     return CONSTANT;
810 
811   /* We do not consider virtual operands here -- load from read-only
812      memory may have only VARYING virtual operands, but still be
813      constant.  Also we can combine the stmt with definitions from
814      operands whose definitions are not simulated again.  */
815   if (has_constant_operand
816       || has_nsa_operand
817       || gimple_references_memory_p (stmt))
818     return CONSTANT;
819 
820   return VARYING;
821 }
822 
823 /* Returns true if STMT cannot be constant.  */
824 
825 static bool
826 surely_varying_stmt_p (gimple *stmt)
827 {
828   /* If the statement has operands that we cannot handle, it cannot be
829      constant.  */
830   if (gimple_has_volatile_ops (stmt))
831     return true;
832 
833   /* A call is varying if it does not return a value, or if it is a
834      direct call to a function that is not a builtin and does not have
835      the assume_aligned/alloc_align attribute.  */
836   if (is_gimple_call (stmt))
837     {
838       tree fndecl, fntype = gimple_call_fntype (stmt);
839       if (!gimple_call_lhs (stmt)
840 	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
841 	      && !fndecl_built_in_p (fndecl)
842 	      && !lookup_attribute ("assume_aligned",
843 				    TYPE_ATTRIBUTES (fntype))
844 	      && !lookup_attribute ("alloc_align",
845 				    TYPE_ATTRIBUTES (fntype))))
846 	return true;
847     }
848 
849   /* Any other store operation is not interesting.  */
850   else if (gimple_vdef (stmt))
851     return true;
852 
853   /* Anything other than assignments and conditional jumps are not
854      interesting for CCP.  */
855   if (gimple_code (stmt) != GIMPLE_ASSIGN
856       && gimple_code (stmt) != GIMPLE_COND
857       && gimple_code (stmt) != GIMPLE_SWITCH
858       && gimple_code (stmt) != GIMPLE_CALL)
859     return true;
860 
861   return false;
862 }
863 
864 /* Initialize local data structures for CCP.  */
865 
866 static void
867 ccp_initialize (void)
868 {
869   basic_block bb;
870 
871   n_const_val = num_ssa_names;
872   const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
873 
874   /* Initialize simulation flags for PHI nodes and statements.  */
875   FOR_EACH_BB_FN (bb, cfun)
876     {
877       gimple_stmt_iterator i;
878 
879       for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
880         {
881 	  gimple *stmt = gsi_stmt (i);
882 	  bool is_varying;
883 
884 	  /* If the statement is a control statement, we must simulate
885 	     it at least once.  Failing to do so means that its outgoing
886 	     edges will never get added.  */
887 	  if (stmt_ends_bb_p (stmt))
888 	    is_varying = false;
889 	  else
890 	    is_varying = surely_varying_stmt_p (stmt);
891 
892 	  if (is_varying)
893 	    {
894 	      tree def;
895 	      ssa_op_iter iter;
896 
897 	      /* If the statement will not produce a constant, mark
898 		 all its outputs VARYING.  */
899 	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
900 		set_value_varying (def);
901 	    }
902           prop_set_simulate_again (stmt, !is_varying);
903 	}
904     }
905 
906   /* Now process PHI nodes.  We never clear the simulate_again flag on
907      phi nodes, since we do not know which edges are executable yet,
908      except for phi nodes for virtual operands when we do not do store ccp.  */
909   FOR_EACH_BB_FN (bb, cfun)
910     {
911       gphi_iterator i;
912 
913       for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
914         {
915           gphi *phi = i.phi ();
916 
917 	  if (virtual_operand_p (gimple_phi_result (phi)))
918             prop_set_simulate_again (phi, false);
919 	  else
920             prop_set_simulate_again (phi, true);
921 	}
922     }
923 }
924 
925 /* Debug count support.  Reset the values of SSA names to
926    VARYING when the total number of SSA names analyzed is
927    beyond the specified debug count.  */
928 
929 static void
930 do_dbg_cnt (void)
931 {
932   unsigned i;
933   for (i = 0; i < num_ssa_names; i++)
934     {
935       if (!dbg_cnt (ccp))
936         {
937           const_val[i].lattice_val = VARYING;
938 	  const_val[i].mask = -1;
939           const_val[i].value = NULL_TREE;
940         }
941     }
942 }
943 
944 
945 /* We want to provide our own GET_VALUE and FOLD_STMT virtual methods.  */
946 class ccp_folder : public substitute_and_fold_engine
947 {
948  public:
949   tree value_of_expr (tree, gimple *) FINAL OVERRIDE;
950   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
951 };
952 
953 /* This method just wraps GET_CONSTANT_VALUE for now.  Over time
954    naked calls to GET_CONSTANT_VALUE should be eliminated in favor
955    of calling member functions.  */
956 
957 tree
958 ccp_folder::value_of_expr (tree op, gimple *)
959 {
960   return get_constant_value (op);
961 }
962 
963 /* Do final substitution of propagated values, cleanup the flowgraph and
964    free allocated storage.  If NONZERO_P, record nonzero bits.
965 
966    Return TRUE when something was optimized.  */
967 
968 static bool
969 ccp_finalize (bool nonzero_p)
970 {
971   bool something_changed;
972   unsigned i;
973   tree name;
974 
975   do_dbg_cnt ();
976 
977   /* Derive alignment and misalignment information from partially
978      constant pointers in the lattice or nonzero bits from partially
979      constant integers.  */
980   FOR_EACH_SSA_NAME (i, name, cfun)
981     {
982       ccp_prop_value_t *val;
983       unsigned int tem, align;
984 
985       if (!POINTER_TYPE_P (TREE_TYPE (name))
986 	  && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
987 	      /* Don't record nonzero bits before IPA to avoid
988 		 using too much memory.  */
989 	      || !nonzero_p))
990 	continue;
991 
992       val = get_value (name);
993       if (val->lattice_val != CONSTANT
994 	  || TREE_CODE (val->value) != INTEGER_CST
995 	  || val->mask == 0)
996 	continue;
997 
998       if (POINTER_TYPE_P (TREE_TYPE (name)))
999 	{
1000 	  /* Trailing mask bits specify the alignment, trailing value
1001 	     bits the misalignment.  */
1002 	  tem = val->mask.to_uhwi ();
1003 	  align = least_bit_hwi (tem);
1004 	  if (align > 1)
1005 	    set_ptr_info_alignment (get_ptr_info (name), align,
1006 				    (TREE_INT_CST_LOW (val->value)
1007 				     & (align - 1)));
1008 	}
1009       else
1010 	{
1011 	  unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
1012 	  wide_int nonzero_bits
1013 	    = (wide_int::from (val->mask, precision, UNSIGNED)
1014 	       | wi::to_wide (val->value));
1015 	  nonzero_bits &= get_nonzero_bits (name);
1016 	  set_nonzero_bits (name, nonzero_bits);
1017 	}
1018     }
1019 
1020   /* Perform substitutions based on the known constant values.  */
1021   class ccp_folder ccp_folder;
1022   something_changed = ccp_folder.substitute_and_fold ();
1023 
1024   free (const_val);
1025   const_val = NULL;
1026   return something_changed;
1027 }
1028 
1029 
1030 /* Compute the meet operator between *VAL1 and *VAL2.  Store the result
1031    in VAL1.
1032 
1033    		any  M UNDEFINED   = any
1034 		any  M VARYING     = VARYING
1035 		Ci   M Cj	   = Ci		if (i == j)
1036 		Ci   M Cj	   = VARYING	if (i != j)
1037    */
1038 
1039 static void
1040 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
1041 {
1042   if (val1->lattice_val == UNDEFINED
1043       /* For UNDEFINED M SSA we can't always use the SSA name because its
1044          definition may not dominate the PHI node.  Doing optimistic copy
1045 	 propagation also causes a lot of gcc.dg/uninit-pred*.c FAILs.  */
1046       && (val2->lattice_val != CONSTANT
1047 	  || TREE_CODE (val2->value) != SSA_NAME))
1048     {
1049       /* UNDEFINED M any = any   */
1050       *val1 = *val2;
1051     }
1052   else if (val2->lattice_val == UNDEFINED
1053 	   /* See above.  */
1054 	   && (val1->lattice_val != CONSTANT
1055 	       || TREE_CODE (val1->value) != SSA_NAME))
1056     {
1057       /* any M UNDEFINED = any
1058          Nothing to do.  VAL1 already contains the value we want.  */
1059       ;
1060     }
1061   else if (val1->lattice_val == VARYING
1062            || val2->lattice_val == VARYING)
1063     {
1064       /* any M VARYING = VARYING.  */
1065       val1->lattice_val = VARYING;
1066       val1->mask = -1;
1067       val1->value = NULL_TREE;
1068     }
1069   else if (val1->lattice_val == CONSTANT
1070 	   && val2->lattice_val == CONSTANT
1071 	   && TREE_CODE (val1->value) == INTEGER_CST
1072 	   && TREE_CODE (val2->value) == INTEGER_CST)
1073     {
1074       /* Ci M Cj = Ci		if (i == j)
1075 	 Ci M Cj = VARYING	if (i != j)
1076 
1077          For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
1078 	 drop to varying.  */
1079       val1->mask = (val1->mask | val2->mask
1080 		    | (wi::to_widest (val1->value)
1081 		       ^ wi::to_widest (val2->value)));
1082       if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1083 	{
1084 	  val1->lattice_val = VARYING;
1085 	  val1->value = NULL_TREE;
1086 	}
1087     }
1088   else if (val1->lattice_val == CONSTANT
1089 	   && val2->lattice_val == CONSTANT
1090 	   && operand_equal_p (val1->value, val2->value, 0))
1091     {
1092       /* Ci M Cj = Ci		if (i == j)
1093 	 Ci M Cj = VARYING	if (i != j)
1094 
1095          VAL1 already contains the value we want for equivalent values.  */
1096     }
1097   else if (val1->lattice_val == CONSTANT
1098 	   && val2->lattice_val == CONSTANT
1099 	   && (TREE_CODE (val1->value) == ADDR_EXPR
1100 	       || TREE_CODE (val2->value) == ADDR_EXPR))
1101     {
1102       /* When unequal addresses are involved, try meeting for
1103 	 alignment.  */
1104       ccp_prop_value_t tem = *val2;
1105       if (TREE_CODE (val1->value) == ADDR_EXPR)
1106 	*val1 = get_value_for_expr (val1->value, true);
1107       if (TREE_CODE (val2->value) == ADDR_EXPR)
1108 	tem = get_value_for_expr (val2->value, true);
1109       ccp_lattice_meet (val1, &tem);
1110     }
1111   else
1112     {
1113       /* Any other combination is VARYING.  */
1114       val1->lattice_val = VARYING;
1115       val1->mask = -1;
1116       val1->value = NULL_TREE;
1117     }
1118 }
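
/* For illustration only (compiled out): the INTEGER_CST case of the
   meet above, on plain 64-bit integers.  A bit that is unknown on
   either side, or on which the two values disagree, becomes unknown in
   the result.  The names bits_value and bits_meet are invented for
   this sketch.  */
#if 0
#include <cassert>
#include <cstdint>

struct bits_value { uint64_t value, mask; };

static bits_value
bits_meet (bits_value a, bits_value b)
{
  uint64_t mask = a.mask | b.mask | (a.value ^ b.value);
  return { a.value, mask };
}

int
main ()
{
  /* Meeting the fully known constants 0x12 and 0x16: they agree on all
     bits except bit 2, so the result is "0x12 with bit 2 unknown".  */
  bits_value m = bits_meet ({ 0x12, 0 }, { 0x16, 0 });
  assert (m.mask == 0x4);
  assert ((m.value & ~m.mask) == 0x12);
  return 0;
}
#endif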
1119 
1120 
1121 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1122    lattice values to determine PHI_NODE's lattice value.  The value of a
1123    PHI node is determined by calling ccp_lattice_meet on all the arguments
1124    of the PHI node that are incoming via executable edges.  */
1125 
1126 enum ssa_prop_result
1127 ccp_propagate::visit_phi (gphi *phi)
1128 {
1129   unsigned i;
1130   ccp_prop_value_t new_val;
1131 
1132   if (dump_file && (dump_flags & TDF_DETAILS))
1133     {
1134       fprintf (dump_file, "\nVisiting PHI node: ");
1135       print_gimple_stmt (dump_file, phi, 0, dump_flags);
1136     }
1137 
1138   new_val.lattice_val = UNDEFINED;
1139   new_val.value = NULL_TREE;
1140   new_val.mask = 0;
1141 
1142   bool first = true;
1143   bool non_exec_edge = false;
1144   for (i = 0; i < gimple_phi_num_args (phi); i++)
1145     {
1146       /* Compute the meet operator over all the PHI arguments flowing
1147 	 through executable edges.  */
1148       edge e = gimple_phi_arg_edge (phi, i);
1149 
1150       if (dump_file && (dump_flags & TDF_DETAILS))
1151 	{
1152 	  fprintf (dump_file,
1153 	      "\tArgument #%d (%d -> %d %sexecutable)\n",
1154 	      i, e->src->index, e->dest->index,
1155 	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1156 	}
1157 
1158       /* If the incoming edge is executable, compute the meet operator for
1159 	 the existing value of the PHI node and the current PHI argument.  */
1160       if (e->flags & EDGE_EXECUTABLE)
1161 	{
1162 	  tree arg = gimple_phi_arg (phi, i)->def;
1163 	  ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1164 
1165 	  if (first)
1166 	    {
1167 	      new_val = arg_val;
1168 	      first = false;
1169 	    }
1170 	  else
1171 	    ccp_lattice_meet (&new_val, &arg_val);
1172 
1173 	  if (dump_file && (dump_flags & TDF_DETAILS))
1174 	    {
1175 	      fprintf (dump_file, "\t");
1176 	      print_generic_expr (dump_file, arg, dump_flags);
1177 	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
1178 	      fprintf (dump_file, "\n");
1179 	    }
1180 
1181 	  if (new_val.lattice_val == VARYING)
1182 	    break;
1183 	}
1184       else
1185 	non_exec_edge = true;
1186     }
1187 
1188   /* In case there were non-executable edges and the value is a copy,
1189      make sure its definition dominates the PHI node.  */
1190   if (non_exec_edge
1191       && new_val.lattice_val == CONSTANT
1192       && TREE_CODE (new_val.value) == SSA_NAME
1193       && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1194       && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1195 			   gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1196     {
1197       new_val.lattice_val = VARYING;
1198       new_val.value = NULL_TREE;
1199       new_val.mask = -1;
1200     }
1201 
1202   if (dump_file && (dump_flags & TDF_DETAILS))
1203     {
1204       dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
1205       fprintf (dump_file, "\n\n");
1206     }
1207 
1208   /* Make the transition to the new value.  */
1209   if (set_lattice_value (gimple_phi_result (phi), &new_val))
1210     {
1211       if (new_val.lattice_val == VARYING)
1212 	return SSA_PROP_VARYING;
1213       else
1214 	return SSA_PROP_INTERESTING;
1215     }
1216   else
1217     return SSA_PROP_NOT_INTERESTING;
1218 }
1219 
1220 /* Return the constant value for OP or OP otherwise.  */
1221 
1222 static tree
1223 valueize_op (tree op)
1224 {
1225   if (TREE_CODE (op) == SSA_NAME)
1226     {
1227       tree tem = get_constant_value (op);
1228       if (tem)
1229 	return tem;
1230     }
1231   return op;
1232 }
1233 
1234 /* Return the constant value for OP, but signal to not follow SSA
1235    edges if the definition may be simulated again.  */
1236 
1237 static tree
1238 valueize_op_1 (tree op)
1239 {
1240   if (TREE_CODE (op) == SSA_NAME)
1241     {
1242       /* If the definition may be simulated again we cannot follow
1243          this SSA edge as the SSA propagator does not necessarily
1244 	 re-visit the use.  */
1245       gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1246       if (!gimple_nop_p (def_stmt)
1247 	  && prop_simulate_again_p (def_stmt))
1248 	return NULL_TREE;
1249       tree tem = get_constant_value (op);
1250       if (tem)
1251 	return tem;
1252     }
1253   return op;
1254 }
1255 
1256 /* CCP specific front-end to the non-destructive constant folding
1257    routines.
1258 
1259    Attempt to simplify the RHS of STMT knowing that one or more
1260    operands are constants.
1261 
1262    If simplification is possible, return the simplified RHS,
1263    otherwise return the original RHS or NULL_TREE.  */
1264 
1265 static tree
1266 ccp_fold (gimple *stmt)
1267 {
1268   location_t loc = gimple_location (stmt);
1269   switch (gimple_code (stmt))
1270     {
1271     case GIMPLE_COND:
1272       {
1273         /* Handle comparison operators that can appear in GIMPLE form.  */
1274         tree op0 = valueize_op (gimple_cond_lhs (stmt));
1275         tree op1 = valueize_op (gimple_cond_rhs (stmt));
1276         enum tree_code code = gimple_cond_code (stmt);
1277         return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1278       }
1279 
1280     case GIMPLE_SWITCH:
1281       {
1282 	/* Return the constant switch index.  */
1283         return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1284       }
1285 
1286     case GIMPLE_ASSIGN:
1287     case GIMPLE_CALL:
1288       return gimple_fold_stmt_to_constant_1 (stmt,
1289 					     valueize_op, valueize_op_1);
1290 
1291     default:
1292       gcc_unreachable ();
1293     }
1294 }
1295 
1296 /* Determine the minimum and maximum values, *MIN and *MAX respectively,
1297    represented by the mask pair VAL and MASK with signedness SGN and
1298    precision PRECISION.  */
1299 
1300 void
1301 value_mask_to_min_max (widest_int *min, widest_int *max,
1302 		       const widest_int &val, const widest_int &mask,
1303 		       signop sgn, int precision)
1304 {
1305   *min = wi::bit_and_not (val, mask);
1306   *max = val | mask;
1307   if (sgn == SIGNED && wi::neg_p (mask))
1308     {
1309       widest_int sign_bit = wi::lshift (1, precision - 1);
1310       *min ^= sign_bit;
1311       *max ^= sign_bit;
1312       /* MAX is zero extended, and MIN is sign extended.  */
1313       *min = wi::ext (*min, precision, sgn);
1314       *max = wi::ext (*max, precision, sgn);
1315     }
1316 }
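
/* For illustration only (compiled out): the unsigned case of the
   min/max computation above, on plain 64-bit integers.  The smallest
   possible value sets every unknown bit to 0, the largest sets every
   unknown bit to 1.  The helper name umin_umax_from_bits is invented
   for this sketch.  */
#if 0
#include <cassert>
#include <cstdint>

static void
umin_umax_from_bits (uint64_t val, uint64_t mask,
		     uint64_t *min, uint64_t *max)
{
  *min = val & ~mask;
  *max = val | mask;
}

int
main ()
{
  /* Value 0x10 with the low nibble unknown ranges over 0x10 .. 0x1f.  */
  uint64_t lo, hi;
  umin_umax_from_bits (0x10, 0x0f, &lo, &hi);
  assert (lo == 0x10 && hi == 0x1f);
  return 0;
}
#endif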
1317 
1318 /* Apply the operation CODE in type TYPE to the value, mask pair
1319    RVAL and RMASK representing a value of type RTYPE and set
1320    the value, mask pair *VAL and *MASK to the result.  */
1321 
1322 void
1323 bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
1324 		widest_int *val, widest_int *mask,
1325 		signop rtype_sgn, int rtype_precision,
1326 		const widest_int &rval, const widest_int &rmask)
1327 {
1328   switch (code)
1329     {
1330     case BIT_NOT_EXPR:
1331       *mask = rmask;
1332       *val = ~rval;
1333       break;
1334 
1335     case NEGATE_EXPR:
1336       {
1337 	widest_int temv, temm;
1338 	/* Return ~rval + 1.  */
1339 	bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
1340 			type_sgn, type_precision, rval, rmask);
1341 	bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
1342 			 type_sgn, type_precision, temv, temm,
1343 			 type_sgn, type_precision, 1, 0);
1344 	break;
1345       }
1346 
1347     CASE_CONVERT:
1348       {
1349 	/* First extend mask and value according to the original type.  */
1350 	*mask = wi::ext (rmask, rtype_precision, rtype_sgn);
1351 	*val = wi::ext (rval, rtype_precision, rtype_sgn);
1352 
1353 	/* Then extend mask and value according to the target type.  */
1354 	*mask = wi::ext (*mask, type_precision, type_sgn);
1355 	*val = wi::ext (*val, type_precision, type_sgn);
1356 	break;
1357       }
1358 
1359     case ABS_EXPR:
1360     case ABSU_EXPR:
1361       if (wi::sext (rmask, rtype_precision) == -1)
1362 	*mask = -1;
1363       else if (wi::neg_p (rmask))
1364 	{
1365 	  /* Result is either rval or -rval.  */
1366 	  widest_int temv, temm;
1367 	  bit_value_unop (NEGATE_EXPR, rtype_sgn, rtype_precision, &temv,
1368 			  &temm, type_sgn, type_precision, rval, rmask);
1369 	  temm |= (rmask | (rval ^ temv));
1370 	  /* Extend the result.  */
1371 	  *mask = wi::ext (temm, type_precision, type_sgn);
1372 	  *val = wi::ext (temv, type_precision, type_sgn);
1373 	}
1374       else if (wi::neg_p (rval))
1375 	{
1376 	  bit_value_unop (NEGATE_EXPR, type_sgn, type_precision, val, mask,
1377 			  type_sgn, type_precision, rval, rmask);
1378 	}
1379       else
1380 	{
1381 	  *mask = rmask;
1382 	  *val = rval;
1383 	}
1384       break;
1385 
1386     default:
1387       *mask = -1;
1388       break;
1389     }
1390 }
1391 
1392 /* Determine the mask pair *VAL and *MASK from multiplying the
1393    argument mask pair RVAL, RMASK by the unsigned constant C.  */
1394 void
1395 bit_value_mult_const (signop sgn, int width,
1396 		      widest_int *val, widest_int *mask,
1397 		      const widest_int &rval, const widest_int &rmask,
1398 		      widest_int c)
1399 {
1400   widest_int sum_mask = 0;
1401 
1402   /* Ensure rval_lo only contains known bits.  */
1403   widest_int rval_lo = wi::bit_and_not (rval, rmask);
1404 
1405   if (rval_lo != 0)
1406     {
1407       /* General case (some bits of multiplicand are known set).  */
1408       widest_int sum_val = 0;
1409       while (c != 0)
1410 	{
1411 	  /* Determine the lowest bit set in the multiplier.  */
1412 	  int bitpos = wi::ctz (c);
1413 	  widest_int term_mask = rmask << bitpos;
1414 	  widest_int term_val = rval_lo << bitpos;
1415 
1416 	  /* sum += term.  */
1417 	  widest_int lo = sum_val + term_val;
1418 	  widest_int hi = (sum_val | sum_mask) + (term_val | term_mask);
1419 	  sum_mask |= term_mask | (lo ^ hi);
1420 	  sum_val = lo;
1421 
1422 	  /* Clear this bit in the multiplier.  */
1423 	  c ^= wi::lshift (1, bitpos);
1424 	}
1425       /* Correctly extend the result value.  */
1426       *val = wi::ext (sum_val, width, sgn);
1427     }
1428   else
1429     {
1430       /* Special case (no bits of multiplicand are known set).  */
1431       while (c != 0)
1432 	{
1433 	  /* Determine the lowest bit set in the multiplier.  */
1434 	  int bitpos = wi::ctz (c);
1435 	  widest_int term_mask = rmask << bitpos;
1436 
1437 	  /* sum += term.  */
1438 	  widest_int hi = sum_mask + term_mask;
1439 	  sum_mask |= term_mask | hi;
1440 
1441 	  /* Clear this bit in the multiplier.  */
1442 	  c ^= wi::lshift (1, bitpos);
1443 	}
1444       *val = 0;
1445     }
1446 
1447   /* Correctly extend the result mask.  */
1448   *mask = wi::ext (sum_mask, width, sgn);
1449 }
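
/* For illustration only (compiled out): a generic version of the
   partially known addition behind the "sum += term" steps above, on
   plain 64-bit integers.  The addition is done twice, once with every
   unknown bit taken as 0 and once with every unknown bit taken as 1;
   any result bit where the two sums differ may have received a
   different carry and so becomes unknown.  The names bits_value and
   bits_add are invented for this sketch.  */
#if 0
#include <cassert>
#include <cstdint>

struct bits_value { uint64_t value, mask; };

static bits_value
bits_add (bits_value a, bits_value b)
{
  uint64_t lo = (a.value & ~a.mask) + (b.value & ~b.mask);
  uint64_t hi = (a.value | a.mask) + (b.value | b.mask);
  uint64_t mask = a.mask | b.mask | (lo ^ hi);
  return { lo, mask };
}

int
main ()
{
  /* 0x10 plus "0x01 with bit 1 unknown" is either 0x11 or 0x13,
     i.e. 0x11 with bit 1 unknown.  */
  bits_value r = bits_add ({ 0x10, 0 }, { 0x01, 0x02 });
  assert ((r.value & ~r.mask) == 0x11);
  assert ((r.mask & 0x02) != 0);
  return 0;
}
#endif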
1450 
1451 /* Fill up to MAX values in the BITS array with values representing
1452    each of the non-zero bits in the value X.  Returns the number of
1453    bits in X (capped at the maximum value MAX).  For example, an X
1454    value of 11 places 1, 2 and 8 in BITS and returns the value 3.  */
1455 
1456 unsigned int
1457 get_individual_bits (widest_int *bits, widest_int x, unsigned int max)
1458 {
1459   unsigned int count = 0;
1460   while (count < max && x != 0)
1461     {
1462       int bitpos = wi::ctz (x);
1463       bits[count] = wi::lshift (1, bitpos);
1464       x ^= bits[count];
1465       count++;
1466     }
1467   return count;
1468 }
1469 
1470 /* Array of 2^N - 1 values representing the bits flipped between
1471    consecutive Gray codes.  This is used to efficiently enumerate
1472    all 2^N combinations of N bits using XOR.  */
1473 static const unsigned char gray_code_bit_flips[63] = {
1474   0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
1475   0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,
1476   0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
1477   0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
1478 };
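
/* For illustration only (compiled out): how a Gray-code table like the
   one above enumerates every combination of N individual bits by
   flipping exactly one bit per step, as done for partially known shift
   and rotate counts below.  The local gray_flips table repeats the
   first 2^3 - 1 entries of gray_code_bit_flips.  */
#if 0
#include <cassert>
#include <cstdint>
#include <set>

static const unsigned char gray_flips[7] = { 0, 1, 0, 2, 0, 1, 0 };

int
main ()
{
  /* Three unknown bits: 0x1, 0x4 and 0x10.  Starting from 0 and
     applying the seven flips visits all 2^3 = 8 combinations once.  */
  const uint64_t bits[3] = { 0x1, 0x4, 0x10 };
  uint64_t x = 0;
  std::set<uint64_t> seen = { x };
  for (unsigned int i = 0; i < 7; i++)
    {
      x ^= bits[gray_flips[i]];
      seen.insert (x);
    }
  assert (seen.size () == 8);
  return 0;
}
#endif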
1479 
1480 /* Apply the operation CODE in type TYPE to the value, mask pairs
1481    R1VAL, R1MASK and R2VAL, R2MASK representing values of types R1TYPE
1482    and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
1483 
1484 void
1485 bit_value_binop (enum tree_code code, signop sgn, int width,
1486 		 widest_int *val, widest_int *mask,
1487 		 signop r1type_sgn, int r1type_precision,
1488 		 const widest_int &r1val, const widest_int &r1mask,
1489 		 signop r2type_sgn, int r2type_precision ATTRIBUTE_UNUSED,
1490 		 const widest_int &r2val, const widest_int &r2mask)
1491 {
1492   bool swap_p = false;
1493 
1494   /* Assume we'll get a constant result.  Use an initial non-varying
1495      value; we fall back to varying in the end if necessary.  */
1496   *mask = -1;
1497   /* Ensure that VAL is initialized (to any value).  */
1498   *val = 0;
1499 
1500   switch (code)
1501     {
1502     case BIT_AND_EXPR:
1503       /* The mask is constant where there is a known not
1504 	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1505       *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1506       *val = r1val & r2val;
1507       break;
1508 
1509     case BIT_IOR_EXPR:
1510       /* The mask is constant where there is a known
1511 	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
1512       *mask = wi::bit_and_not (r1mask | r2mask,
1513 			       wi::bit_and_not (r1val, r1mask)
1514 			       | wi::bit_and_not (r2val, r2mask));
1515       *val = r1val | r2val;
1516       break;
1517 
1518     case BIT_XOR_EXPR:
1519       /* m1 | m2  */
1520       *mask = r1mask | r2mask;
1521       *val = r1val ^ r2val;
1522       break;
1523 
1524     case LROTATE_EXPR:
1525     case RROTATE_EXPR:
1526       if (r2mask == 0)
1527 	{
1528 	  widest_int shift = r2val;
1529 	  if (shift == 0)
1530 	    {
1531 	      *mask = r1mask;
1532 	      *val = r1val;
1533 	    }
1534 	  else
1535 	    {
1536 	      if (wi::neg_p (shift, r2type_sgn))
1537 		{
1538 		  shift = -shift;
1539 		  if (code == RROTATE_EXPR)
1540 		    code = LROTATE_EXPR;
1541 		  else
1542 		    code = RROTATE_EXPR;
1543 		}
1544 	      if (code == RROTATE_EXPR)
1545 		{
1546 		  *mask = wi::rrotate (r1mask, shift, width);
1547 		  *val = wi::rrotate (r1val, shift, width);
1548 		}
1549 	      else
1550 		{
1551 		  *mask = wi::lrotate (r1mask, shift, width);
1552 		  *val = wi::lrotate (r1val, shift, width);
1553 		}
1554 	    }
1555 	}
1556       else if (wi::ltu_p (r2val | r2mask, width)
1557 	       && wi::popcount (r2mask) <= 4)
1558 	{
1559 	  widest_int bits[4];
1560 	  widest_int res_val, res_mask;
1561 	  widest_int tmp_val, tmp_mask;
1562 	  widest_int shift = wi::bit_and_not (r2val, r2mask);
1563 	  unsigned int bit_count = get_individual_bits (bits, r2mask, 4);
1564 	  unsigned int count = (1 << bit_count) - 1;
1565 
1566 	  /* Initialize result to rotate by smallest value of shift.  */
1567 	  if (code == RROTATE_EXPR)
1568 	    {
1569 	      res_mask = wi::rrotate (r1mask, shift, width);
1570 	      res_val = wi::rrotate (r1val, shift, width);
1571 	    }
1572 	  else
1573 	    {
1574 	      res_mask = wi::lrotate (r1mask, shift, width);
1575 	      res_val = wi::lrotate (r1val, shift, width);
1576 	    }
1577 
1578 	  /* Iterate through the remaining values of shift.  */
1579 	  for (unsigned int i=0; i<count; i++)
1580 	    {
1581 	      shift ^= bits[gray_code_bit_flips[i]];
1582 	      if (code == RROTATE_EXPR)
1583 		{
1584 		  tmp_mask = wi::rrotate (r1mask, shift, width);
1585 		  tmp_val = wi::rrotate (r1val, shift, width);
1586 		}
1587 	      else
1588 		{
1589 		  tmp_mask = wi::lrotate (r1mask, shift, width);
1590 		  tmp_val = wi::lrotate (r1val, shift, width);
1591 		}
1592 	      /* Accumulate the result.  */
1593 	      res_mask |= tmp_mask | (res_val ^ tmp_val);
1594 	    }
1595 	  *val = wi::bit_and_not (res_val, res_mask);
1596 	  *mask = res_mask;
1597 	}
1598       break;
1599 
1600     case LSHIFT_EXPR:
1601     case RSHIFT_EXPR:
1602       /* ???  We can handle partially known shift counts if we know
1603 	 their sign.  That way we can tell that (x << (y | 8)) & 255
1604 	 is zero.  */
1605       if (r2mask == 0)
1606 	{
1607 	  widest_int shift = r2val;
1608 	  if (shift == 0)
1609 	    {
1610 	      *mask = r1mask;
1611 	      *val = r1val;
1612 	    }
1613 	  else
1614 	    {
1615 	      if (wi::neg_p (shift, r2type_sgn))
1616 		break;
1617 	      if (code == RSHIFT_EXPR)
1618 		{
1619 		  *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1620 		  *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1621 		}
1622 	      else
1623 		{
1624 		  *mask = wi::ext (r1mask << shift, width, sgn);
1625 		  *val = wi::ext (r1val << shift, width, sgn);
1626 		}
1627 	    }
1628 	}
1629       else if (wi::ltu_p (r2val | r2mask, width))
1630 	{
1631 	  if (wi::popcount (r2mask) <= 4)
1632 	    {
1633 	      widest_int bits[4];
1634 	      widest_int arg_val, arg_mask;
1635 	      widest_int res_val, res_mask;
1636 	      widest_int tmp_val, tmp_mask;
1637 	      widest_int shift = wi::bit_and_not (r2val, r2mask);
1638 	      unsigned int bit_count = get_individual_bits (bits, r2mask, 4);
1639 	      unsigned int count = (1 << bit_count) - 1;
1640 
1641 	      /* Initialize result to shift by smallest value of shift.  */
1642 	      if (code == RSHIFT_EXPR)
1643 		{
1644 		  arg_mask = wi::ext (r1mask, width, sgn);
1645 		  arg_val = wi::ext (r1val, width, sgn);
1646 		  res_mask = wi::rshift (arg_mask, shift, sgn);
1647 		  res_val = wi::rshift (arg_val, shift, sgn);
1648 		}
1649 	      else
1650 		{
1651 		  arg_mask = r1mask;
1652 		  arg_val = r1val;
1653 		  res_mask = arg_mask << shift;
1654 		  res_val = arg_val << shift;
1655 		}
1656 
1657 	      /* Iterate through the remaining values of shift.  */
1658 	      for (unsigned int i=0; i<count; i++)
1659 		{
1660 		  shift ^= bits[gray_code_bit_flips[i]];
1661 		  if (code == RSHIFT_EXPR)
1662 		    {
1663 		      tmp_mask = wi::rshift (arg_mask, shift, sgn);
1664 		      tmp_val = wi::rshift (arg_val, shift, sgn);
1665 		    }
1666 		  else
1667 		    {
1668 		      tmp_mask = arg_mask << shift;
1669 		      tmp_val = arg_val << shift;
1670 		    }
1671 		  /* Accumulate the result.  */
1672 		  res_mask |= tmp_mask | (res_val ^ tmp_val);
1673 		}
1674 	      res_mask = wi::ext (res_mask, width, sgn);
1675 	      res_val = wi::ext (res_val, width, sgn);
1676 	      *val = wi::bit_and_not (res_val, res_mask);
1677 	      *mask = res_mask;
1678 	    }
1679 	  else if ((r1val | r1mask) == 0)
1680 	    {
1681 	      /* Handle shifts of zero to avoid undefined wi::ctz below.  */
1682 	      *mask = 0;
1683 	      *val = 0;
1684 	    }
1685 	  else if (code == LSHIFT_EXPR)
1686 	    {
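	      /* The result keeps at least ctz (r1) plus the minimum shift
		 count trailing zero bits.  For illustration: if r1 is a
		 multiple of 4 and the low two bits of the shift count are
		 known to be set (so the count is at least 3), the low five
		 bits of the result are known to be zero.  */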
1687 	      widest_int tmp = wi::mask <widest_int> (width, false);
1688 	      tmp <<= wi::ctz (r1val | r1mask);
1689 	      tmp <<= wi::bit_and_not (r2val, r2mask);
1690 	      *mask = wi::ext (tmp, width, sgn);
1691 	      *val = 0;
1692 	    }
1693 	  else if (!wi::neg_p (r1val | r1mask, sgn))
1694 	    {
1695 	      /* Logical right shift, or zero sign bit.  */
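	      /* Mirror of the LSHIFT_EXPR case above: the result keeps at
		 least the leading zero bits of r1 plus the minimum shift
		 count.  For illustration with width 8: if r1 <= 0x3f and
		 the low bit of the shift count is known to be set, the top
		 three bits of the result are known to be zero.  */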
1696 	      widest_int arg = r1val | r1mask;
1697 	      int lzcount = wi::clz (arg);
1698 	      if (lzcount)
1699 		lzcount -= wi::get_precision (arg) - width;
1700 	      widest_int tmp = wi::mask <widest_int> (width, false);
1701 	      tmp = wi::lrshift (tmp, lzcount);
1702 	      tmp = wi::lrshift (tmp, wi::bit_and_not (r2val, r2mask));
1703 	      *mask = wi::ext (tmp, width, sgn);
1704 	      *val = 0;
1705 	    }
1706 	  else if (!wi::neg_p (r1mask))
1707 	    {
1708 	      /* Arithmetic right shift with set sign bit.  */
1709 	      widest_int arg = wi::bit_and_not (r1val, r1mask);
1710 	      int sbcount = wi::clrsb (arg);
1711 	      sbcount -= wi::get_precision (arg) - width;
1712 	      widest_int tmp = wi::mask <widest_int> (width, false);
1713 	      tmp = wi::lrshift (tmp, sbcount);
1714 	      tmp = wi::lrshift (tmp, wi::bit_and_not (r2val, r2mask));
1715 	      *mask = wi::sext (tmp, width);
1716 	      tmp = wi::bit_not (tmp);
1717 	      *val = wi::sext (tmp, width);
1718 	    }
1719 	}
1720       break;
1721 
1722     case PLUS_EXPR:
1723     case POINTER_PLUS_EXPR:
1724       {
1725 	/* Do the addition with unknown bits set to zero, to give carry-ins of
1726 	   zero wherever possible.  */
1727 	widest_int lo = (wi::bit_and_not (r1val, r1mask)
1728 			 + wi::bit_and_not (r2val, r2mask));
1729 	lo = wi::ext (lo, width, sgn);
1730 	/* Do the addition with unknown bits set to one, to give carry-ins of
1731 	   one wherever possible.  */
1732 	widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1733 	hi = wi::ext (hi, width, sgn);
1734 	/* Each bit in the result is known if (a) the corresponding bits in
1735 	   both inputs are known, and (b) the carry-in to that bit position
1736 	   is known.  We can check condition (b) by seeing if we got the same
1737 	   result with minimised carries as with maximised carries.  */
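	/* For illustration with 4-bit unsigned operands: if r1 is 10xx
	   (val 0b1000, mask 0b0011) and r2 is exactly 4, then
	   lo = 8 + 4 = 0b1100 and hi = 11 + 4 = 0b1111, so lo ^ hi = 0b0011
	   and the result is known to be 11xx, with only its low two bits
	   unknown.  */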
1738 	*mask = r1mask | r2mask | (lo ^ hi);
1739 	*mask = wi::ext (*mask, width, sgn);
1740 	/* It shouldn't matter whether we choose lo or hi here.  */
1741 	*val = lo;
1742 	break;
1743       }
1744 
1745     case MINUS_EXPR:
1746     case POINTER_DIFF_EXPR:
1747       {
1748 	/* Subtraction is derived from the addition algorithm above.  */
1749 	widest_int lo = wi::bit_and_not (r1val, r1mask) - (r2val | r2mask);
1750 	lo = wi::ext (lo, width, sgn);
1751 	widest_int hi = (r1val | r1mask) - wi::bit_and_not (r2val, r2mask);
1752 	hi = wi::ext (hi, width, sgn);
1753 	*mask = r1mask | r2mask | (lo ^ hi);
1754 	*mask = wi::ext (*mask, width, sgn);
1755 	*val = lo;
1756 	break;
1757       }
1758 
1759     case MULT_EXPR:
1760       if (r2mask == 0
1761 	  && !wi::neg_p (r2val, sgn)
1762 	  && (flag_expensive_optimizations || wi::popcount (r2val) < 8))
1763 	bit_value_mult_const (sgn, width, val, mask, r1val, r1mask, r2val);
1764       else if (r1mask == 0
1765 	       && !wi::neg_p (r1val, sgn)
1766 	       && (flag_expensive_optimizations || wi::popcount (r1val) < 8))
1767 	bit_value_mult_const (sgn, width, val, mask, r2val, r2mask, r1val);
1768       else
1769 	{
1770 	  /* Just track trailing zeros in both operands and transfer
1771 	     them to the other.  */
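	  /* For illustration: if r1 is a multiple of 4 and r2 is a multiple
	     of 2, the product is a multiple of 8, so the low three bits of
	     the result are known to be zero.  */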
1772 	  int r1tz = wi::ctz (r1val | r1mask);
1773 	  int r2tz = wi::ctz (r2val | r2mask);
1774 	  if (r1tz + r2tz >= width)
1775 	    {
1776 	      *mask = 0;
1777 	      *val = 0;
1778 	    }
1779 	  else if (r1tz + r2tz > 0)
1780 	    {
1781 	      *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1782 			       width, sgn);
1783 	      *val = 0;
1784 	    }
1785 	}
1786       break;
1787 
1788     case EQ_EXPR:
1789     case NE_EXPR:
1790       {
1791 	widest_int m = r1mask | r2mask;
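	/* If the operands differ in a bit that is known on both sides, the
	   comparison is decided.  For illustration: an even r1 can never
	   compare equal to an odd r2, whatever their unknown bits are.  */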
1792 	if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m))
1793 	  {
1794 	    *mask = 0;
1795 	    *val = ((code == EQ_EXPR) ? 0 : 1);
1796 	  }
1797 	else
1798 	  {
1799 	    /* We know the result of a comparison is always one or zero.  */
1800 	    *mask = 1;
1801 	    *val = 0;
1802 	  }
1803 	break;
1804       }
1805 
1806     case GE_EXPR:
1807     case GT_EXPR:
1808       swap_p = true;
1809       code = swap_tree_comparison (code);
1810       /* Fall through.  */
1811     case LT_EXPR:
1812     case LE_EXPR:
1813       {
1814 	widest_int min1, max1, min2, max2;
1815 	int minmax, maxmin;
1816 
1817 	const widest_int &o1val = swap_p ? r2val : r1val;
1818 	const widest_int &o1mask = swap_p ? r2mask : r1mask;
1819 	const widest_int &o2val = swap_p ? r1val : r2val;
1820 	const widest_int &o2mask = swap_p ? r1mask : r2mask;
1821 
1822 	value_mask_to_min_max (&min1, &max1, o1val, o1mask,
1823 			       r1type_sgn, r1type_precision);
1824 	value_mask_to_min_max (&min2, &max2, o2val, o2mask,
1825 			       r1type_sgn, r1type_precision);
1826 
1827 	/* For comparisons the signedness is in the comparison operands.  */
1828 	/* Do a cross comparison of the max/min pairs.  */
1829 	maxmin = wi::cmp (max1, min2, r1type_sgn);
1830 	minmax = wi::cmp (min1, max2, r1type_sgn);
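	/* For illustration with unsigned operands: o1 in [3, 7] and
	   o2 in [10, 14] give maxmin < 0, so o1 < o2 always holds and the
	   result is a known 1; overlapping ranges fall through to the
	   final case and leave the result unknown.  */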
1831 	if (maxmin < (code == LE_EXPR ? 1: 0))  /* o1 < or <= o2.  */
1832 	  {
1833 	    *mask = 0;
1834 	    *val = 1;
1835 	  }
1836 	else if (minmax > (code == LT_EXPR ? -1 : 0))  /* o1 >= or > o2.  */
1837 	  {
1838 	    *mask = 0;
1839 	    *val = 0;
1840 	  }
1841 	else if (maxmin == minmax)  /* o1 and o2 are equal.  */
1842 	  {
1843 	    /* This should rarely happen, since the operands would already
1844 	       have been folded during fully constant value folding.  */
1845 	    *mask = 0;
1846 	    *val = (code == LE_EXPR ? 1 : 0);
1847 	  }
1848 	else
1849 	  {
1850 	    /* We know the result of a comparison is always one or zero.  */
1851 	    *mask = 1;
1852 	    *val = 0;
1853 	  }
1854 	break;
1855       }
1856 
1857     case MIN_EXPR:
1858     case MAX_EXPR:
1859       {
1860 	widest_int min1, max1, min2, max2;
1861 
1862 	value_mask_to_min_max (&min1, &max1, r1val, r1mask, sgn, width);
1863 	value_mask_to_min_max (&min2, &max2, r2val, r2mask, sgn, width);
1864 
1865 	if (wi::cmp (max1, min2, sgn) <= 0)  /* r1 is less than r2.  */
1866 	  {
1867 	    if (code == MIN_EXPR)
1868 	      {
1869 		*mask = r1mask;
1870 		*val = r1val;
1871 	      }
1872 	    else
1873 	      {
1874 		*mask = r2mask;
1875 		*val = r2val;
1876 	      }
1877 	  }
1878 	else if (wi::cmp (min1, max2, sgn) >= 0)  /* r2 is less than r1.  */
1879 	  {
1880 	    if (code == MIN_EXPR)
1881 	      {
1882 		*mask = r2mask;
1883 		*val = r2val;
1884 	      }
1885 	    else
1886 	      {
1887 		*mask = r1mask;
1888 		*val = r1val;
1889 	      }
1890 	  }
1891 	else
1892 	  {
1893 	    /* The result is either r1 or r2.  */
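	    /* A result bit stays known only if both operands know it and
	       agree on it; e.g. if r1 and r2 are both known to be even,
	       so is their minimum or maximum.  */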
1894 	    *mask = r1mask | r2mask | (r1val ^ r2val);
1895 	    *val = r1val;
1896 	  }
1897 	break;
1898       }
1899 
1900     case TRUNC_MOD_EXPR:
1901       {
1902 	widest_int r1max = r1val | r1mask;
1903 	widest_int r2max = r2val | r2mask;
1904 	if (sgn == UNSIGNED
1905 	    || (!wi::neg_p (r1max) && !wi::neg_p (r2max)))
1906 	  {
1907 	    /* Confirm R2 has some bits set, to avoid division by zero.  */
1908 	    widest_int r2min = wi::bit_and_not (r2val, r2mask);
1909 	    if (r2min != 0)
1910 	      {
1911 		/* R1 % R2 is R1 if R1 is always less than R2.  */
1912 		if (wi::ltu_p (r1max, r2min))
1913 		  {
1914 		    *mask = r1mask;
1915 		    *val = r1val;
1916 		  }
1917 		else
1918 		  {
1919 		    /* R1 % R2 is always less than the maximum of R2.  */
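		    /* For illustration: if R2 is at most 10, the remainder
		       is at most 9 and fits in four bits, so all higher
		       bits of the result are known to be zero.  */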
1920 		    unsigned int lzcount = wi::clz (r2max);
1921 		    unsigned int bits = wi::get_precision (r2max) - lzcount;
1922 		    if (r2max == wi::lshift (1, bits))
1923 		      bits--;
1924 		    *mask = wi::mask <widest_int> (bits, false);
1925 		    *val = 0;
1926 		  }
1927 	       }
1928 	    }
1929 	}
1930       break;
1931 
1932     case TRUNC_DIV_EXPR:
1933       {
1934 	widest_int r1max = r1val | r1mask;
1935 	widest_int r2max = r2val | r2mask;
1936 	if (sgn == UNSIGNED
1937 	    || (!wi::neg_p (r1max) && !wi::neg_p (r2max)))
1938 	  {
1939 	    /* Confirm R2 has some bits set, to avoid division by zero.  */
1940 	    widest_int r2min = wi::bit_and_not (r2val, r2mask);
1941 	    if (r2min != 0)
1942 	      {
1943 		/* R1 / R2 is zero if R1 is always less than R2.  */
1944 		if (wi::ltu_p (r1max, r2min))
1945 		  {
1946 		    *mask = 0;
1947 		    *val = 0;
1948 		  }
1949 		else
1950 		  {
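		    /* The quotient cannot exceed R1MAX / R2MIN.  For
		       illustration: R1 <= 100 and R2 >= 10 bound the result
		       by 10, which fits in four bits, so the higher bits
		       are known to be zero.  */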
1951 		    widest_int upper = wi::udiv_trunc (r1max, r2min);
1952 		    unsigned int lzcount = wi::clz (upper);
1953 		    unsigned int bits = wi::get_precision (upper) - lzcount;
1954 		    *mask = wi::mask <widest_int> (bits, false);
1955 		    *val = 0;
1956 		  }
1957 	       }
1958 	    }
1959 	}
1960       break;
1961 
1962     default:;
1963     }
1964 }
1965 
1966 /* Return the propagation value when applying the operation CODE to
1967    the value RHS yielding type TYPE.  */
1968 
1969 static ccp_prop_value_t
1970 bit_value_unop (enum tree_code code, tree type, tree rhs)
1971 {
1972   ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1973   widest_int value, mask;
1974   ccp_prop_value_t val;
1975 
1976   if (rval.lattice_val == UNDEFINED)
1977     return rval;
1978 
1979   gcc_assert ((rval.lattice_val == CONSTANT
1980 	       && TREE_CODE (rval.value) == INTEGER_CST)
1981 	      || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1982   bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1983 		  TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
1984 		  value_to_wide_int (rval), rval.mask);
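  /* A mask that sign-extends to all ones within the type's precision means
     no bit of the result is known, so the value degrades to VARYING;
     otherwise the result is a CONSTANT with a partially known mask.  */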
1985   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1986     {
1987       val.lattice_val = CONSTANT;
1988       val.mask = mask;
1989       /* ???  Delay building trees here.  */
1990       val.value = wide_int_to_tree (type, value);
1991     }
1992   else
1993     {
1994       val.lattice_val = VARYING;
1995       val.value = NULL_TREE;
1996       val.mask = -1;
1997     }
1998   return val;
1999 }
2000 
2001 /* Return the propagation value when applying the operation CODE to
2002    the values RHS1 and RHS2 yielding type TYPE.  */
2003 
2004 static ccp_prop_value_t
2005 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
2006 {
2007   ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
2008   ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
2009   widest_int value, mask;
2010   ccp_prop_value_t val;
2011 
2012   if (r1val.lattice_val == UNDEFINED
2013       || r2val.lattice_val == UNDEFINED)
2014     {
2015       val.lattice_val = VARYING;
2016       val.value = NULL_TREE;
2017       val.mask = -1;
2018       return val;
2019     }
2020 
2021   gcc_assert ((r1val.lattice_val == CONSTANT
2022 	       && TREE_CODE (r1val.value) == INTEGER_CST)
2023 	      || wi::sext (r1val.mask,
2024 			   TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
2025   gcc_assert ((r2val.lattice_val == CONSTANT
2026 	       && TREE_CODE (r2val.value) == INTEGER_CST)
2027 	      || wi::sext (r2val.mask,
2028 			   TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
2029   bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
2030 		   TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
2031 		   value_to_wide_int (r1val), r1val.mask,
2032 		   TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
2033 		   value_to_wide_int (r2val), r2val.mask);
2034 
2035   /* (x * x) & 2 == 0.  */
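  /* An even square is a multiple of 4 and an odd square has the form
     8 * k + 1, so x * x is congruent to 0 or 1 modulo 4 and bit 1 of the
     result is always clear; force it to a known zero here.  */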
2036   if (code == MULT_EXPR && rhs1 == rhs2 && TYPE_PRECISION (type) > 1)
2037     {
2038       widest_int m = 2;
2039       if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
2040 	value = wi::bit_and_not (value, m);
2041       else
2042 	value = 0;
2043       mask = wi::bit_and_not (mask, m);
2044     }
2045 
2046   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
2047     {
2048       val.lattice_val = CONSTANT;
2049       val.mask = mask;
2050       /* ???  Delay building trees here.  */
2051       val.value = wide_int_to_tree (type, value);
2052     }
2053   else
2054     {
2055       val.lattice_val = VARYING;
2056       val.value = NULL_TREE;
2057       val.mask = -1;
2058     }
2059   return val;
2060 }
2061 
2062 /* Return the propagation value for __builtin_assume_aligned
2063    and functions with assume_aligned or alloc_aligned attribute.
2064    For __builtin_assume_aligned, ATTR is NULL_TREE,
2065    for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
2066    is false, for alloc_aligned attribute ATTR is non-NULL and
2067    ALLOC_ALIGNED is true.  */
2068 
2069 static ccp_prop_value_t
2070 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
2071 			  bool alloc_aligned)
2072 {
2073   tree align, misalign = NULL_TREE, type;
2074   unsigned HOST_WIDE_INT aligni, misaligni = 0;
2075   ccp_prop_value_t alignval;
2076   widest_int value, mask;
2077   ccp_prop_value_t val;
2078 
2079   if (attr == NULL_TREE)
2080     {
2081       tree ptr = gimple_call_arg (stmt, 0);
2082       type = TREE_TYPE (ptr);
2083       ptrval = get_value_for_expr (ptr, true);
2084     }
2085   else
2086     {
2087       tree lhs = gimple_call_lhs (stmt);
2088       type = TREE_TYPE (lhs);
2089     }
2090 
2091   if (ptrval.lattice_val == UNDEFINED)
2092     return ptrval;
2093   gcc_assert ((ptrval.lattice_val == CONSTANT
2094 	       && TREE_CODE (ptrval.value) == INTEGER_CST)
2095 	      || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
2096   if (attr == NULL_TREE)
2097     {
2098       /* Get aligni and misaligni from __builtin_assume_aligned.  */
2099       align = gimple_call_arg (stmt, 1);
2100       if (!tree_fits_uhwi_p (align))
2101 	return ptrval;
2102       aligni = tree_to_uhwi (align);
2103       if (gimple_call_num_args (stmt) > 2)
2104 	{
2105 	  misalign = gimple_call_arg (stmt, 2);
2106 	  if (!tree_fits_uhwi_p (misalign))
2107 	    return ptrval;
2108 	  misaligni = tree_to_uhwi (misalign);
2109 	}
2110     }
2111   else
2112     {
2113       /* Get aligni and misaligni from assume_aligned or
2114 	 alloc_align attributes.  */
2115       if (TREE_VALUE (attr) == NULL_TREE)
2116 	return ptrval;
2117       attr = TREE_VALUE (attr);
2118       align = TREE_VALUE (attr);
2119       if (!tree_fits_uhwi_p (align))
2120 	return ptrval;
2121       aligni = tree_to_uhwi (align);
2122       if (alloc_aligned)
2123 	{
2124 	  if (aligni == 0 || aligni > gimple_call_num_args (stmt))
2125 	    return ptrval;
2126 	  align = gimple_call_arg (stmt, aligni - 1);
2127 	  if (!tree_fits_uhwi_p (align))
2128 	    return ptrval;
2129 	  aligni = tree_to_uhwi (align);
2130 	}
2131       else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
2132 	{
2133 	  misalign = TREE_VALUE (TREE_CHAIN (attr));
2134 	  if (!tree_fits_uhwi_p (misalign))
2135 	    return ptrval;
2136 	  misaligni = tree_to_uhwi (misalign);
2137 	}
2138     }
2139   if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
2140     return ptrval;
2141 
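  /* For illustration: __builtin_assume_aligned (p, 16, 4) yields
     aligni = 16 and misaligni = 4; ANDing the pointer's bit-value with
     -16 makes the low four bits of the result known zero while the upper
     bits keep whatever is known about the pointer, and MISALIGNI is then
     ORed back into the known value below.  */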
2142   align = build_int_cst_type (type, -aligni);
2143   alignval = get_value_for_expr (align, true);
2144   bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
2145 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (ptrval), ptrval.mask,
2146 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (alignval), alignval.mask);
2147 
2148   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
2149     {
2150       val.lattice_val = CONSTANT;
2151       val.mask = mask;
2152       gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
2153       gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
2154       value |= misaligni;
2155       /* ???  Delay building trees here.  */
2156       val.value = wide_int_to_tree (type, value);
2157     }
2158   else
2159     {
2160       val.lattice_val = VARYING;
2161       val.value = NULL_TREE;
2162       val.mask = -1;
2163     }
2164   return val;
2165 }
2166 
2167 /* Evaluate statement STMT.
2168    Valid only for assignments, calls, conditionals, and switches. */
2169 
2170 static ccp_prop_value_t
2171 evaluate_stmt (gimple *stmt)
2172 {
2173   ccp_prop_value_t val;
2174   tree simplified = NULL_TREE;
2175   ccp_lattice_t likelyvalue = likely_value (stmt);
2176   bool is_constant = false;
2177   unsigned int align;
2178   bool ignore_return_flags = false;
2179 
2180   if (dump_file && (dump_flags & TDF_DETAILS))
2181     {
2182       fprintf (dump_file, "which is likely ");
2183       switch (likelyvalue)
2184 	{
2185 	case CONSTANT:
2186 	  fprintf (dump_file, "CONSTANT");
2187 	  break;
2188 	case UNDEFINED:
2189 	  fprintf (dump_file, "UNDEFINED");
2190 	  break;
2191 	case VARYING:
2192 	  fprintf (dump_file, "VARYING");
2193 	  break;
2194 	default:;
2195 	}
2196       fprintf (dump_file, "\n");
2197     }
2198 
2199   /* If the statement is likely to have a CONSTANT result, then try
2200      to fold the statement to determine the constant value.  */
2201   /* FIXME.  This is the only place that we call ccp_fold.
2202      Since likely_value never returns CONSTANT for calls, we will
2203      not attempt to fold them, including builtins that may profit.  */
2204   if (likelyvalue == CONSTANT)
2205     {
2206       fold_defer_overflow_warnings ();
2207       simplified = ccp_fold (stmt);
2208       if (simplified
2209 	  && TREE_CODE (simplified) == SSA_NAME)
2210 	{
2211 	  /* We may not use values of something that may be simulated again,
2212 	     see valueize_op_1.  */
2213 	  if (SSA_NAME_IS_DEFAULT_DEF (simplified)
2214 	      || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
2215 	    {
2216 	      ccp_prop_value_t *val = get_value (simplified);
2217 	      if (val && val->lattice_val != VARYING)
2218 		{
2219 		  fold_undefer_overflow_warnings (true, stmt, 0);
2220 		  return *val;
2221 		}
2222 	    }
2223 	  else
2224 	    /* We may also not place a non-valueized copy in the lattice
2225 	       as that might become stale if we never re-visit this stmt.  */
2226 	    simplified = NULL_TREE;
2227 	}
2228       is_constant = simplified && is_gimple_min_invariant (simplified);
2229       fold_undefer_overflow_warnings (is_constant, stmt, 0);
2230       if (is_constant)
2231 	{
2232 	  /* The statement produced a constant value.  */
2233 	  val.lattice_val = CONSTANT;
2234 	  val.value = simplified;
2235 	  val.mask = 0;
2236 	  return val;
2237 	}
2238     }
2239   /* If the statement is likely to have a VARYING result, then do not
2240      bother folding the statement.  */
2241   else if (likelyvalue == VARYING)
2242     {
2243       enum gimple_code code = gimple_code (stmt);
2244       if (code == GIMPLE_ASSIGN)
2245         {
2246           enum tree_code subcode = gimple_assign_rhs_code (stmt);
2247 
2248           /* Other cases cannot satisfy is_gimple_min_invariant
2249              without folding.  */
2250           if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
2251             simplified = gimple_assign_rhs1 (stmt);
2252         }
2253       else if (code == GIMPLE_SWITCH)
2254         simplified = gimple_switch_index (as_a <gswitch *> (stmt));
2255       else
2256 	/* These cannot satisfy is_gimple_min_invariant without folding.  */
2257 	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
2258       is_constant = simplified && is_gimple_min_invariant (simplified);
2259       if (is_constant)
2260 	{
2261 	  /* The statement produced a constant value.  */
2262 	  val.lattice_val = CONSTANT;
2263 	  val.value = simplified;
2264 	  val.mask = 0;
2265 	}
2266     }
2267   /* If the statement result is likely UNDEFINED, make it so.  */
2268   else if (likelyvalue == UNDEFINED)
2269     {
2270       val.lattice_val = UNDEFINED;
2271       val.value = NULL_TREE;
2272       val.mask = 0;
2273       return val;
2274     }
2275 
2276   /* Resort to simplification for bitwise tracking.  */
2277   if (flag_tree_bit_ccp
2278       && (likelyvalue == CONSTANT || is_gimple_call (stmt)
2279 	  || (gimple_assign_single_p (stmt)
2280 	      && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
2281       && !is_constant)
2282     {
2283       enum gimple_code code = gimple_code (stmt);
2284       val.lattice_val = VARYING;
2285       val.value = NULL_TREE;
2286       val.mask = -1;
2287       if (code == GIMPLE_ASSIGN)
2288 	{
2289 	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
2290 	  tree rhs1 = gimple_assign_rhs1 (stmt);
2291 	  tree lhs = gimple_assign_lhs (stmt);
2292 	  if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
2293 	       || POINTER_TYPE_P (TREE_TYPE (lhs)))
2294 	      && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
2295 		  || POINTER_TYPE_P (TREE_TYPE (rhs1))))
2296 	    switch (get_gimple_rhs_class (subcode))
2297 	      {
2298 	      case GIMPLE_SINGLE_RHS:
2299 	        val = get_value_for_expr (rhs1, true);
2300 		break;
2301 
2302 	      case GIMPLE_UNARY_RHS:
2303 		val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
2304 		break;
2305 
2306 	      case GIMPLE_BINARY_RHS:
2307 		val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
2308 				       gimple_assign_rhs2 (stmt));
2309 		break;
2310 
2311 	      default:;
2312 	      }
2313 	}
2314       else if (code == GIMPLE_COND)
2315 	{
2316 	  enum tree_code code = gimple_cond_code (stmt);
2317 	  tree rhs1 = gimple_cond_lhs (stmt);
2318 	  tree rhs2 = gimple_cond_rhs (stmt);
2319 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
2320 	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
2321 	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
2322 	}
2323       else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
2324 	{
2325 	  tree fndecl = gimple_call_fndecl (stmt);
2326 	  switch (DECL_FUNCTION_CODE (fndecl))
2327 	    {
2328 	    case BUILT_IN_MALLOC:
2329 	    case BUILT_IN_REALLOC:
2330 	    case BUILT_IN_CALLOC:
2331 	    case BUILT_IN_STRDUP:
2332 	    case BUILT_IN_STRNDUP:
2333 	      val.lattice_val = CONSTANT;
2334 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
2335 	      val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
2336 			   / BITS_PER_UNIT - 1);
2337 	      break;
2338 
2339 	    CASE_BUILT_IN_ALLOCA:
2340 	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA
2341 		       ? BIGGEST_ALIGNMENT
2342 		       : TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2343 	      val.lattice_val = CONSTANT;
2344 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
2345 	      val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
2346 	      break;
2347 
2348 	    case BUILT_IN_ASSUME_ALIGNED:
2349 	      val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
2350 	      ignore_return_flags = true;
2351 	      break;
2352 
2353 	    case BUILT_IN_ALIGNED_ALLOC:
2354 	    case BUILT_IN_GOMP_ALLOC:
2355 	      {
2356 		tree align = get_constant_value (gimple_call_arg (stmt, 0));
2357 		if (align
2358 		    && tree_fits_uhwi_p (align))
2359 		  {
2360 		    unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
2361 		    if (aligni > 1
2362 			/* align must be power-of-two */
2363 			&& (aligni & (aligni - 1)) == 0)
2364 		      {
2365 			val.lattice_val = CONSTANT;
2366 			val.value = build_int_cst (ptr_type_node, 0);
2367 			val.mask = -aligni;
2368 		      }
2369 		  }
2370 		break;
2371 	      }
2372 
2373 	    case BUILT_IN_BSWAP16:
2374 	    case BUILT_IN_BSWAP32:
2375 	    case BUILT_IN_BSWAP64:
2376 	    case BUILT_IN_BSWAP128:
2377 	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
2378 	      if (val.lattice_val == UNDEFINED)
2379 		break;
2380 	      else if (val.lattice_val == CONSTANT
2381 		       && val.value
2382 		       && TREE_CODE (val.value) == INTEGER_CST)
2383 		{
2384 		  tree type = TREE_TYPE (gimple_call_lhs (stmt));
2385 		  int prec = TYPE_PRECISION (type);
2386 		  wide_int wval = wi::to_wide (val.value);
2387 		  val.value
2388 		    = wide_int_to_tree (type,
2389 					wide_int::from (wval, prec,
2390 							UNSIGNED).bswap ());
2391 		  val.mask
2392 		    = widest_int::from (wide_int::from (val.mask, prec,
2393 							UNSIGNED).bswap (),
2394 					UNSIGNED);
2395 		  if (wi::sext (val.mask, prec) != -1)
2396 		    break;
2397 		}
2398 	      val.lattice_val = VARYING;
2399 	      val.value = NULL_TREE;
2400 	      val.mask = -1;
2401 	      break;
2402 
2403 	    default:;
2404 	    }
2405 	}
2406       if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
2407 	{
2408 	  tree fntype = gimple_call_fntype (stmt);
2409 	  if (fntype)
2410 	    {
2411 	      tree attrs = lookup_attribute ("assume_aligned",
2412 					     TYPE_ATTRIBUTES (fntype));
2413 	      if (attrs)
2414 		val = bit_value_assume_aligned (stmt, attrs, val, false);
2415 	      attrs = lookup_attribute ("alloc_align",
2416 					TYPE_ATTRIBUTES (fntype));
2417 	      if (attrs)
2418 		val = bit_value_assume_aligned (stmt, attrs, val, true);
2419 	    }
2420 	  int flags = ignore_return_flags
2421 		      ? 0 : gimple_call_return_flags (as_a <gcall *> (stmt));
2422 	  if (flags & ERF_RETURNS_ARG
2423 	      && (flags & ERF_RETURN_ARG_MASK) < gimple_call_num_args (stmt))
2424 	    {
2425 	      val = get_value_for_expr
2426 			 (gimple_call_arg (stmt,
2427 					   flags & ERF_RETURN_ARG_MASK), true);
2428 	    }
2429 	}
2430       is_constant = (val.lattice_val == CONSTANT);
2431     }
2432 
2433   if (flag_tree_bit_ccp
2434       && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
2435 	  || !is_constant)
2436       && gimple_get_lhs (stmt)
2437       && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
2438     {
2439       tree lhs = gimple_get_lhs (stmt);
2440       wide_int nonzero_bits = get_nonzero_bits (lhs);
2441       if (nonzero_bits != -1)
2442 	{
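	  /* Bits that are clear in NONZERO_BITS are known to be zero and
	     refine whatever the evaluation above found.  For illustration:
	     nonzero bits of 0xff on a 32-bit LHS mean the upper 24 bits are
	     known zero even if the statement itself evaluated to VARYING.  */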
2443 	  if (!is_constant)
2444 	    {
2445 	      val.lattice_val = CONSTANT;
2446 	      val.value = build_zero_cst (TREE_TYPE (lhs));
2447 	      val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
2448 	      is_constant = true;
2449 	    }
2450 	  else
2451 	    {
2452 	      if (wi::bit_and_not (wi::to_wide (val.value), nonzero_bits) != 0)
2453 		val.value = wide_int_to_tree (TREE_TYPE (lhs),
2454 					      nonzero_bits
2455 					      & wi::to_wide (val.value));
2456 	      if (nonzero_bits == 0)
2457 		val.mask = 0;
2458 	      else
2459 		val.mask = val.mask & extend_mask (nonzero_bits,
2460 						   TYPE_SIGN (TREE_TYPE (lhs)));
2461 	    }
2462 	}
2463     }
2464 
2465   /* The statement produced a nonconstant value.  */
2466   if (!is_constant)
2467     {
2468       /* The statement produced a copy.  */
2469       if (simplified && TREE_CODE (simplified) == SSA_NAME
2470 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
2471 	{
2472 	  val.lattice_val = CONSTANT;
2473 	  val.value = simplified;
2474 	  val.mask = -1;
2475 	}
2476       /* The statement is VARYING.  */
2477       else
2478 	{
2479 	  val.lattice_val = VARYING;
2480 	  val.value = NULL_TREE;
2481 	  val.mask = -1;
2482 	}
2483     }
2484 
2485   return val;
2486 }
2487 
2488 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
2489 
2490 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2491    each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */
2492 
2493 static void
2494 insert_clobber_before_stack_restore (tree saved_val, tree var,
2495 				     gimple_htab **visited)
2496 {
2497   gimple *stmt;
2498   gassign *clobber_stmt;
2499   tree clobber;
2500   imm_use_iterator iter;
2501   gimple_stmt_iterator i;
2502   gimple **slot;
2503 
2504   FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
2505     if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
2506       {
2507 	clobber = build_clobber (TREE_TYPE (var));
2508 	clobber_stmt = gimple_build_assign (var, clobber);
2509 
2510 	i = gsi_for_stmt (stmt);
2511 	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2512       }
2513     else if (gimple_code (stmt) == GIMPLE_PHI)
2514       {
2515 	if (!*visited)
2516 	  *visited = new gimple_htab (10);
2517 
2518 	slot = (*visited)->find_slot (stmt, INSERT);
2519 	if (*slot != NULL)
2520 	  continue;
2521 
2522 	*slot = stmt;
2523 	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2524 					     visited);
2525       }
2526     else if (gimple_assign_ssa_name_copy_p (stmt))
2527       insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2528 					   visited);
2529 }
2530 
2531 /* Advance the iterator to the previous non-debug gimple statement in the same
2532    or dominating basic block.  */
2533 
2534 static inline void
2535 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2536 {
2537   basic_block dom;
2538 
2539   gsi_prev_nondebug (i);
2540   while (gsi_end_p (*i))
2541     {
2542       dom = get_immediate_dominator (CDI_DOMINATORS, gsi_bb (*i));
2543       if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2544 	return;
2545 
2546       *i = gsi_last_bb (dom);
2547     }
2548 }
2549 
2550 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2551    a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2552 
2553    It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
2554    a previous pass (such as DOM) duplicated it along multiple paths to a BB.
2555    In that case the function gives up without inserting the clobbers.  */
2556 
2557 static void
2558 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2559 {
2560   gimple *stmt;
2561   tree saved_val;
2562   gimple_htab *visited = NULL;
2563 
2564   for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2565     {
2566       stmt = gsi_stmt (i);
2567 
2568       if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2569 	continue;
2570 
2571       saved_val = gimple_call_lhs (stmt);
2572       if (saved_val == NULL_TREE)
2573 	continue;
2574 
2575       insert_clobber_before_stack_restore (saved_val, var, &visited);
2576       break;
2577     }
2578 
2579   delete visited;
2580 }
2581 
2582 /* Detect a __builtin_alloca_with_align call with a constant size argument.
2583    If found, declare a fixed-size array and return its address, otherwise
2584    return NULL_TREE.  */
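/* For illustration (a hand-written example, not taken from a dump):

     p_1 = __builtin_alloca_with_align (16, 64);

   can be replaced by the address of a 16-byte local array whose DECL_ALIGN
   is set to 64 bits, so later passes treat it like any other fixed-size
   local variable.  */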
2585 
2586 static tree
2587 fold_builtin_alloca_with_align (gimple *stmt)
2588 {
2589   unsigned HOST_WIDE_INT size, threshold, n_elem;
2590   tree lhs, arg, block, var, elem_type, array_type;
2591 
2592   /* Get lhs.  */
2593   lhs = gimple_call_lhs (stmt);
2594   if (lhs == NULL_TREE)
2595     return NULL_TREE;
2596 
2597   /* Detect constant argument.  */
2598   arg = get_constant_value (gimple_call_arg (stmt, 0));
2599   if (arg == NULL_TREE
2600       || TREE_CODE (arg) != INTEGER_CST
2601       || !tree_fits_uhwi_p (arg))
2602     return NULL_TREE;
2603 
2604   size = tree_to_uhwi (arg);
2605 
2606   /* Heuristic: don't fold large allocas.  */
2607   threshold = (unsigned HOST_WIDE_INT)param_large_stack_frame;
2608   /* In case the alloca is located at function entry, it has the same lifetime
2609      as a declared array, so we allow a larger size.  */
2610   block = gimple_block (stmt);
2611   if (!(cfun->after_inlining
2612 	&& block
2613         && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2614     threshold /= 10;
2615   if (size > threshold)
2616     return NULL_TREE;
2617 
2618   /* We have to be able to move points-to info.  We used to assert
2619      that we can but IPA PTA might end up with two UIDs here
2620      as it might need to handle more than one instance being
2621      live at the same time.  Instead of trying to detect this case
2622      (using the first UID would be OK) just give up for now.  */
2623   struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2624   unsigned uid = 0;
2625   if (pi != NULL
2626       && !pi->pt.anything
2627       && !pt_solution_singleton_or_null_p (&pi->pt, &uid))
2628     return NULL_TREE;
2629 
2630   /* Declare array.  */
2631   elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2632   n_elem = size * 8 / BITS_PER_UNIT;
2633   array_type = build_array_type_nelts (elem_type, n_elem);
2634 
2635   if (tree ssa_name = SSA_NAME_IDENTIFIER (lhs))
2636     {
2637       /* Give the temporary a name derived from the name of the VLA
2638 	 declaration so it can be referenced in diagnostics.  */
2639       const char *name = IDENTIFIER_POINTER (ssa_name);
2640       var = create_tmp_var (array_type, name);
2641     }
2642   else
2643     var = create_tmp_var (array_type);
2644 
2645   if (gimple *lhsdef = SSA_NAME_DEF_STMT (lhs))
2646     {
2647       /* Set the temporary's location to that of the VLA declaration
2648 	 so it can be pointed to in diagnostics.  */
2649       location_t loc = gimple_location (lhsdef);
2650       DECL_SOURCE_LOCATION (var) = loc;
2651     }
2652 
2653   SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2654   if (uid != 0)
2655     SET_DECL_PT_UID (var, uid);
2656 
2657   /* Fold alloca to the address of the array.  */
2658   return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2659 }
2660 
2661 /* Fold the stmt at *GSI with CCP-specific information that propagation
2662    and regular folding do not catch.  */
2663 
2664 bool
2665 ccp_folder::fold_stmt (gimple_stmt_iterator *gsi)
2666 {
2667   gimple *stmt = gsi_stmt (*gsi);
2668 
2669   switch (gimple_code (stmt))
2670     {
2671     case GIMPLE_COND:
2672       {
2673 	gcond *cond_stmt = as_a <gcond *> (stmt);
2674 	ccp_prop_value_t val;
2675 	/* Statement evaluation will handle type mismatches in constants
2676 	   more gracefully than the final propagation.  This allows us to
2677 	   fold more conditionals here.  */
2678 	val = evaluate_stmt (stmt);
2679 	if (val.lattice_val != CONSTANT
2680 	    || val.mask != 0)
2681 	  return false;
2682 
2683 	if (dump_file)
2684 	  {
2685 	    fprintf (dump_file, "Folding predicate ");
2686 	    print_gimple_expr (dump_file, stmt, 0);
2687 	    fprintf (dump_file, " to ");
2688 	    print_generic_expr (dump_file, val.value);
2689 	    fprintf (dump_file, "\n");
2690 	  }
2691 
2692 	if (integer_zerop (val.value))
2693 	  gimple_cond_make_false (cond_stmt);
2694 	else
2695 	  gimple_cond_make_true (cond_stmt);
2696 
2697 	return true;
2698       }
2699 
2700     case GIMPLE_CALL:
2701       {
2702 	tree lhs = gimple_call_lhs (stmt);
2703 	int flags = gimple_call_flags (stmt);
2704 	tree val;
2705 	tree argt;
2706 	bool changed = false;
2707 	unsigned i;
2708 
2709 	/* If the call was folded into a constant make sure it goes
2710 	   away even if we cannot propagate into all uses because of
2711 	   type issues.  */
2712 	if (lhs
2713 	    && TREE_CODE (lhs) == SSA_NAME
2714 	    && (val = get_constant_value (lhs))
2715 	    /* Don't optimize away calls that have side-effects.  */
2716 	    && (flags & (ECF_CONST|ECF_PURE)) != 0
2717 	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2718 	  {
2719 	    tree new_rhs = unshare_expr (val);
2720 	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
2721 					    TREE_TYPE (new_rhs)))
2722 	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2723 	    gimplify_and_update_call_from_tree (gsi, new_rhs);
2724 	    return true;
2725 	  }
2726 
2727 	/* Internal calls provide no argument types, so the extra laxity
2728 	   for normal calls does not apply.  */
2729 	if (gimple_call_internal_p (stmt))
2730 	  return false;
2731 
2732         /* The heuristic of fold_builtin_alloca_with_align differs before and
2733 	   after inlining, so we don't require the arg to be changed into a
2734 	   constant for folding, but just to be constant.  */
2735         if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)
2736 	    || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX))
2737           {
2738             tree new_rhs = fold_builtin_alloca_with_align (stmt);
2739             if (new_rhs)
2740 	      {
2741 		gimplify_and_update_call_from_tree (gsi, new_rhs);
2742 		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
2743 		insert_clobbers_for_var (*gsi, var);
2744 		return true;
2745 	      }
2746           }
2747 
2748 	/* If there's no extra info from an assume_aligned call,
2749 	   drop it so it doesn't act as an otherwise useless dataflow
2750 	   barrier.  */
2751 	if (gimple_call_builtin_p (stmt, BUILT_IN_ASSUME_ALIGNED))
2752 	  {
2753 	    tree ptr = gimple_call_arg (stmt, 0);
2754 	    ccp_prop_value_t ptrval = get_value_for_expr (ptr, true);
2755 	    if (ptrval.lattice_val == CONSTANT
2756 		&& TREE_CODE (ptrval.value) == INTEGER_CST
2757 		&& ptrval.mask != 0)
2758 	      {
2759 		ccp_prop_value_t val
2760 		  = bit_value_assume_aligned (stmt, NULL_TREE, ptrval, false);
2761 		unsigned int ptralign = least_bit_hwi (ptrval.mask.to_uhwi ());
2762 		unsigned int align = least_bit_hwi (val.mask.to_uhwi ());
2763 		if (ptralign == align
2764 		    && ((TREE_INT_CST_LOW (ptrval.value) & (align - 1))
2765 			== (TREE_INT_CST_LOW (val.value) & (align - 1))))
2766 		  {
2767 		    replace_call_with_value (gsi, ptr);
2768 		    return true;
2769 		  }
2770 	      }
2771 	  }
2772 
2773 	/* Propagate into the call arguments.  Compared to replace_uses_in
2774 	   this can use the argument slot types for type verification
2775 	   instead of the current argument type.  We also can safely
2776 	   drop qualifiers here as we are dealing with constants anyway.  */
2777 	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2778 	for (i = 0; i < gimple_call_num_args (stmt) && argt;
2779 	     ++i, argt = TREE_CHAIN (argt))
2780 	  {
2781 	    tree arg = gimple_call_arg (stmt, i);
2782 	    if (TREE_CODE (arg) == SSA_NAME
2783 		&& (val = get_constant_value (arg))
2784 		&& useless_type_conversion_p
2785 		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2786 		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2787 	      {
2788 		gimple_call_set_arg (stmt, i, unshare_expr (val));
2789 		changed = true;
2790 	      }
2791 	  }
2792 
2793 	return changed;
2794       }
2795 
2796     case GIMPLE_ASSIGN:
2797       {
2798 	tree lhs = gimple_assign_lhs (stmt);
2799 	tree val;
2800 
2801 	/* If we have a load that turned out to be constant replace it
2802 	   as we cannot propagate into all uses in all cases.  */
2803 	if (gimple_assign_single_p (stmt)
2804 	    && TREE_CODE (lhs) == SSA_NAME
2805 	    && (val = get_constant_value (lhs)))
2806 	  {
2807 	    tree rhs = unshare_expr (val);
2808 	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2809 	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2810 	    gimple_assign_set_rhs_from_tree (gsi, rhs);
2811 	    return true;
2812 	  }
2813 
2814 	return false;
2815       }
2816 
2817     default:
2818       return false;
2819     }
2820 }
2821 
2822 /* Visit the assignment statement STMT.  Set the value of its LHS to the
2823    value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
2824    creates virtual definitions, set the value of each new name to that
2825    of the RHS (if we can derive a constant out of the RHS).
2826    Value-returning call statements also perform an assignment, and
2827    are handled here.  */
2828 
2829 static enum ssa_prop_result
2830 visit_assignment (gimple *stmt, tree *output_p)
2831 {
2832   ccp_prop_value_t val;
2833   enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2834 
2835   tree lhs = gimple_get_lhs (stmt);
2836   if (TREE_CODE (lhs) == SSA_NAME)
2837     {
2838       /* Evaluate the statement, which could be
2839 	 either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
2840       val = evaluate_stmt (stmt);
2841 
2842       /* If STMT is an assignment to an SSA_NAME, we only have one
2843 	 value to set.  */
2844       if (set_lattice_value (lhs, &val))
2845 	{
2846 	  *output_p = lhs;
2847 	  if (val.lattice_val == VARYING)
2848 	    retval = SSA_PROP_VARYING;
2849 	  else
2850 	    retval = SSA_PROP_INTERESTING;
2851 	}
2852     }
2853 
2854   return retval;
2855 }
2856 
2857 
2858 /* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
2859    if it can determine which edge will be taken.  Otherwise, return
2860    SSA_PROP_VARYING.  */
2861 
2862 static enum ssa_prop_result
2863 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2864 {
2865   ccp_prop_value_t val;
2866   basic_block block;
2867 
2868   block = gimple_bb (stmt);
2869   val = evaluate_stmt (stmt);
2870   if (val.lattice_val != CONSTANT
2871       || val.mask != 0)
2872     return SSA_PROP_VARYING;
2873 
2874   /* Find which edge out of the conditional block will be taken and add it
2875      to the worklist.  If no single edge can be determined statically,
2876      return SSA_PROP_VARYING to feed all the outgoing edges to the
2877      propagation engine.  */
2878   *taken_edge_p = find_taken_edge (block, val.value);
2879   if (*taken_edge_p)
2880     return SSA_PROP_INTERESTING;
2881   else
2882     return SSA_PROP_VARYING;
2883 }
2884 
2885 
2886 /* Evaluate statement STMT.  If the statement produces an output value and
2887    its evaluation changes the lattice value of its output, return
2888    SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2889    output value.
2890 
2891    If STMT is a conditional branch and we can determine its truth
2892    value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
2893    value, return SSA_PROP_VARYING.  */
2894 
2895 enum ssa_prop_result
2896 ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2897 {
2898   tree def;
2899   ssa_op_iter iter;
2900 
2901   if (dump_file && (dump_flags & TDF_DETAILS))
2902     {
2903       fprintf (dump_file, "\nVisiting statement:\n");
2904       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2905     }
2906 
2907   switch (gimple_code (stmt))
2908     {
2909       case GIMPLE_ASSIGN:
2910         /* If the statement is an assignment that produces a single
2911            output value, evaluate its RHS to see if the lattice value of
2912            its output has changed.  */
2913         return visit_assignment (stmt, output_p);
2914 
2915       case GIMPLE_CALL:
2916         /* A value-returning call also performs an assignment.  */
2917         if (gimple_call_lhs (stmt) != NULL_TREE)
2918           return visit_assignment (stmt, output_p);
2919         break;
2920 
2921       case GIMPLE_COND:
2922       case GIMPLE_SWITCH:
2923         /* If STMT is a conditional branch, see if we can determine
2924            which branch will be taken.   */
2925         /* FIXME.  It appears that we should be able to optimize
2926            computed GOTOs here as well.  */
2927         return visit_cond_stmt (stmt, taken_edge_p);
2928 
2929       default:
2930         break;
2931     }
2932 
2933   /* Any other kind of statement is not interesting for constant
2934      propagation and, therefore, not worth simulating.  */
2935   if (dump_file && (dump_flags & TDF_DETAILS))
2936     fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");
2937 
2938   /* Definitions made by statements other than assignments to
2939      SSA_NAMEs represent unknown modifications to their outputs.
2940      Mark them VARYING.  */
2941   FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2942     set_value_varying (def);
2943 
2944   return SSA_PROP_VARYING;
2945 }
2946 
2947 
2948 /* Main entry point for SSA Conditional Constant Propagation.  If NONZERO_P,
2949    record nonzero bits.  */
2950 
2951 static unsigned int
2952 do_ssa_ccp (bool nonzero_p)
2953 {
2954   unsigned int todo = 0;
2955   calculate_dominance_info (CDI_DOMINATORS);
2956 
2957   ccp_initialize ();
2958   class ccp_propagate ccp_propagate;
2959   ccp_propagate.ssa_propagate ();
2960   if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
2961     {
2962       todo = (TODO_cleanup_cfg | TODO_update_ssa);
2963 
2964       /* ccp_finalize does not preserve loop-closed ssa.  */
2965       loops_state_clear (LOOP_CLOSED_SSA);
2966     }
2967 
2968   free_dominance_info (CDI_DOMINATORS);
2969   return todo;
2970 }
2971 
2972 
2973 namespace {
2974 
2975 const pass_data pass_data_ccp =
2976 {
2977   GIMPLE_PASS, /* type */
2978   "ccp", /* name */
2979   OPTGROUP_NONE, /* optinfo_flags */
2980   TV_TREE_CCP, /* tv_id */
2981   ( PROP_cfg | PROP_ssa ), /* properties_required */
2982   0, /* properties_provided */
2983   0, /* properties_destroyed */
2984   0, /* todo_flags_start */
2985   TODO_update_address_taken, /* todo_flags_finish */
2986 };
2987 
2988 class pass_ccp : public gimple_opt_pass
2989 {
2990 public:
2991   pass_ccp (gcc::context *ctxt)
2992     : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2993   {}
2994 
2995   /* opt_pass methods: */
2996   opt_pass * clone () { return new pass_ccp (m_ctxt); }
2997   void set_pass_param (unsigned int n, bool param)
2998     {
2999       gcc_assert (n == 0);
3000       nonzero_p = param;
3001     }
3002   virtual bool gate (function *) { return flag_tree_ccp != 0; }
3003   virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
3004 
3005  private:
3006   /* Determines whether the pass instance records nonzero bits.  */
3007   bool nonzero_p;
3008 }; // class pass_ccp
3009 
3010 } // anon namespace
3011 
3012 gimple_opt_pass *
3013 make_pass_ccp (gcc::context *ctxt)
3014 {
3015   return new pass_ccp (ctxt);
3016 }
3017 
3018 
3019 
3020 /* Try to optimize out __builtin_stack_restore.  Optimize it out
3021    if there is another __builtin_stack_restore in the same basic
3022    block and no calls or ASM_EXPRs are in between, or if this block's
3023    only outgoing edge is to EXIT_BLOCK and there are no calls or
3024    ASM_EXPRs after this __builtin_stack_restore.  */
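/* For illustration (a hand-written GIMPLE sketch, not a real dump):

     t_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (t_1);   <-- candidate, optimized out
     ... no calls or asm ...
     __builtin_stack_restore (t_1);

   The first restore is redundant because nothing between the two restores
   can observe the stack pointer it would reinstate.  */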
3025 
3026 static tree
3027 optimize_stack_restore (gimple_stmt_iterator i)
3028 {
3029   tree callee;
3030   gimple *stmt;
3031 
3032   basic_block bb = gsi_bb (i);
3033   gimple *call = gsi_stmt (i);
3034 
3035   if (gimple_code (call) != GIMPLE_CALL
3036       || gimple_call_num_args (call) != 1
3037       || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
3038       || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
3039     return NULL_TREE;
3040 
3041   for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
3042     {
3043       stmt = gsi_stmt (i);
3044       if (gimple_code (stmt) == GIMPLE_ASM)
3045 	return NULL_TREE;
3046       if (gimple_code (stmt) != GIMPLE_CALL)
3047 	continue;
3048 
3049       callee = gimple_call_fndecl (stmt);
3050       if (!callee
3051 	  || !fndecl_built_in_p (callee, BUILT_IN_NORMAL)
3052 	  /* All regular builtins are ok, just obviously not alloca.  */
3053 	  || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
3054 	return NULL_TREE;
3055 
3056       if (fndecl_built_in_p (callee, BUILT_IN_STACK_RESTORE))
3057 	goto second_stack_restore;
3058     }
3059 
3060   if (!gsi_end_p (i))
3061     return NULL_TREE;
3062 
3063   /* Allow a single successor, which must be the exit block, or no successors.  */
3064   switch (EDGE_COUNT (bb->succs))
3065     {
3066     case 0:
3067       break;
3068     case 1:
3069       if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3070 	return NULL_TREE;
3071       break;
3072     default:
3073       return NULL_TREE;
3074     }
3075  second_stack_restore:
3076 
3077   /* If there's exactly one use, then zap the call to __builtin_stack_save.
3078      If there are multiple uses, then the last one should remove the call.
3079      In any case, whether the call to __builtin_stack_save can be removed
3080      or not is irrelevant to removing the call to __builtin_stack_restore.  */
3081   if (has_single_use (gimple_call_arg (call, 0)))
3082     {
3083       gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
3084       if (is_gimple_call (stack_save))
3085 	{
3086 	  callee = gimple_call_fndecl (stack_save);
3087 	  if (callee && fndecl_built_in_p (callee, BUILT_IN_STACK_SAVE))
3088 	    {
3089 	      gimple_stmt_iterator stack_save_gsi;
3090 	      tree rhs;
3091 
3092 	      stack_save_gsi = gsi_for_stmt (stack_save);
3093 	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
3094 	      replace_call_with_value (&stack_save_gsi, rhs);
3095 	    }
3096 	}
3097     }
3098 
3099   /* No effect, so the statement will be deleted.  */
3100   return integer_zero_node;
3101 }
3102 
3103 /* If va_list type is a simple pointer and nothing special is needed,
3104    optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
3105    __builtin_va_end (&ap) out as a no-op, and __builtin_va_copy into a simple
3106    pointer assignment.  */
3107 
3108 static tree
3109 optimize_stdarg_builtin (gimple *call)
3110 {
3111   tree callee, lhs, rhs, cfun_va_list;
3112   bool va_list_simple_ptr;
3113   location_t loc = gimple_location (call);
3114 
3115   callee = gimple_call_fndecl (call);
3116 
3117   cfun_va_list = targetm.fn_abi_va_list (callee);
3118   va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
3119 		       && (TREE_TYPE (cfun_va_list) == void_type_node
3120 			   || TREE_TYPE (cfun_va_list) == char_type_node);
3121 
3122   switch (DECL_FUNCTION_CODE (callee))
3123     {
3124     case BUILT_IN_VA_START:
3125       if (!va_list_simple_ptr
3126 	  || targetm.expand_builtin_va_start != NULL
3127 	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
3128 	return NULL_TREE;
3129 
3130       if (gimple_call_num_args (call) != 2)
3131 	return NULL_TREE;
3132 
3133       lhs = gimple_call_arg (call, 0);
3134       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
3135 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
3136 	     != TYPE_MAIN_VARIANT (cfun_va_list))
3137 	return NULL_TREE;
3138 
3139       lhs = build_fold_indirect_ref_loc (loc, lhs);
3140       rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
3141                              1, integer_zero_node);
3142       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
3143       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
3144 
3145     case BUILT_IN_VA_COPY:
3146       if (!va_list_simple_ptr)
3147 	return NULL_TREE;
3148 
3149       if (gimple_call_num_args (call) != 2)
3150 	return NULL_TREE;
3151 
3152       lhs = gimple_call_arg (call, 0);
3153       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
3154 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
3155 	     != TYPE_MAIN_VARIANT (cfun_va_list))
3156 	return NULL_TREE;
3157 
3158       lhs = build_fold_indirect_ref_loc (loc, lhs);
3159       rhs = gimple_call_arg (call, 1);
3160       if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
3161 	  != TYPE_MAIN_VARIANT (cfun_va_list))
3162 	return NULL_TREE;
3163 
3164       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
3165       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
3166 
3167     case BUILT_IN_VA_END:
3168       /* No effect, so the statement will be deleted.  */
3169       return integer_zero_node;
3170 
3171     default:
3172       gcc_unreachable ();
3173     }
3174 }
3175 
3176 /* Attempt to make the block of __builtin_unreachable I unreachable by changing
3177    the incoming jumps.  Return true if at least one jump was changed.  */
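/* For illustration (a hand-written sketch): if a predecessor block ends in

     if (x_1 > 10) goto <unreachable bb>; else goto <bb 4>;

   and the edge into the unreachable block is the true edge, the condition is
   rewritten to a constant false, so that edge is never taken and CFG cleanup
   can remove the block.  */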
3178 
3179 static bool
3180 optimize_unreachable (gimple_stmt_iterator i)
3181 {
3182   basic_block bb = gsi_bb (i);
3183   gimple_stmt_iterator gsi;
3184   gimple *stmt;
3185   edge_iterator ei;
3186   edge e;
3187   bool ret;
3188 
3189   if (flag_sanitize & SANITIZE_UNREACHABLE)
3190     return false;
3191 
3192   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3193     {
3194       stmt = gsi_stmt (gsi);
3195 
3196       if (is_gimple_debug (stmt))
3197        continue;
3198 
3199       if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
3200 	{
3201 	  /* Verify we do not need to preserve the label.  */
3202 	  if (FORCED_LABEL (gimple_label_label (label_stmt)))
3203 	    return false;
3204 
3205 	  continue;
3206 	}
3207 
3208       /* Only handle the case that __builtin_unreachable is the first statement
3209 	 in the block.  We rely on DCE to remove stmts without side-effects
3210 	 before __builtin_unreachable.  */
3211       if (gsi_stmt (gsi) != gsi_stmt (i))
3212         return false;
3213     }
3214 
3215   ret = false;
3216   FOR_EACH_EDGE (e, ei, bb->preds)
3217     {
3218       gsi = gsi_last_bb (e->src);
3219       if (gsi_end_p (gsi))
3220 	continue;
3221 
3222       stmt = gsi_stmt (gsi);
3223       if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
3224 	{
3225 	  if (e->flags & EDGE_TRUE_VALUE)
3226 	    gimple_cond_make_false (cond_stmt);
3227 	  else if (e->flags & EDGE_FALSE_VALUE)
3228 	    gimple_cond_make_true (cond_stmt);
3229 	  else
3230 	    gcc_unreachable ();
3231 	  update_stmt (cond_stmt);
3232 	}
3233       else
3234 	{
3235 	  /* Todo: handle other cases.  Note that unreachable switch case
3236 	     statements have already been removed.  */
3237 	  continue;
3238 	}
3239 
3240       ret = true;
3241     }
3242 
3243   return ret;
3244 }
3245 
3246 /* Convert
3247    _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3248    _7 = ~_1;
3249    _5 = (_Bool) _7;
3250    to
3251    _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3252    _8 = _1 & 1;
3253    _5 = _8 == 0;
3254    and convert
3255    _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3256    _7 = ~_1;
3257    _4 = (_Bool) _7;
3258    to
3259    _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3260    _8 = _1 & 1;
3261    _4 = (_Bool) _8;
3262 
3263    USE_STMT is the gimple statement which uses the return value of
3264    __atomic_fetch_or_*.  LHS is the return value of __atomic_fetch_or_*.
3265    MASK is the mask passed to __atomic_fetch_or_*.
3266  */
3267 
3268 static gimple *
3269 convert_atomic_bit_not (enum internal_fn fn, gimple *use_stmt,
3270 			tree lhs, tree mask)
3271 {
3272   tree and_mask;
3273   if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
3274     {
3275       /* MASK must be ~1.  */
3276       if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),
3277 					   ~HOST_WIDE_INT_1), mask, 0))
3278 	return nullptr;
3279       and_mask = build_int_cst (TREE_TYPE (lhs), 1);
3280     }
3281   else
3282     {
3283       /* MASK must be 1.  */
3284       if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs), 1), mask, 0))
3285 	return nullptr;
3286       and_mask = mask;
3287     }
3288 
3289   tree use_lhs = gimple_assign_lhs (use_stmt);
3290 
3291   use_operand_p use_p;
3292   gimple *use_not_stmt;
3293 
3294   if (!single_imm_use (use_lhs, &use_p, &use_not_stmt)
3295       || !is_gimple_assign (use_not_stmt))
3296     return nullptr;
3297 
3298   if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_not_stmt)))
3299     return nullptr;
3300 
3301   tree use_not_lhs = gimple_assign_lhs (use_not_stmt);
3302   if (TREE_CODE (TREE_TYPE (use_not_lhs)) != BOOLEAN_TYPE)
3303     return nullptr;
3304 
3305   gimple_stmt_iterator gsi;
3306   gsi = gsi_for_stmt (use_stmt);
3307   gsi_remove (&gsi, true);
3308   tree var = make_ssa_name (TREE_TYPE (lhs));
3309   use_stmt = gimple_build_assign (var, BIT_AND_EXPR, lhs, and_mask);
3310   gsi = gsi_for_stmt (use_not_stmt);
3311   gsi_insert_before (&gsi, use_stmt, GSI_NEW_STMT);
3312   lhs = gimple_assign_lhs (use_not_stmt);
3313   gimple *g = gimple_build_assign (lhs, EQ_EXPR, var,
3314 				   build_zero_cst (TREE_TYPE (mask)));
3315   gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3316   gsi = gsi_for_stmt (use_not_stmt);
3317   gsi_remove (&gsi, true);
3318   return use_stmt;
3319 }
3320 
3321 /* match.pd function to match atomic_bit_test_and pattern which
3322    has nop_convert:
3323      _1 = __atomic_fetch_or_4 (&v, 1, 0);
3324      _2 = (int) _1;
3325      _5 = _2 & 1;
3326  */
3327 extern bool gimple_nop_atomic_bit_test_and_p (tree, tree *,
3328 					      tree (*) (tree));
3329 extern bool gimple_nop_convert (tree, tree*, tree (*) (tree));
3330 
3331 /* Optimize
3332      mask_2 = 1 << cnt_1;
3333      _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
3334      _5 = _4 & mask_2;
3335    to
3336      _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
3337      _5 = _4;
3338    If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
3339    is passed instead of 0, and the builtin just returns a zero
3340    or 1 value instead of the actual bit.
3341    Similarly for __sync_fetch_and_or_* (without the ", _3" part
3342    in there), and/or if mask_2 is a power of 2 constant.
3343    Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
3344    in that case.  And similarly for and instead of or, except that
3345    the second argument to the builtin needs to be one's complement
3346    of the mask instead of mask.  */
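
/* A hedged user-level example of the pattern described above
   (illustrative code, not part of this file):

     _Bool
     test_and_set_bit (unsigned int *p, int bit)
     {
       unsigned int mask = 1u << bit;
       return (__atomic_fetch_or (p, mask, __ATOMIC_SEQ_CST) & mask) != 0;
     }

   Since the masked result is only compared against zero, the call is
   replaced by .ATOMIC_BIT_TEST_AND_SET with the boolean flag set, and a
   target providing atomic_bit_test_and_set_optab (e.g. x86 via lock bts)
   does not have to materialize the full old value.  */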
3347 
3348 static void
3349 optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
3350 			      enum internal_fn fn, bool has_model_arg,
3351 			      bool after)
3352 {
3353   gimple *call = gsi_stmt (*gsip);
3354   tree lhs = gimple_call_lhs (call);
3355   use_operand_p use_p;
3356   gimple *use_stmt;
3357   tree mask;
3358   optab optab;
3359 
3360   if (!flag_inline_atomics
3361       || optimize_debug
3362       || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
3363       || !lhs
3364       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
3365       || !single_imm_use (lhs, &use_p, &use_stmt)
3366       || !is_gimple_assign (use_stmt)
3367       || !gimple_vdef (call))
3368     return;
3369 
3370   switch (fn)
3371     {
3372     case IFN_ATOMIC_BIT_TEST_AND_SET:
3373       optab = atomic_bit_test_and_set_optab;
3374       break;
3375     case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
3376       optab = atomic_bit_test_and_complement_optab;
3377       break;
3378     case IFN_ATOMIC_BIT_TEST_AND_RESET:
3379       optab = atomic_bit_test_and_reset_optab;
3380       break;
3381     default:
3382       return;
3383     }
3384 
3385   tree bit = nullptr;
3386 
3387   mask = gimple_call_arg (call, 1);
3388   tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
3389   if (rhs_code != BIT_AND_EXPR)
3390     {
3391       if (rhs_code != NOP_EXPR && rhs_code != BIT_NOT_EXPR)
3392 	return;
3393 
3394       tree use_lhs = gimple_assign_lhs (use_stmt);
3395       if (TREE_CODE (use_lhs) == SSA_NAME
3396 	  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
3397 	return;
3398 
3399       tree use_rhs = gimple_assign_rhs1 (use_stmt);
3400       if (lhs != use_rhs)
3401 	return;
3402 
3403       if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)))
3404 	  == CODE_FOR_nothing)
3405 	return;
3406 
3407       gimple *g;
3408       gimple_stmt_iterator gsi;
3409       tree var;
3410       int ibit = -1;
3411 
3412       if (rhs_code == BIT_NOT_EXPR)
3413 	{
3414 	  g = convert_atomic_bit_not (fn, use_stmt, lhs, mask);
3415 	  if (!g)
3416 	    return;
3417 	  use_stmt = g;
3418 	  ibit = 0;
3419 	}
3420       else if (TREE_CODE (TREE_TYPE (use_lhs)) == BOOLEAN_TYPE)
3421 	{
3422 	  tree and_mask;
3423 	  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
3424 	    {
3425 	      /* MASK must be ~1.  */
3426 	      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),
3427 						   ~HOST_WIDE_INT_1),
3428 				    mask, 0))
3429 		return;
3430 
3431 	      /* Convert
3432 		 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3433 		 _4 = (_Bool) _1;
3434 		 to
3435 		 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3436 		 _5 = _1 & 1;
3437 		 _4 = (_Bool) _5;
3438 	       */
3439 	      and_mask = build_int_cst (TREE_TYPE (lhs), 1);
3440 	    }
3441 	  else
3442 	    {
3443 	      and_mask = build_int_cst (TREE_TYPE (lhs), 1);
3444 	      if (!operand_equal_p (and_mask, mask, 0))
3445 		return;
3446 
3447 	      /* Convert
3448 		 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3449 		 _4 = (_Bool) _1;
3450 		 to
3451 		 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3452 		 _5 = _1 & 1;
3453 		 _4 = (_Bool) _5;
3454 	       */
3455 	    }
3456 	  var = make_ssa_name (TREE_TYPE (use_rhs));
3457 	  replace_uses_by (use_rhs, var);
3458 	  g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,
3459 				   and_mask);
3460 	  gsi = gsi_for_stmt (use_stmt);
3461 	  gsi_insert_before (&gsi, g, GSI_NEW_STMT);
3462 	  use_stmt = g;
3463 	  ibit = 0;
3464 	}
3465       else if (TYPE_PRECISION (TREE_TYPE (use_lhs))
3466 	       <= TYPE_PRECISION (TREE_TYPE (use_rhs)))
3467 	{
3468 	  gimple *use_nop_stmt;
3469 	  if (!single_imm_use (use_lhs, &use_p, &use_nop_stmt)
3470 	      || !is_gimple_assign (use_nop_stmt))
3471 	    return;
3472 	  tree use_nop_lhs = gimple_assign_lhs (use_nop_stmt);
3473 	  rhs_code = gimple_assign_rhs_code (use_nop_stmt);
3474 	  if (rhs_code != BIT_AND_EXPR)
3475 	    {
3476 	      if (TREE_CODE (use_nop_lhs) == SSA_NAME
3477 		  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_nop_lhs))
3478 		return;
3479 	      if (rhs_code == BIT_NOT_EXPR)
3480 		{
3481 		  g = convert_atomic_bit_not (fn, use_nop_stmt, lhs,
3482 					      mask);
3483 		  if (!g)
3484 		    return;
3485 		  /* Convert
3486 		     _1 = __atomic_fetch_or_4 (ptr_6, 1, _3);
3487 		     _2 = (int) _1;
3488 		     _7 = ~_2;
3489 		     _5 = (_Bool) _7;
3490 		     to
3491 		     _1 = __atomic_fetch_or_4 (ptr_6, 1, _3);
3492 		     _8 = _1 & 1;
3493 		     _5 = _8 == 0;
3494 		     and convert
3495 		     _1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);
3496 		     _2 = (int) _1;
3497 		     _7 = ~_2;
3498 		     _5 = (_Bool) _7;
3499 		     to
3500 		     _1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);
3501 		     _8 = _1 & 1;
3502 		     _5 = _8 == 0;
3503 		   */
3504 		  gsi = gsi_for_stmt (use_stmt);
3505 		  gsi_remove (&gsi, true);
3506 		  use_stmt = g;
3507 		  ibit = 0;
3508 		}
3509 	      else
3510 		{
3511 		  if (TREE_CODE (TREE_TYPE (use_nop_lhs)) != BOOLEAN_TYPE)
3512 		    return;
3513 		  if (rhs_code != GE_EXPR && rhs_code != LT_EXPR)
3514 		    return;
3515 		  tree cmp_rhs1 = gimple_assign_rhs1 (use_nop_stmt);
3516 		  if (use_lhs != cmp_rhs1)
3517 		    return;
3518 		  tree cmp_rhs2 = gimple_assign_rhs2 (use_nop_stmt);
3519 		  if (!integer_zerop (cmp_rhs2))
3520 		    return;
3521 
3522 		  tree and_mask;
3523 
3524 		  unsigned HOST_WIDE_INT bytes
3525 		    = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (use_rhs)));
3526 		  ibit = bytes * BITS_PER_UNIT - 1;
3527 		  unsigned HOST_WIDE_INT highest
3528 		    = HOST_WIDE_INT_1U << ibit;
3529 
3530 		  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
3531 		    {
3532 		      /* Get the signed maximum of the USE_RHS type.  */
3533 		      and_mask = build_int_cst (TREE_TYPE (use_rhs),
3534 						highest - 1);
3535 		      if (!operand_equal_p (and_mask, mask, 0))
3536 			return;
3537 
3538 		      /* Convert
3539 			 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3540 			 _5 = (signed int) _1;
3541 			 _4 = _5 < 0 or _5 >= 0;
3542 			 to
3543 			 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3544 			 _6 = _1 & 0x80000000;
3545 			 _4 = _6 != 0 or _6 == 0;
3546 		       */
3547 		      and_mask = build_int_cst (TREE_TYPE (use_rhs),
3548 						highest);
3549 		    }
3550 		  else
3551 		    {
3552 		      /* Get the signed minimum of the USE_RHS type.  */
3553 		      and_mask = build_int_cst (TREE_TYPE (use_rhs),
3554 						highest);
3555 		      if (!operand_equal_p (and_mask, mask, 0))
3556 			return;
3557 
3558 		      /* Convert
3559 			 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3560 			 _5 = (signed int) _1;
3561 			 _4 = _5 < 0 or _5 >= 0;
3562 			 to
3563 			 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3564 			 _6 = _1 & 0x80000000;
3565 			 _4 = _6 != 0 or _6 == 0;
3566 		       */
3567 		    }
3568 		  var = make_ssa_name (TREE_TYPE (use_rhs));
3569 		  gsi = gsi_for_stmt (use_stmt);
3570 		  gsi_remove (&gsi, true);
3571 		  g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,
3572 					   and_mask);
3573 		  gsi = gsi_for_stmt (use_nop_stmt);
3574 		  gsi_insert_before (&gsi, g, GSI_NEW_STMT);
3575 		  use_stmt = g;
3576 		  g = gimple_build_assign (use_nop_lhs,
3577 					   (rhs_code == GE_EXPR
3578 					    ? EQ_EXPR : NE_EXPR),
3579 					   var,
3580 					   build_zero_cst (TREE_TYPE (use_rhs)));
3581 		  gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3582 		  gsi = gsi_for_stmt (use_nop_stmt);
3583 		  gsi_remove (&gsi, true);
3584 		}
3585 	    }
3586 	  else
3587 	    {
3588 	      tree match_op[3];
3589 	      gimple *g;
3590 	      if (!gimple_nop_atomic_bit_test_and_p (use_nop_lhs,
3591 						     &match_op[0], NULL)
3592 		  || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (match_op[2])
3593 		  || !single_imm_use (match_op[2], &use_p, &g)
3594 		  || !is_gimple_assign (g))
3595 		return;
3596 	      mask = match_op[0];
3597 	      if (TREE_CODE (match_op[1]) == INTEGER_CST)
3598 		{
3599 		  ibit = tree_log2 (match_op[1]);
3600 		  gcc_assert (ibit >= 0);
3601 		}
3602 	      else
3603 		{
3604 		  g = SSA_NAME_DEF_STMT (match_op[1]);
3605 		  gcc_assert (is_gimple_assign (g));
3606 		  bit = gimple_assign_rhs2 (g);
3607 		}
3608 	      /* Convert
3609 		 _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
3610 		 _2 = (int) _1;
3611 		 _5 = _2 & mask;
3612 		 to
3613 		 _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
3614 		 _6 = _1 & mask;
3615 		 _5 = (int) _6;
3616 		 and convert
3617 		 _1 = ~mask_7;
3618 		 _2 = (unsigned int) _1;
3619 		 _3 = __atomic_fetch_and_4 (ptr_6, _2, 0);
3620 		 _4 = (int) _3;
3621 		 _5 = _4 & mask_7;
3622 		 to
3623 		 _1 = __atomic_fetch_and_* (ptr_6, ~mask_7, _3);
3624 		 _12 = _3 & mask_7;
3625 		 _5 = (int) _12;
3626 
3627 		 and convert
3628 		 _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);
3629 		 _2 = (short int) _1;
3630 		 _5 = _2 & mask;
3631 		 to
3632 		 _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);
3633 		 _8 = _1 & mask;
3634 		 _5 = (short int) _8;
3635 	      */
3636 	      gimple_seq stmts = NULL;
3637 	      match_op[1] = gimple_convert (&stmts,
3638 					    TREE_TYPE (use_rhs),
3639 					    match_op[1]);
3640 	      var = gimple_build (&stmts, BIT_AND_EXPR,
3641 				  TREE_TYPE (use_rhs), use_rhs, match_op[1]);
3642 	      gsi = gsi_for_stmt (use_stmt);
3643 	      gsi_remove (&gsi, true);
3644 	      release_defs (use_stmt);
3645 	      use_stmt = gimple_seq_last_stmt (stmts);
3646 	      gsi = gsi_for_stmt (use_nop_stmt);
3647 	      gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
3648 	      gimple_assign_set_rhs_with_ops (&gsi, CONVERT_EXPR, var);
3649 	      update_stmt (use_nop_stmt);
3650 	    }
3651 	}
3652       else
3653 	return;
3654 
3655       if (!bit)
3656 	{
3657 	  if (ibit < 0)
3658 	    gcc_unreachable ();
3659 	  bit = build_int_cst (TREE_TYPE (lhs), ibit);
3660 	}
3661     }
3662   else if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)))
3663 	   == CODE_FOR_nothing)
3664     return;
3665 
3666   tree use_lhs = gimple_assign_lhs (use_stmt);
3667   if (!use_lhs)
3668     return;
3669 
3670   if (!bit)
3671     {
3672       if (TREE_CODE (mask) == INTEGER_CST)
3673 	{
3674 	  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
3675 	    mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
3676 	  mask = fold_convert (TREE_TYPE (lhs), mask);
3677 	  int ibit = tree_log2 (mask);
3678 	  if (ibit < 0)
3679 	    return;
3680 	  bit = build_int_cst (TREE_TYPE (lhs), ibit);
3681 	}
3682       else if (TREE_CODE (mask) == SSA_NAME)
3683 	{
3684 	  gimple *g = SSA_NAME_DEF_STMT (mask);
3685 	  tree match_op;
3686 	  if (gimple_nop_convert (mask, &match_op, NULL))
3687 	    {
3688 	      mask = match_op;
3689 	      if (TREE_CODE (mask) != SSA_NAME)
3690 		return;
3691 	      g = SSA_NAME_DEF_STMT (mask);
3692 	    }
3693 	  if (!is_gimple_assign (g))
3694 	    return;
3695 
3696 	  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
3697 	    {
3698 	      if (gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
3699 		return;
3700 	      mask = gimple_assign_rhs1 (g);
3701 	      if (TREE_CODE (mask) != SSA_NAME)
3702 		return;
3703 	      g = SSA_NAME_DEF_STMT (mask);
3704 	    }
3705 
3706 	  rhs_code = gimple_assign_rhs_code (g);
3707 	  if (rhs_code != LSHIFT_EXPR
3708 	      || !integer_onep (gimple_assign_rhs1 (g)))
3709 	    return;
3710 	  bit = gimple_assign_rhs2 (g);
3711 	}
3712       else
3713 	return;
3714 
3715       tree cmp_mask;
3716       if (gimple_assign_rhs1 (use_stmt) == lhs)
3717 	cmp_mask = gimple_assign_rhs2 (use_stmt);
3718       else
3719 	cmp_mask = gimple_assign_rhs1 (use_stmt);
3720 
3721       tree match_op;
3722       if (gimple_nop_convert (cmp_mask, &match_op, NULL))
3723 	cmp_mask = match_op;
3724 
3725       if (!operand_equal_p (cmp_mask, mask, 0))
3726 	return;
3727     }
3728 
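  /* Decide whether every use of the bit-test result is just a comparison
     against zero.  If so, USE_BOOL stays true and the FLAG argument built
     below asks the internal function to return a 0/1 value instead of the
     masked old value, as described in the comment before this function.  */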
3729   bool use_bool = true;
3730   bool has_debug_uses = false;
3731   imm_use_iterator iter;
3732   gimple *g;
3733 
3734   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
3735     use_bool = false;
3736   FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
3737     {
3738       enum tree_code code = ERROR_MARK;
3739       tree op0 = NULL_TREE, op1 = NULL_TREE;
3740       if (is_gimple_debug (g))
3741 	{
3742 	  has_debug_uses = true;
3743 	  continue;
3744 	}
3745       else if (is_gimple_assign (g))
3746 	switch (gimple_assign_rhs_code (g))
3747 	  {
3748 	  case COND_EXPR:
3749 	    op1 = gimple_assign_rhs1 (g);
3750 	    code = TREE_CODE (op1);
3751 	    op0 = TREE_OPERAND (op1, 0);
3752 	    op1 = TREE_OPERAND (op1, 1);
3753 	    break;
3754 	  case EQ_EXPR:
3755 	  case NE_EXPR:
3756 	    code = gimple_assign_rhs_code (g);
3757 	    op0 = gimple_assign_rhs1 (g);
3758 	    op1 = gimple_assign_rhs2 (g);
3759 	    break;
3760 	  default:
3761 	    break;
3762 	  }
3763       else if (gimple_code (g) == GIMPLE_COND)
3764 	{
3765 	  code = gimple_cond_code (g);
3766 	  op0 = gimple_cond_lhs (g);
3767 	  op1 = gimple_cond_rhs (g);
3768 	}
3769 
3770       if ((code == EQ_EXPR || code == NE_EXPR)
3771 	  && op0 == use_lhs
3772 	  && integer_zerop (op1))
3773 	{
3774 	  use_operand_p use_p;
3775 	  int n = 0;
3776 	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3777 	    n++;
3778 	  if (n == 1)
3779 	    continue;
3780 	}
3781 
3782       use_bool = false;
3783       break;
3784     }
3785 
3786   tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
3787   tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
3788   if (has_model_arg)
3789     g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
3790 				    bit, flag, gimple_call_arg (call, 2));
3791   else
3792     g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
3793 				    bit, flag);
3794   gimple_call_set_lhs (g, new_lhs);
3795   gimple_set_location (g, gimple_location (call));
3796   gimple_move_vops (g, call);
3797   bool throws = stmt_can_throw_internal (cfun, call);
3798   gimple_call_set_nothrow (as_a <gcall *> (g),
3799 			   gimple_call_nothrow_p (as_a <gcall *> (call)));
3800   gimple_stmt_iterator gsi = *gsip;
3801   gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3802   edge e = NULL;
3803   if (throws)
3804     {
3805       maybe_clean_or_replace_eh_stmt (call, g);
3806       if (after || (use_bool && has_debug_uses))
3807 	e = find_fallthru_edge (gsi_bb (gsi)->succs);
3808     }
3809   if (after)
3810     {
3811       /* The internal function returns the value of the specified bit
3812 	 before the atomic operation.  If we are interested in the value
3813 	 of the specified bit after the atomic operation (which only makes
3814 	 sense for xor, otherwise the bit content is known at compile
3815 	 time), we need to invert the bit.  */
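      /* For example, with ATOMIC_BIT_TEST_AND_COMPLEMENT implementing
	 __atomic_xor_fetch_*, a result of 1 means the bit was set before
	 the xor and is therefore clear afterwards, so xoring the result
	 with 1 (or with the converted MASK in the non-boolean case) below
	 produces the post-operation value.  */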
3816       tree mask_convert = mask;
3817       gimple_seq stmts = NULL;
3818       if (!use_bool)
3819 	mask_convert = gimple_convert (&stmts, TREE_TYPE (lhs), mask);
3820       new_lhs = gimple_build (&stmts, BIT_XOR_EXPR, TREE_TYPE (lhs), new_lhs,
3821 			      use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
3822 				       : mask_convert);
3823       if (throws)
3824 	{
3825 	  gsi_insert_seq_on_edge_immediate (e, stmts);
3826 	  gsi = gsi_for_stmt (gimple_seq_last (stmts));
3827 	}
3828       else
3829 	gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
3830     }
3831   if (use_bool && has_debug_uses)
3832     {
3833       tree temp = NULL_TREE;
3834       if (!throws || after || single_pred_p (e->dest))
3835 	{
3836 	  temp = build_debug_expr_decl (TREE_TYPE (lhs));
3837 	  tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
3838 	  g = gimple_build_debug_bind (temp, t, g);
3839 	  if (throws && !after)
3840 	    {
3841 	      gsi = gsi_after_labels (e->dest);
3842 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3843 	    }
3844 	  else
3845 	    gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3846 	}
3847       FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
3848 	if (is_gimple_debug (g))
3849 	  {
3850 	    use_operand_p use_p;
3851 	    if (temp == NULL_TREE)
3852 	      gimple_debug_bind_reset_value (g);
3853 	    else
3854 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3855 		SET_USE (use_p, temp);
3856 	    update_stmt (g);
3857 	  }
3858     }
3859   SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
3860     = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
3861   replace_uses_by (use_lhs, new_lhs);
3862   gsi = gsi_for_stmt (use_stmt);
3863   gsi_remove (&gsi, true);
3864   release_defs (use_stmt);
3865   gsi_remove (gsip, true);
3866   release_ssa_name (lhs);
3867 }
3868 
3869 /* Optimize
3870    a = {};
3871    b = a;
3872    into
3873    a = {};
3874    b = {};
3875    Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
3876    and/or memcpy (&b, &a, sizeof (a)); instead of b = a;  */
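
/* A hedged source-level sketch (types and names are illustrative, not
   from this file):

     struct S { int a[16]; };

     void
     copy_cleared (struct S *d)
     {
       struct S tmp = {};
       *d = tmp;
     }

   Rewriting *d = tmp; into *d = {}; leaves the temporary unread, so later
   DSE/DCE can remove both the clearing of tmp and tmp itself.  */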
3877 
3878 static void
3879 optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
3880 {
3881   gimple *stmt = gsi_stmt (*gsip);
3882   if (gimple_has_volatile_ops (stmt))
3883     return;
3884 
3885   tree vuse = gimple_vuse (stmt);
3886   if (vuse == NULL)
3887     return;
3888 
3889   gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
3890   tree src2 = NULL_TREE, len2 = NULL_TREE;
3891   poly_int64 offset, offset2;
3892   tree val = integer_zero_node;
3893   if (gimple_store_p (defstmt)
3894       && gimple_assign_single_p (defstmt)
3895       && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
3896       && !gimple_clobber_p (defstmt))
3897     src2 = gimple_assign_lhs (defstmt);
3898   else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
3899 	   && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
3900 	   && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
3901     {
3902       src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
3903       len2 = gimple_call_arg (defstmt, 2);
3904       val = gimple_call_arg (defstmt, 1);
3905       /* For non-0 val, we'd have to transform stmt from assignment
3906 	 into memset (only if dest is addressable).  */
3907       if (!integer_zerop (val) && is_gimple_assign (stmt))
3908 	src2 = NULL_TREE;
3909     }
3910 
3911   if (src2 == NULL_TREE)
3912     return;
3913 
3914   if (len == NULL_TREE)
3915     len = (TREE_CODE (src) == COMPONENT_REF
3916 	   ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
3917 	   : TYPE_SIZE_UNIT (TREE_TYPE (src)));
3918   if (len2 == NULL_TREE)
3919     len2 = (TREE_CODE (src2) == COMPONENT_REF
3920 	    ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
3921 	    : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
3922   if (len == NULL_TREE
3923       || !poly_int_tree_p (len)
3924       || len2 == NULL_TREE
3925       || !poly_int_tree_p (len2))
3926     return;
3927 
3928   src = get_addr_base_and_unit_offset (src, &offset);
3929   src2 = get_addr_base_and_unit_offset (src2, &offset2);
3930   if (src == NULL_TREE
3931       || src2 == NULL_TREE
3932       || maybe_lt (offset, offset2))
3933     return;
3934 
3935   if (!operand_equal_p (src, src2, 0))
3936     return;
3937 
3938   /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
3939      Make sure that
3940      [ src + offset, src + offset + len - 1 ] is a subset of that.  */
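  /* E.g. offset2 == 0, len2 == 16, offset == 4, len == 8:
     8 + (4 - 0) == 12 <= 16, so every copied byte lies inside the
     region already known to contain VAL.  */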
3941   if (maybe_gt (wi::to_poly_offset (len) + (offset - offset2),
3942 		wi::to_poly_offset (len2)))
3943     return;
3944 
3945   if (dump_file && (dump_flags & TDF_DETAILS))
3946     {
3947       fprintf (dump_file, "Simplified\n  ");
3948       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3949       fprintf (dump_file, "after previous\n  ");
3950       print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
3951     }
3952 
3953   /* For simplicity, don't change the kind of the stmt:
3954      turn dest = src; into dest = {}; and memcpy (&dest, &src, len);
3955      into memset (&dest, val, len).
3956      In theory we could change dest = src into memset if dest
3957      is addressable (maybe beneficial if val is not 0), or
3958      memcpy (&dest, &src, len) into dest = {} if len is the size
3959      of dest and dest isn't volatile.  */
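  /* E.g. memset (&a, 2, sizeof (a)); memcpy (&b, &a, sizeof (a));
     keeps the call a call and becomes memset (&b, 2, sizeof (a));,
     while an aggregate copy b = a; after a zeroing store stays an
     assignment and becomes b = {};.  */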
3960   if (is_gimple_assign (stmt))
3961     {
3962       tree ctor = build_constructor (TREE_TYPE (dest), NULL);
3963       gimple_assign_set_rhs_from_tree (gsip, ctor);
3964       update_stmt (stmt);
3965     }
3966   else /* If stmt is memcpy, transform it into memset.  */
3967     {
3968       gcall *call = as_a <gcall *> (stmt);
3969       tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
3970       gimple_call_set_fndecl (call, fndecl);
3971       gimple_call_set_fntype (call, TREE_TYPE (fndecl));
3972       gimple_call_set_arg (call, 1, val);
3973       update_stmt (stmt);
3974     }
3975 
3976   if (dump_file && (dump_flags & TDF_DETAILS))
3977     {
3978       fprintf (dump_file, "into\n  ");
3979       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3980     }
3981 }
3982 
3983 /* A simple pass that attempts to fold all builtin functions.  This pass
3984    is run after we've propagated as many constants as we can.  */
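
/* For instance (illustrative user code, not from this file):

     int
     f (int x)
     {
       return __builtin_constant_p (x);
     }

   Any __builtin_constant_p call still unresolved at this point is folded
   to 0, __builtin_assume_aligned (p, N) collapses to its first argument,
   and the __atomic/__sync fetch-and-<op> builtins are matched against the
   bit test-and-set/complement/reset internal functions handled above.  */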
3985 
3986 namespace {
3987 
3988 const pass_data pass_data_fold_builtins =
3989 {
3990   GIMPLE_PASS, /* type */
3991   "fab", /* name */
3992   OPTGROUP_NONE, /* optinfo_flags */
3993   TV_NONE, /* tv_id */
3994   ( PROP_cfg | PROP_ssa ), /* properties_required */
3995   0, /* properties_provided */
3996   0, /* properties_destroyed */
3997   0, /* todo_flags_start */
3998   TODO_update_ssa, /* todo_flags_finish */
3999 };
4000 
4001 class pass_fold_builtins : public gimple_opt_pass
4002 {
4003 public:
4004   pass_fold_builtins (gcc::context *ctxt)
4005     : gimple_opt_pass (pass_data_fold_builtins, ctxt)
4006   {}
4007 
4008   /* opt_pass methods: */
4009   opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
4010   virtual unsigned int execute (function *);
4011 
4012 }; // class pass_fold_builtins
4013 
4014 unsigned int
4015 pass_fold_builtins::execute (function *fun)
4016 {
4017   bool cfg_changed = false;
4018   basic_block bb;
4019   unsigned int todoflags = 0;
4020 
4021   FOR_EACH_BB_FN (bb, fun)
4022     {
4023       gimple_stmt_iterator i;
4024       for (i = gsi_start_bb (bb); !gsi_end_p (i); )
4025 	{
4026 	  gimple *stmt, *old_stmt;
4027 	  tree callee;
4028 	  enum built_in_function fcode;
4029 
4030 	  stmt = gsi_stmt (i);
4031 
4032           if (gimple_code (stmt) != GIMPLE_CALL)
4033 	    {
4034 	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts;
4035 		 after the last GIMPLE DSE they aren't needed and might
4036 		 unnecessarily keep the SSA_NAMEs live.  */
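	      /* E.g. MEM[(struct S *)ptr_3] ={v} {CLOBBER}; where ptr_3
		 is an SSA_NAME; keeping the clobber around would only
		 extend ptr_3's lifetime.  */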
4037 	      if (gimple_clobber_p (stmt))
4038 		{
4039 		  tree lhs = gimple_assign_lhs (stmt);
4040 		  if (TREE_CODE (lhs) == MEM_REF
4041 		      && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
4042 		    {
4043 		      unlink_stmt_vdef (stmt);
4044 		      gsi_remove (&i, true);
4045 		      release_defs (stmt);
4046 		      continue;
4047 		    }
4048 		}
4049 	      else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
4050 		optimize_memcpy (&i, gimple_assign_lhs (stmt),
4051 				 gimple_assign_rhs1 (stmt), NULL_TREE);
4052 	      gsi_next (&i);
4053 	      continue;
4054 	    }
4055 
4056 	  callee = gimple_call_fndecl (stmt);
4057 	  if (!callee || !fndecl_built_in_p (callee, BUILT_IN_NORMAL))
4058 	    {
4059 	      gsi_next (&i);
4060 	      continue;
4061 	    }
4062 
4063 	  fcode = DECL_FUNCTION_CODE (callee);
4064 	  if (fold_stmt (&i))
4065 	    ;
4066 	  else
4067 	    {
4068 	      tree result = NULL_TREE;
4069 	      switch (DECL_FUNCTION_CODE (callee))
4070 		{
4071 		case BUILT_IN_CONSTANT_P:
4072 		  /* Resolve __builtin_constant_p.  If it hasn't been
4073 		     folded to integer_one_node by now, it's fairly
4074 		     certain that the value simply isn't constant.  */
4075 		  result = integer_zero_node;
4076 		  break;
4077 
4078 		case BUILT_IN_ASSUME_ALIGNED:
4079 		  /* Remove __builtin_assume_aligned.  */
4080 		  result = gimple_call_arg (stmt, 0);
4081 		  break;
4082 
4083 		case BUILT_IN_STACK_RESTORE:
4084 		  result = optimize_stack_restore (i);
4085 		  if (result)
4086 		    break;
4087 		  gsi_next (&i);
4088 		  continue;
4089 
4090 		case BUILT_IN_UNREACHABLE:
4091 		  if (optimize_unreachable (i))
4092 		    cfg_changed = true;
4093 		  break;
4094 
4095 		case BUILT_IN_ATOMIC_FETCH_OR_1:
4096 		case BUILT_IN_ATOMIC_FETCH_OR_2:
4097 		case BUILT_IN_ATOMIC_FETCH_OR_4:
4098 		case BUILT_IN_ATOMIC_FETCH_OR_8:
4099 		case BUILT_IN_ATOMIC_FETCH_OR_16:
4100 		  optimize_atomic_bit_test_and (&i,
4101 						IFN_ATOMIC_BIT_TEST_AND_SET,
4102 						true, false);
4103 		  break;
4104 		case BUILT_IN_SYNC_FETCH_AND_OR_1:
4105 		case BUILT_IN_SYNC_FETCH_AND_OR_2:
4106 		case BUILT_IN_SYNC_FETCH_AND_OR_4:
4107 		case BUILT_IN_SYNC_FETCH_AND_OR_8:
4108 		case BUILT_IN_SYNC_FETCH_AND_OR_16:
4109 		  optimize_atomic_bit_test_and (&i,
4110 						IFN_ATOMIC_BIT_TEST_AND_SET,
4111 						false, false);
4112 		  break;
4113 
4114 		case BUILT_IN_ATOMIC_FETCH_XOR_1:
4115 		case BUILT_IN_ATOMIC_FETCH_XOR_2:
4116 		case BUILT_IN_ATOMIC_FETCH_XOR_4:
4117 		case BUILT_IN_ATOMIC_FETCH_XOR_8:
4118 		case BUILT_IN_ATOMIC_FETCH_XOR_16:
4119 		  optimize_atomic_bit_test_and
4120 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
4121 		  break;
4122 		case BUILT_IN_SYNC_FETCH_AND_XOR_1:
4123 		case BUILT_IN_SYNC_FETCH_AND_XOR_2:
4124 		case BUILT_IN_SYNC_FETCH_AND_XOR_4:
4125 		case BUILT_IN_SYNC_FETCH_AND_XOR_8:
4126 		case BUILT_IN_SYNC_FETCH_AND_XOR_16:
4127 		  optimize_atomic_bit_test_and
4128 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
4129 		  break;
4130 
4131 		case BUILT_IN_ATOMIC_XOR_FETCH_1:
4132 		case BUILT_IN_ATOMIC_XOR_FETCH_2:
4133 		case BUILT_IN_ATOMIC_XOR_FETCH_4:
4134 		case BUILT_IN_ATOMIC_XOR_FETCH_8:
4135 		case BUILT_IN_ATOMIC_XOR_FETCH_16:
4136 		  optimize_atomic_bit_test_and
4137 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
4138 		  break;
4139 		case BUILT_IN_SYNC_XOR_AND_FETCH_1:
4140 		case BUILT_IN_SYNC_XOR_AND_FETCH_2:
4141 		case BUILT_IN_SYNC_XOR_AND_FETCH_4:
4142 		case BUILT_IN_SYNC_XOR_AND_FETCH_8:
4143 		case BUILT_IN_SYNC_XOR_AND_FETCH_16:
4144 		  optimize_atomic_bit_test_and
4145 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
4146 		  break;
4147 
4148 		case BUILT_IN_ATOMIC_FETCH_AND_1:
4149 		case BUILT_IN_ATOMIC_FETCH_AND_2:
4150 		case BUILT_IN_ATOMIC_FETCH_AND_4:
4151 		case BUILT_IN_ATOMIC_FETCH_AND_8:
4152 		case BUILT_IN_ATOMIC_FETCH_AND_16:
4153 		  optimize_atomic_bit_test_and (&i,
4154 						IFN_ATOMIC_BIT_TEST_AND_RESET,
4155 						true, false);
4156 		  break;
4157 		case BUILT_IN_SYNC_FETCH_AND_AND_1:
4158 		case BUILT_IN_SYNC_FETCH_AND_AND_2:
4159 		case BUILT_IN_SYNC_FETCH_AND_AND_4:
4160 		case BUILT_IN_SYNC_FETCH_AND_AND_8:
4161 		case BUILT_IN_SYNC_FETCH_AND_AND_16:
4162 		  optimize_atomic_bit_test_and (&i,
4163 						IFN_ATOMIC_BIT_TEST_AND_RESET,
4164 						false, false);
4165 		  break;
4166 
4167 		case BUILT_IN_MEMCPY:
4168 		  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
4169 		      && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
4170 		      && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
4171 		      && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
4172 		    {
4173 		      tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
4174 		      tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
4175 		      tree len = gimple_call_arg (stmt, 2);
4176 		      optimize_memcpy (&i, dest, src, len);
4177 		    }
4178 		  break;
4179 
4180 		case BUILT_IN_VA_START:
4181 		case BUILT_IN_VA_END:
4182 		case BUILT_IN_VA_COPY:
4183 		  /* These shouldn't be folded before pass_stdarg.  */
4184 		  result = optimize_stdarg_builtin (stmt);
4185 		  break;
4186 
4187 		default:;
4188 		}
4189 
4190 	      if (!result)
4191 		{
4192 		  gsi_next (&i);
4193 		  continue;
4194 		}
4195 
4196 	      gimplify_and_update_call_from_tree (&i, result);
4197 	    }
4198 
4199 	  todoflags |= TODO_update_address_taken;
4200 
4201 	  if (dump_file && (dump_flags & TDF_DETAILS))
4202 	    {
4203 	      fprintf (dump_file, "Simplified\n  ");
4204 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
4205 	    }
4206 
4207           old_stmt = stmt;
4208 	  stmt = gsi_stmt (i);
4209 	  update_stmt (stmt);
4210 
4211 	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
4212 	      && gimple_purge_dead_eh_edges (bb))
4213 	    cfg_changed = true;
4214 
4215 	  if (dump_file && (dump_flags & TDF_DETAILS))
4216 	    {
4217 	      fprintf (dump_file, "to\n  ");
4218 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
4219 	      fprintf (dump_file, "\n");
4220 	    }
4221 
4222 	  /* Retry the same statement if it changed into another
4223 	     builtin; there might be new opportunities now.  */
4224           if (gimple_code (stmt) != GIMPLE_CALL)
4225 	    {
4226 	      gsi_next (&i);
4227 	      continue;
4228 	    }
4229 	  callee = gimple_call_fndecl (stmt);
4230 	  if (!callee
4231 	      || !fndecl_built_in_p (callee, fcode))
4232 	    gsi_next (&i);
4233 	}
4234     }
4235 
4236   /* Delete unreachable blocks.  */
4237   if (cfg_changed)
4238     todoflags |= TODO_cleanup_cfg;
4239 
4240   return todoflags;
4241 }
4242 
4243 } // anon namespace
4244 
4245 gimple_opt_pass *
4246 make_pass_fold_builtins (gcc::context *ctxt)
4247 {
4248   return new pass_fold_builtins (ctxt);
4249 }
4250 
4251 /* A simple pass that emits some warnings post IPA.  */
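
/* For example (illustrative code, not from this file):

     extern void use (void *) __attribute__ ((nonnull));

     void
     call_with_null (void)
     {
       use (0);
     }

   For a pointer argument that is (or after IPA propagation becomes)
   visibly null at a call to a function declared nonnull, the pass emits
   the "argument 1 null where non-null expected" warning built below,
   followed by a note pointing at the nonnull declaration.  */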
4252 
4253 namespace {
4254 
4255 const pass_data pass_data_post_ipa_warn =
4256 {
4257   GIMPLE_PASS, /* type */
4258   "post_ipa_warn", /* name */
4259   OPTGROUP_NONE, /* optinfo_flags */
4260   TV_NONE, /* tv_id */
4261   ( PROP_cfg | PROP_ssa ), /* properties_required */
4262   0, /* properties_provided */
4263   0, /* properties_destroyed */
4264   0, /* todo_flags_start */
4265   0, /* todo_flags_finish */
4266 };
4267 
4268 class pass_post_ipa_warn : public gimple_opt_pass
4269 {
4270 public:
4271   pass_post_ipa_warn (gcc::context *ctxt)
4272     : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
4273   {}
4274 
4275   /* opt_pass methods: */
4276   opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
4277   virtual bool gate (function *) { return warn_nonnull != 0; }
4278   virtual unsigned int execute (function *);
4279 
4280 }; // class pass_post_ipa_warn
4281 
4282 unsigned int
4283 pass_post_ipa_warn::execute (function *fun)
4284 {
4285   basic_block bb;
4286 
4287   FOR_EACH_BB_FN (bb, fun)
4288     {
4289       gimple_stmt_iterator gsi;
4290       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4291 	{
4292 	  gimple *stmt = gsi_stmt (gsi);
4293 	  if (!is_gimple_call (stmt) || warning_suppressed_p (stmt, OPT_Wnonnull))
4294 	    continue;
4295 
4296 	  tree fntype = gimple_call_fntype (stmt);
4297 	  bitmap nonnullargs = get_nonnull_args (fntype);
4298 	  if (!nonnullargs)
4299 	    continue;
4300 
4301 	  tree fndecl = gimple_call_fndecl (stmt);
4302 	  const bool closure = fndecl && DECL_LAMBDA_FUNCTION_P (fndecl);
4303 
4304 	  for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
4305 	    {
4306 	      tree arg = gimple_call_arg (stmt, i);
4307 	      if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
4308 		continue;
4309 	      if (!integer_zerop (arg))
4310 		continue;
4311 	      if (i == 0 && closure)
4312 		/* Avoid warning for the first argument to lambda functions.  */
4313 		continue;
4314 	      if (!bitmap_empty_p (nonnullargs)
4315 		  && !bitmap_bit_p (nonnullargs, i))
4316 		continue;
4317 
4318 	      /* In C++ non-static member functions argument 0 refers
4319 		 to the implicit this pointer.  Use the same one-based
4320 		 numbering for ordinary arguments.  */
4321 	      unsigned argno = TREE_CODE (fntype) == METHOD_TYPE ? i : i + 1;
4322 	      location_t loc = (EXPR_HAS_LOCATION (arg)
4323 				? EXPR_LOCATION (arg)
4324 				: gimple_location (stmt));
4325 	      auto_diagnostic_group d;
4326 	      if (argno == 0)
4327 		{
4328 		  if (warning_at (loc, OPT_Wnonnull,
4329 				  "%qs pointer is null", "this")
4330 		      && fndecl)
4331 		    inform (DECL_SOURCE_LOCATION (fndecl),
4332 			    "in a call to non-static member function %qD",
4333 			    fndecl);
4334 		  continue;
4335 		}
4336 
4337 	      if (!warning_at (loc, OPT_Wnonnull,
4338 			       "argument %u null where non-null "
4339 			       "expected", argno))
4340 		continue;
4341 
4343 	      if (fndecl && DECL_IS_UNDECLARED_BUILTIN (fndecl))
4344 		inform (loc, "in a call to built-in function %qD",
4345 			fndecl);
4346 	      else if (fndecl)
4347 		inform (DECL_SOURCE_LOCATION (fndecl),
4348 			"in a call to function %qD declared %qs",
4349 			fndecl, "nonnull");
4350 	    }
4351 	  BITMAP_FREE (nonnullargs);
4352 	}
4353     }
4354   return 0;
4355 }
4356 
4357 } // anon namespace
4358 
4359 gimple_opt_pass *
4360 make_pass_post_ipa_warn (gcc::context *ctxt)
4361 {
4362   return new pass_post_ipa_warn (ctxt);
4363 }
4364