1 /* Alias analysis for GNU C
2    Copyright (C) 1997-2016 Free Software Foundation, Inc.
3    Contributed by John Carr (jfc@mit.edu).
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "df.h"
30 #include "tm_p.h"
31 #include "gimple-ssa.h"
32 #include "emit-rtl.h"
33 #include "alias.h"
34 #include "fold-const.h"
35 #include "varasm.h"
36 #include "cselib.h"
37 #include "langhooks.h"
38 #include "cfganal.h"
39 #include "rtl-iter.h"
40 #include "cgraph.h"
41 
42 /* The aliasing API provided here solves related but different problems:
43 
44    Say there exists (in C)
45 
46    struct X {
47      struct Y y1;
48      struct Z z2;
49    } x1, *px1,  *px2;
50 
51    struct Y y2, *py;
52    struct Z z2, *pz;
53 
54 
55    py = &x1.y1;
56    px2 = &x1;
57 
58    Consider the four questions:
59 
60    Can a store to x1 interfere with px2->y1?
61    Can a store to x1 interfere with px2->z2?
62    Can a store to x1 change the value pointed to by py?
63    Can a store to x1 change the value pointed to by pz?
64 
65    The answer to these questions can be yes, yes, yes, and maybe.
66 
67    The first two questions can be answered with a simple examination
68    of the type system.  If structure X contains a field of type Y then
69    a store through a pointer to an X can overwrite any field that is
70    contained (recursively) in an X (unless we know that px1 != px2).
71 
72    The last two questions can be solved in the same way as the first
73    two questions but this is too conservative.  The observation is
74    that in some cases we can know which (if any) fields are addressed
75    and if those addresses are used in bad ways.  This analysis may be
76    language specific.  In C, arbitrary operations may be applied to
77    pointers.  However, there is some indication that this may be too
78    conservative for some C++ types.
79 
80    The pass ipa-type-escape does this analysis for the types whose
81    instances do not escape across the compilation boundary.
82 
83    Historically in GCC, these two problems were combined and a single
84    data structure was used to represent the solution to these
85    problems.  We now have two similar but different data structures.
86    The data structure to solve the last two questions is similar to
87    the first, but does not contain the fields whose addresses are never
88    taken.  For types that do escape the compilation unit, the data
89    structures will have identical information.
90 */
91 
92 /* The alias sets assigned to MEMs assist the back-end in determining
93    which MEMs can alias which other MEMs.  In general, two MEMs in
94    different alias sets cannot alias each other, with one important
95    exception.  Consider something like:
96 
97      struct S { int i; double d; };
98 
99    a store to an `S' can alias something of either type `int' or type
100    `double'.  (However, a store to an `int' cannot alias a `double'
101    and vice versa.)  We indicate this via a tree structure that looks
102    like:
103 	   struct S
104 	    /   \
105 	   /     \
106 	 |/_     _\|
107 	 int    double
108 
109    (The arrows are directed and point downwards.)
110     In this situation we say the alias set for `struct S' is the
111    `superset' and that those for `int' and `double' are `subsets'.
112 
113    To see whether two alias sets can point to the same memory, we must
114    see if either alias set is a subset of the other. We need not trace
115    past immediate descendants, however, since we propagate all
116    grandchildren up one level.
117 
118    Alias set zero is implicitly a superset of all other alias sets.
119    However, there is no actual entry for alias set zero.  It is an
120    error to attempt to explicitly construct a subset of zero.  */
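
/* For example (a sketch, assuming the usual C rules and the `struct S'
   above):

     struct S s;
     int *ip = &s.i;
     double *dp = &s.d;

     s = ...;     may alias *ip and *dp, since `struct S' is the superset
     *ip = ...;   may alias s.i, but not *dp
     *dp = ...;   may alias s.d, but not *ip

   A store through a `char *' (which the C front end puts in alias set
   zero) may alias any of the above.  */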
121 
122 struct alias_set_hash : int_hash <int, INT_MIN, INT_MIN + 1> {};
123 
124 struct GTY(()) alias_set_entry {
125   /* The alias set number, as stored in MEM_ALIAS_SET.  */
126   alias_set_type alias_set;
127 
128   /* The children of the alias set.  These are not just the immediate
129      children, but, in fact, all descendants.  So, if we have:
130 
131        struct T { struct S s; float f; }
132 
133      continuing our example above, the children here will be all of
134      `int', `double', `float', and `struct S'.  */
135   hash_map<alias_set_hash, int> *children;
136 
137   /* Nonzero if the set would have a child of zero: this effectively makes
138      this alias set the same as alias set zero.  */
139   bool has_zero_child;
140   /* Nonzero if the alias set corresponds to a pointer type itself (i.e. not
141      to an aggregate containing a pointer).
142      This is used for a special case where we need a universal pointer type
143      compatible with all other pointer types.  */
144   bool is_pointer;
145   /* Nonzero if is_pointer is set or one of the children has has_pointer set.  */
146   bool has_pointer;
147 };
148 
149 static int rtx_equal_for_memref_p (const_rtx, const_rtx);
150 static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
151 static void record_set (rtx, const_rtx, void *);
152 static int base_alias_check (rtx, rtx, rtx, rtx, machine_mode,
153 			     machine_mode);
154 static rtx find_base_value (rtx);
155 static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
156 static alias_set_entry *get_alias_set_entry (alias_set_type);
157 static tree decl_for_component_ref (tree);
158 static int write_dependence_p (const_rtx,
159 			       const_rtx, machine_mode, rtx,
160 			       bool, bool, bool);
161 static int compare_base_symbol_refs (const_rtx, const_rtx);
162 
163 static void memory_modified_1 (rtx, const_rtx, void *);
164 
165 /* Query statistics for the different low-level disambiguators.
166    A high-level query may trigger multiple of them.  */
167 
168 static struct {
169   unsigned long long num_alias_zero;
170   unsigned long long num_same_alias_set;
171   unsigned long long num_same_objects;
172   unsigned long long num_volatile;
173   unsigned long long num_dag;
174   unsigned long long num_universal;
175   unsigned long long num_disambiguated;
176 } alias_stats;
177 
178 
179 /* Set up all info needed to perform alias analysis on memory references.  */
180 
181 /* Returns the size in bytes of the mode of X.  */
182 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
183 
184 /* Cap the number of passes we make over the insns propagating alias
185    information through set chains.
186    ??? 10 is a completely arbitrary choice.  This should be based on the
187    maximum loop depth in the CFG, but we do not have this information
188    available (even if current_loops _is_ available).  */
189 #define MAX_ALIAS_LOOP_PASSES 10
190 
191 /* reg_base_value[N] gives an address to which register N is related.
192    If all sets after the first add or subtract to the current value
193    or otherwise modify it so it does not point to a different top level
194    object, reg_base_value[N] is equal to the address part of the source
195    of the first set.
196 
197    A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
198    expressions represent three types of base:
199 
200      1. incoming arguments.  There is just one ADDRESS to represent all
201 	arguments, since we do not know at this level whether accesses
202 	based on different arguments can alias.  The ADDRESS has id 0.
203 
204      2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx
205 	(if distinct from frame_pointer_rtx) and arg_pointer_rtx.
206 	Each of these rtxes has a separate ADDRESS associated with it,
207 	each with a negative id.
208 
209 	GCC is (and is required to be) precise in which register it
210 	chooses to access a particular region of stack.  We can therefore
211 	assume that accesses based on one of these rtxes do not alias
212 	accesses based on another of these rtxes.
213 
214      3. bases that are derived from malloc()ed memory (REG_NOALIAS).
215 	Each such piece of memory has a separate ADDRESS associated
216 	with it, each with an id greater than 0.
217 
218    Accesses based on one ADDRESS do not alias accesses based on other
219    ADDRESSes.  Accesses based on ADDRESSes in groups (2) and (3) do not
220    alias globals either; the ADDRESSes have Pmode to indicate this.
221    The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to
222    indicate this.  */
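
/* A sketch of how the three groups arise (the names are illustrative
   only):

     void f (int *arg)
     {
       int local;
       int *p = __builtin_malloc (4);   REG_NOALIAS: unique ADDRESS, id > 0
       ... *arg ...                     argument ADDRESS, id 0, VOIDmode
       ... local ...                    frame/stack ADDRESS, negative id
     }

   Accesses through p, through arg and based on &local are then treated
   as pairwise non-aliasing; only the argument-based access may still
   alias global memory.  */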
223 
224 static GTY(()) vec<rtx, va_gc> *reg_base_value;
225 static rtx *new_reg_base_value;
226 
227 /* The single VOIDmode ADDRESS that represents all argument bases.
228    It has id 0.  */
229 static GTY(()) rtx arg_base_value;
230 
231 /* Used to allocate unique ids to each REG_NOALIAS ADDRESS.  */
232 static int unique_id;
233 
234 /* We preserve a copy of the old array around to reduce the amount of
235    garbage produced.  About 8% of the garbage produced was attributed to
236    this array.  */
237 static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value;
238 
239 /* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special
240    registers.  */
241 #define UNIQUE_BASE_VALUE_SP	-1
242 #define UNIQUE_BASE_VALUE_ARGP	-2
243 #define UNIQUE_BASE_VALUE_FP	-3
244 #define UNIQUE_BASE_VALUE_HFP	-4
245 
246 #define static_reg_base_value \
247   (this_target_rtl->x_static_reg_base_value)
248 
249 #define REG_BASE_VALUE(X)					\
250   (REGNO (X) < vec_safe_length (reg_base_value)			\
251    ? (*reg_base_value)[REGNO (X)] : 0)
252 
253 /* Vector indexed by N giving the initial (unchanging) value known for
254    pseudo-register N.  This vector is initialized in init_alias_analysis,
255    and does not change until end_alias_analysis is called.  */
256 static GTY(()) vec<rtx, va_gc> *reg_known_value;
257 
258 /* Vector recording for each reg_known_value whether it is due to a
259    REG_EQUIV note.  Future passes (viz., reload) may replace the
260    pseudo with the equivalent expression and so we account for the
261    dependences that would be introduced if that happens.
262 
263    The REG_EQUIV notes created in assign_parms may mention the arg
264    pointer, and there are explicit insns in the RTL that modify the
265    arg pointer.  Thus we must ensure that such insns don't get
266    scheduled across each other because that would invalidate the
267    REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
268    wrong, but solving the problem in the scheduler will likely give
269    better code, so we do it here.  */
270 static sbitmap reg_known_equiv_p;
271 
272 /* True when scanning insns from the start of the rtl to the
273    NOTE_INSN_FUNCTION_BEG note.  */
274 static bool copying_arguments;
275 
276 
277 /* The vector used to store the various alias set entries.  */
278 static GTY (()) vec<alias_set_entry *, va_gc> *alias_sets;
279 
280 /* Build a decomposed reference object for querying the alias-oracle
281    from the MEM rtx and store it in *REF.
282    Returns false if MEM is not suitable for the alias-oracle.  */
283 
284 static bool
285 ao_ref_from_mem (ao_ref *ref, const_rtx mem)
286 {
287   tree expr = MEM_EXPR (mem);
288   tree base;
289 
290   if (!expr)
291     return false;
292 
293   ao_ref_init (ref, expr);
294 
295   /* Get the base of the reference and see if we have to reject or
296      adjust it.  */
297   base = ao_ref_base (ref);
298   if (base == NULL_TREE)
299     return false;
300 
301   /* The tree oracle doesn't like bases that are neither decls
302      nor indirect references of SSA names.  */
303   if (!(DECL_P (base)
304 	|| (TREE_CODE (base) == MEM_REF
305 	    && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
306 	|| (TREE_CODE (base) == TARGET_MEM_REF
307 	    && TREE_CODE (TMR_BASE (base)) == SSA_NAME)))
308     return false;
309 
310   /* If this is a reference based on a partitioned decl replace the
311      base with a MEM_REF of the pointer representative we
312      created during stack slot partitioning.  */
313   if (TREE_CODE (base) == VAR_DECL
314       && ! is_global_var (base)
315       && cfun->gimple_df->decls_to_pointers != NULL)
316     {
317       tree *namep = cfun->gimple_df->decls_to_pointers->get (base);
318       if (namep)
319 	ref->base = build_simple_mem_ref (*namep);
320     }
321 
322   ref->ref_alias_set = MEM_ALIAS_SET (mem);
323 
324   /* If MEM_OFFSET or MEM_SIZE are unknown, what we got from MEM_EXPR
325      is conservative, so trust it.  */
326   if (!MEM_OFFSET_KNOWN_P (mem)
327       || !MEM_SIZE_KNOWN_P (mem))
328     return true;
329 
330   /* If MEM_OFFSET/MEM_SIZE get us outside of ref->offset/ref->max_size
331      drop ref->ref.  */
332   if (MEM_OFFSET (mem) < 0
333       || (ref->max_size != -1
334 	  && ((MEM_OFFSET (mem) + MEM_SIZE (mem)) * BITS_PER_UNIT
335 	      > ref->max_size)))
336     ref->ref = NULL_TREE;
337 
338   /* Refine size and offset we got from analyzing MEM_EXPR by using
339      MEM_SIZE and MEM_OFFSET.  */
340 
341   ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT;
342   ref->size = MEM_SIZE (mem) * BITS_PER_UNIT;
343 
344   /* The MEM may extend into adjacent fields, so adjust max_size if
345      necessary.  */
346   if (ref->max_size != -1
347       && ref->size > ref->max_size)
348     ref->max_size = ref->size;
349 
350   /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
351      the MEM_EXPR, punt.  This happens a lot for STRICT_ALIGNMENT targets.  */
352   if (MEM_EXPR (mem) != get_spill_slot_decl (false)
353       && (ref->offset < 0
354 	  || (DECL_P (ref->base)
355 	      && (DECL_SIZE (ref->base) == NULL_TREE
356 		  || TREE_CODE (DECL_SIZE (ref->base)) != INTEGER_CST
357 		  || wi::ltu_p (wi::to_offset (DECL_SIZE (ref->base)),
358 				ref->offset + ref->size)))))
359     return false;
360 
361   return true;
362 }
363 
364 /* Query the alias-oracle on whether the two memory rtx X and MEM may
365    alias.  If TBAA_P is set also apply TBAA.  Returns true if the
366    two rtxen may alias, false otherwise.  */
367 
368 static bool
369 rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
370 {
371   ao_ref ref1, ref2;
372 
373   if (!ao_ref_from_mem (&ref1, x)
374       || !ao_ref_from_mem (&ref2, mem))
375     return true;
376 
377   return refs_may_alias_p_1 (&ref1, &ref2,
378 			     tbaa_p
379 			     && MEM_ALIAS_SET (x) != 0
380 			     && MEM_ALIAS_SET (mem) != 0);
381 }
382 
383 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is
384    such an entry, or NULL otherwise.  */
385 
386 static inline alias_set_entry *
387 get_alias_set_entry (alias_set_type alias_set)
388 {
389   return (*alias_sets)[alias_set];
390 }
391 
392 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
393    the two MEMs cannot alias each other.  */
394 
395 static inline int
396 mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
397 {
398   return (flag_strict_aliasing
399 	  && ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1),
400 				      MEM_ALIAS_SET (mem2)));
401 }
402 
403 /* Return true if the first alias set is a subset of the second.  */
404 
405 bool
406 alias_set_subset_of (alias_set_type set1, alias_set_type set2)
407 {
408   alias_set_entry *ase2;
409 
410   /* Disable TBAA oracle with !flag_strict_aliasing.  */
411   if (!flag_strict_aliasing)
412     return true;
413 
414   /* Everything is a subset of the "aliases everything" set.  */
415   if (set2 == 0)
416     return true;
417 
418   /* Check if set1 is a subset of set2.  */
419   ase2 = get_alias_set_entry (set2);
420   if (ase2 != 0
421       && (ase2->has_zero_child
422 	  || (ase2->children && ase2->children->get (set1))))
423     return true;
424 
425   /* As a special case we consider the alias set of "void *" to be both a
426      subset and a superset of every alias set of a pointer.  This extra
427      symmetry does not matter for alias_sets_conflict_p but it makes
428      aliasing_component_refs_p return true on the following testcase:
429 
430      void *ptr;
431      char **ptr2=(char **)&ptr;
432      *ptr2 = ...
433 
434      Additionally, if a set contains a universal pointer, we consider every
435      pointer to be a subset of it, but we do not represent this explicitly -
436      doing so would require us to update the transitive closure each time we
437      introduce a new pointer type.  This makes aliasing_component_refs_p
438      return true on the following testcase:
439 
440      struct a {void *ptr;}
441      char **ptr = (char **)&a.ptr;
442      ptr = ...
443 
444      This makes void * a truly universal pointer type.  See pointer handling
445      in get_alias_set for more details.  */
446   if (ase2 && ase2->has_pointer)
447     {
448       alias_set_entry *ase1 = get_alias_set_entry (set1);
449 
450       if (ase1 && ase1->is_pointer)
451 	{
452           alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node);
453 	  /* If one is ptr_type_node and the other is a pointer, then we
454 	     consider them subsets of each other.  */
455 	  if (set1 == voidptr_set || set2 == voidptr_set)
456 	    return true;
457 	  /* If SET2 contains the universal pointer's alias set, then we
458 	     consider every (non-universal) pointer to be a subset of SET2.  */
459 	  if (ase2->children && set1 != voidptr_set
460 	      && ase2->children->get (voidptr_set))
461 	    return true;
462 	}
463     }
464   return false;
465 }
466 
467 /* Return 1 if the two specified alias sets may conflict.  */
468 
469 int
470 alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
471 {
472   alias_set_entry *ase1;
473   alias_set_entry *ase2;
474 
475   /* The easy case.  */
476   if (alias_sets_must_conflict_p (set1, set2))
477     return 1;
478 
479   /* See if the first alias set is a subset of the second.  */
480   ase1 = get_alias_set_entry (set1);
481   if (ase1 != 0
482       && ase1->children && ase1->children->get (set2))
483     {
484       ++alias_stats.num_dag;
485       return 1;
486     }
487 
488   /* Now do the same, but with the alias sets reversed.  */
489   ase2 = get_alias_set_entry (set2);
490   if (ase2 != 0
491       && ase2->children && ase2->children->get (set1))
492     {
493       ++alias_stats.num_dag;
494       return 1;
495     }
496 
497   /* We want void * to be compatible with any other pointer without
498      really dropping it to alias set 0. Doing so would make it
499      compatible with all non-pointer types too.
500 
501      This is not strictly necessary by the C/C++ language
502      standards, but avoids common type punning mistakes.  In
503      addition to that, we need the existence of such a universal
504      pointer to implement Fortran's C_PTR type (which is defined as
505      type compatible with all C pointers).  */
506   if (ase1 && ase2 && ase1->has_pointer && ase2->has_pointer)
507     {
508       alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node);
509 
510       /* If one of the sets corresponds to the universal pointer,
511  	 we consider it to conflict with anything that is
512 	 or contains a pointer.  */
513       if (set1 == voidptr_set || set2 == voidptr_set)
514 	{
515 	  ++alias_stats.num_universal;
516 	  return true;
517 	}
518      /* If one of the sets is a (non-universal) pointer and the other
519  	contains the universal pointer, we also get a conflict.  */
520      if (ase1->is_pointer && set2 != voidptr_set
521 	 && ase2->children && ase2->children->get (voidptr_set))
522 	{
523 	  ++alias_stats.num_universal;
524 	  return true;
525 	}
526      if (ase2->is_pointer && set1 != voidptr_set
527 	 && ase1->children && ase1->children->get (voidptr_set))
528 	{
529 	  ++alias_stats.num_universal;
530 	  return true;
531 	}
532     }
533 
534   ++alias_stats.num_disambiguated;
535 
536   /* The two alias sets are distinct and neither one is the
537      child of the other.  Therefore, they cannot conflict.  */
538   return 0;
539 }
540 
541 /* Return 1 if the two specified alias sets will always conflict.  */
542 
543 int
544 alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
545 {
546   /* Disable TBAA oracle with !flag_strict_aliasing.  */
547   if (!flag_strict_aliasing)
548     return 1;
549   if (set1 == 0 || set2 == 0)
550     {
551       ++alias_stats.num_alias_zero;
552       return 1;
553     }
554   if (set1 == set2)
555     {
556       ++alias_stats.num_same_alias_set;
557       return 1;
558     }
559 
560   return 0;
561 }
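
/* To illustrate the difference from alias_sets_conflict_p (a sketch,
   with -fstrict-aliasing and the `struct S' from the head of this file):

     set (int)   vs  set (int)       -> both functions return 1 (same set)
     set (int)   vs  set (struct S)  -> only alias_sets_conflict_p returns 1,
					via the subset relation
     set (short) vs  set (int)       -> both return 0; the sets are disjoint

   A zero alias set on either side makes both functions return 1.  */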
562 
563 /* Return 1 if any MEM object of type T1 will always conflict (using the
564    dependency routines in this file) with any MEM object of type T2.
565    This is used when allocating temporary storage.  If T1 and/or T2 are
566    NULL_TREE, it means we know nothing about the storage.  */
567 
568 int
569 objects_must_conflict_p (tree t1, tree t2)
570 {
571   alias_set_type set1, set2;
572 
573   /* If neither has a type specified, we don't know if they'll conflict
574      because we may be using them to store objects of various types, for
575      example the argument and local variables areas of inlined functions.  */
576   if (t1 == 0 && t2 == 0)
577     return 0;
578 
579   /* If they are the same type, they must conflict.  */
580   if (t1 == t2)
581     {
582       ++alias_stats.num_same_objects;
583       return 1;
584     }
585   /* Likewise if both are volatile.  */
586   if (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2))
587     {
588       ++alias_stats.num_volatile;
589       return 1;
590     }
591 
592   set1 = t1 ? get_alias_set (t1) : 0;
593   set2 = t2 ? get_alias_set (t2) : 0;
594 
595   /* We can't use alias_sets_conflict_p because we must make sure
596      that every subtype of t1 will conflict with every subtype of
597      t2 for which a pair of subobjects of these respective subtypes
598      overlaps on the stack.  */
599   return alias_sets_must_conflict_p (set1, set2);
600 }
601 
602 /* Return the outermost parent of component present in the chain of
603    component references handled by get_inner_reference in T with the
604    following property:
605      - the component is non-addressable, or
606      - the parent has alias set zero,
607    or NULL_TREE if no such parent exists.  In the former cases, the alias
608    set of this parent is the alias set that must be used for T itself.  */
609 
610 tree
611 component_uses_parent_alias_set_from (const_tree t)
612 {
613   const_tree found = NULL_TREE;
614 
615   while (handled_component_p (t))
616     {
617       switch (TREE_CODE (t))
618 	{
619 	case COMPONENT_REF:
620 	  if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
621 	    found = t;
622 	  break;
623 
624 	case ARRAY_REF:
625 	case ARRAY_RANGE_REF:
626 	  if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
627 	    found = t;
628 	  break;
629 
630 	case REALPART_EXPR:
631 	case IMAGPART_EXPR:
632 	  break;
633 
634 	case BIT_FIELD_REF:
635 	case VIEW_CONVERT_EXPR:
636 	  /* Bitfields and casts are never addressable.  */
637 	  found = t;
638 	  break;
639 
640 	default:
641 	  gcc_unreachable ();
642 	}
643 
644       if (get_alias_set (TREE_TYPE (TREE_OPERAND (t, 0))) == 0)
645 	found = t;
646 
647       t = TREE_OPERAND (t, 0);
648     }
649 
650   if (found)
651     return TREE_OPERAND (found, 0);
652 
653   return NULL_TREE;
654 }
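
/* For instance (a sketch): given a reference like

     VIEW_CONVERT_EXPR<float>(i)	where i is a variable of type int

   the VIEW_CONVERT_EXPR is recorded as FOUND (casts are never
   addressable), so `i' itself is returned and the access ends up using
   the alias set of `int' rather than that of `float'.  */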
655 
656 
657 /* Return whether the pointer-type T effective for aliasing may
658    access everything and thus the reference has to be assigned
659    alias-set zero.  */
660 
661 static bool
662 ref_all_alias_ptr_type_p (const_tree t)
663 {
664   return (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
665 	  || TYPE_REF_CAN_ALIAS_ALL (t));
666 }
667 
668 /* Return the alias set for the memory pointed to by T, which may be
669    either a type or an expression.  Return -1 if there is nothing
670    special about dereferencing T.  */
671 
672 static alias_set_type
673 get_deref_alias_set_1 (tree t)
674 {
675   /* All we care about is the type.  */
676   if (! TYPE_P (t))
677     t = TREE_TYPE (t);
678 
679   /* If we have an INDIRECT_REF via a void pointer, we don't
680      know anything about what that might alias.  Likewise if the
681      pointer is marked that way.  */
682   if (ref_all_alias_ptr_type_p (t))
683     return 0;
684 
685   return -1;
686 }
687 
688 /* Return the alias set for the memory pointed to by T, which may be
689    either a type or an expression.  */
690 
691 alias_set_type
692 get_deref_alias_set (tree t)
693 {
694   /* If we're not doing any alias analysis, just assume everything
695      aliases everything else.  */
696   if (!flag_strict_aliasing)
697     return 0;
698 
699   alias_set_type set = get_deref_alias_set_1 (t);
700 
701   /* Fall back to the alias-set of the pointed-to type.  */
702   if (set == -1)
703     {
704       if (! TYPE_P (t))
705 	t = TREE_TYPE (t);
706       set = get_alias_set (TREE_TYPE (t));
707     }
708 
709   return set;
710 }
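
/* For example (a sketch): for T of type `int *' this returns the alias
   set of `int'; for T of type `void *' (or any ref-all pointer) it
   returns 0, since such a dereference may touch anything.  */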
711 
712 /* Return the pointer-type relevant for TBAA purposes from the
713    memory reference tree *T or NULL_TREE in which case *T is
714    adjusted to point to the outermost component reference that
715    can be used for assigning an alias set.  */
716 
717 static tree
718 reference_alias_ptr_type_1 (tree *t)
719 {
720   tree inner;
721 
722   /* Get the base object of the reference.  */
723   inner = *t;
724   while (handled_component_p (inner))
725     {
726       /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use
727 	 the type of any component references that wrap it to
728 	 determine the alias-set.  */
729       if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
730 	*t = TREE_OPERAND (inner, 0);
731       inner = TREE_OPERAND (inner, 0);
732     }
733 
734   /* Handle pointer dereferences here, they can override the
735      alias-set.  */
736   if (INDIRECT_REF_P (inner)
737       && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 0))))
738     return TREE_TYPE (TREE_OPERAND (inner, 0));
739   else if (TREE_CODE (inner) == TARGET_MEM_REF)
740     return TREE_TYPE (TMR_OFFSET (inner));
741   else if (TREE_CODE (inner) == MEM_REF
742 	   && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 1))))
743     return TREE_TYPE (TREE_OPERAND (inner, 1));
744 
745   /* If the innermost reference is a MEM_REF that has a
746      conversion embedded treat it like a VIEW_CONVERT_EXPR above,
747      using the memory access type for determining the alias-set.  */
748   if (TREE_CODE (inner) == MEM_REF
749       && (TYPE_MAIN_VARIANT (TREE_TYPE (inner))
750 	  != TYPE_MAIN_VARIANT
751 	       (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1))))))
752     return TREE_TYPE (TREE_OPERAND (inner, 1));
753 
754   /* Otherwise, pick up the outermost object that we could have
755      a pointer to.  */
756   tree tem = component_uses_parent_alias_set_from (*t);
757   if (tem)
758     *t = tem;
759 
760   return NULL_TREE;
761 }
762 
763 /* Return the pointer-type relevant for TBAA purposes from the
764    gimple memory reference tree T.  This is the type to be used for
765    the offset operand of MEM_REF or TARGET_MEM_REF replacements of T
766    and guarantees that get_alias_set will return the same alias
767    set for T and the replacement.  */
768 
769 tree
770 reference_alias_ptr_type (tree t)
771 {
772   /* If the frontend assigns this alias-set zero, preserve that.  */
773   if (lang_hooks.get_alias_set (t) == 0)
774     return ptr_type_node;
775 
776   tree ptype = reference_alias_ptr_type_1 (&t);
777   /* If there is a given pointer type for aliasing purposes, return it.  */
778   if (ptype != NULL_TREE)
779     return ptype;
780 
781   /* Otherwise build one from the outermost component reference we
782      may use.  */
783   if (TREE_CODE (t) == MEM_REF
784       || TREE_CODE (t) == TARGET_MEM_REF)
785     return TREE_TYPE (TREE_OPERAND (t, 1));
786   else
787     return build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (t)));
788 }
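
/* For instance (a sketch): for a plain variable reference `x' where x
   has type `int', no embedded pointer type is found, so `int *' is
   built and returned; a replacement MEM_REF with an offset operand of
   that type then gets the same alias set as the original access.  */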
789 
790 /* Return whether the pointer-types T1 and T2 used to determine
791    two alias sets of two references will yield the same answer
792    from get_deref_alias_set.  */
793 
794 bool
795 alias_ptr_types_compatible_p (tree t1, tree t2)
796 {
797   if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
798     return true;
799 
800   if (ref_all_alias_ptr_type_p (t1)
801       || ref_all_alias_ptr_type_p (t2))
802     return false;
803 
804   return (TYPE_MAIN_VARIANT (TREE_TYPE (t1))
805 	  == TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
806 }
807 
808 /* Create an empty alias set entry.  */
809 
810 alias_set_entry *
811 init_alias_set_entry (alias_set_type set)
812 {
813   alias_set_entry *ase = ggc_alloc<alias_set_entry> ();
814   ase->alias_set = set;
815   ase->children = NULL;
816   ase->has_zero_child = false;
817   ase->is_pointer = false;
818   ase->has_pointer = false;
819   gcc_checking_assert (!get_alias_set_entry (set));
820   (*alias_sets)[set] = ase;
821   return ase;
822 }
823 
824 /* Return the alias set for T, which may be either a type or an
825    expression.  Call language-specific routine for help, if needed.  */
826 
827 alias_set_type
828 get_alias_set (tree t)
829 {
830   alias_set_type set;
831 
832   /* We cannot give up with -fno-strict-aliasing because we need to build
833      a proper type representation for functions which may be built with
834      -fstrict-aliasing.  */
835 
836   /* Return 0 if this or its type is an error.  */
837   if (t == error_mark_node
838       || (! TYPE_P (t)
839 	  && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
840     return 0;
841 
842   /* We can be passed either an expression or a type.  This and the
843      language-specific routine may make mutually-recursive calls to each other
844      to figure out what to do.  At each juncture, we see if this is a tree
845      that the language may need to handle specially.  First handle things that
846      aren't types.  */
847   if (! TYPE_P (t))
848     {
849       /* Give the language a chance to do something with this tree
850 	 before we look at it.  */
851       STRIP_NOPS (t);
852       set = lang_hooks.get_alias_set (t);
853       if (set != -1)
854 	return set;
855 
856       /* Get the alias pointer-type to use or the outermost object
857          that we could have a pointer to.  */
858       tree ptype = reference_alias_ptr_type_1 (&t);
859       if (ptype != NULL)
860 	return get_deref_alias_set (ptype);
861 
862       /* If we've already determined the alias set for a decl, just return
863 	 it.  This is necessary for C++ anonymous unions, whose component
864 	 variables don't look like union members (boo!).  */
865       if (TREE_CODE (t) == VAR_DECL
866 	  && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
867 	return MEM_ALIAS_SET (DECL_RTL (t));
868 
869       /* Now all we care about is the type.  */
870       t = TREE_TYPE (t);
871     }
872 
873   /* Variant qualifiers don't affect the alias set, so get the main
874      variant.  */
875   t = TYPE_MAIN_VARIANT (t);
876 
877   /* Always use the canonical type as well.  If this is a type that
878      requires structural comparisons to identify compatible types
879      use alias set zero.  */
880   if (TYPE_STRUCTURAL_EQUALITY_P (t))
881     {
882       /* Allow the language to specify another alias set for this
883 	 type.  */
884       set = lang_hooks.get_alias_set (t);
885       if (set != -1)
886 	return set;
887       /* Handle structural type equality for pointer types, arrays and vectors.
888 	 This is easy to do, because the code below ignores canonical types for
889 	 these anyway.  This is important for LTO, where TYPE_CANONICAL for
890 	 pointers cannot be meaningfully computed by the frontend.  */
891       if (canonical_type_used_p (t))
892 	{
893 	  /* In LTO we set canonical types for all types where it makes
894 	     sense to do so.  Double check we did not miss some type.  */
895 	  gcc_checking_assert (!in_lto_p || !type_with_alias_set_p (t));
896           return 0;
897 	}
898     }
899   else
900     {
901       t = TYPE_CANONICAL (t);
902       gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));
903     }
904 
905   /* If this is a type with a known alias set, return it.  */
906   gcc_checking_assert (t == TYPE_MAIN_VARIANT (t));
907   if (TYPE_ALIAS_SET_KNOWN_P (t))
908     return TYPE_ALIAS_SET (t);
909 
910   /* We don't want to set TYPE_ALIAS_SET for incomplete types.  */
911   if (!COMPLETE_TYPE_P (t))
912     {
913       /* For arrays with unknown size the conservative answer is the
914 	 alias set of the element type.  */
915       if (TREE_CODE (t) == ARRAY_TYPE)
916 	return get_alias_set (TREE_TYPE (t));
917 
918       /* But return zero as a conservative answer for incomplete types.  */
919       return 0;
920     }
921 
922   /* See if the language has special handling for this type.  */
923   set = lang_hooks.get_alias_set (t);
924   if (set != -1)
925     return set;
926 
927   /* There are no objects of FUNCTION_TYPE, so there's no point in
928      using up an alias set for them.  (There are, of course, pointers
929      and references to functions, but that's different.)  */
930   else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
931     set = 0;
932 
933   /* Unless the language specifies otherwise, let vector types alias
934      their components.  This avoids some nasty type punning issues in
935      normal usage.  And indeed lets vectors be treated more like an
936      array slice.  */
937   else if (TREE_CODE (t) == VECTOR_TYPE)
938     set = get_alias_set (TREE_TYPE (t));
939 
940   /* Unless the language specifies otherwise, treat array types the
941      same as their components.  This avoids the asymmetry we get
942      through recording the components.  Consider accessing a
943      character(kind=1) through a reference to a character(kind=1)[1:1].
944      Or consider if we want to assign integer(kind=4)[0:D.1387] and
945      integer(kind=4)[4] the same alias set or not.
946      Just be pragmatic here and make sure the array and its element
947      type get the same alias set assigned.  */
948   else if (TREE_CODE (t) == ARRAY_TYPE
949 	   && (!TYPE_NONALIASED_COMPONENT (t)
950 	       || TYPE_STRUCTURAL_EQUALITY_P (t)))
951     set = get_alias_set (TREE_TYPE (t));
952 
953   /* From the former common C and C++ langhook implementation:
954 
955      Unfortunately, there is no canonical form of a pointer type.
956      In particular, if we have `typedef int I', then `int *', and
957      `I *' are different types.  So, we have to pick a canonical
958      representative.  We do this below.
959 
960      Technically, this approach is actually more conservative than
961      it needs to be.  In particular, `const int *' and `int *'
962      should be in different alias sets, according to the C and C++
963      standard, since their types are not the same, and so,
964      technically, an `int **' and `const int **' cannot point at
965      the same thing.
966 
967      But, the standard is wrong.  In particular, this code is
968      legal C++:
969 
970      int *ip;
971      int **ipp = &ip;
972      const int* const* cipp = ipp;
973      And, it doesn't make sense for that to be legal unless you
974      can dereference IPP and CIPP.  So, we ignore cv-qualifiers on
975      the pointed-to types.  This issue has been reported to the
976      C++ committee.
977 
978      For this reason we go to the canonical type of the unqualified pointer
979      type.  Until GCC 6 this code gave all pointer types the alias set of
980      ptr_type_node, but that is a bad idea, because it prevents
981      disambiguations between pointers.  For Firefox this accounts for about
982      20% of all disambiguations in the program.  */
983   else if (POINTER_TYPE_P (t) && t != ptr_type_node)
984     {
985       tree p;
986       auto_vec <bool, 8> reference;
987 
988       /* Unnest all pointers and references.
989 	 We also want to make a pointer to an array/vector equivalent to a pointer
990 	 to its element (see the reasoning above).  Skip all those types, too.  */
991       for (p = t; POINTER_TYPE_P (p)
992 	   || (TREE_CODE (p) == ARRAY_TYPE
993 	       && (!TYPE_NONALIASED_COMPONENT (p)
994 		   || !COMPLETE_TYPE_P (p)
995 		   || TYPE_STRUCTURAL_EQUALITY_P (p)))
996 	   || TREE_CODE (p) == VECTOR_TYPE;
997 	   p = TREE_TYPE (p))
998 	{
999 	  /* Ada supports recursive pointers.  Instead of doing a recursion
1000 	     check, just give up once the preallocated space of 8 elements
1001 	     is up.  In this case just punt to the void * alias set.  */
1002 	  if (reference.length () == 8)
1003 	    {
1004 	      p = ptr_type_node;
1005 	      break;
1006 	    }
1007 	  if (TREE_CODE (p) == REFERENCE_TYPE)
1008 	    /* In LTO we want languages that use references to be compatible
1009  	       with languages that use pointers.  */
1010 	    reference.safe_push (true && !in_lto_p);
1011 	  if (TREE_CODE (p) == POINTER_TYPE)
1012 	    reference.safe_push (false);
1013 	}
1014       p = TYPE_MAIN_VARIANT (p);
1015 
1016       /* Make void * compatible with char * and also void **.
1017 	 Programs commonly violate TBAA by this.
1018 
1019 	 We also make void * conflict with every pointer
1020 	 (see record_component_aliases) and thus it is safe to use it for
1021 	 pointers to types with TYPE_STRUCTURAL_EQUALITY_P.  */
1022       if (TREE_CODE (p) == VOID_TYPE || TYPE_STRUCTURAL_EQUALITY_P (p))
1023 	set = get_alias_set (ptr_type_node);
1024       else
1025 	{
1026 	  /* Rebuild pointer type starting from canonical types using
1027 	     unqualified pointers and references only.  This way all such
1028 	     pointers will have the same alias set and will conflict with
1029 	     each other.
1030 
1031 	     Most of the time we already have pointers or references of a given type.
1032 	     If not we build new one just to be sure that if someone later
1033 	     (probably only middle-end can, as we should assign all alias
1034 	     classes only after finishing translation unit) builds the pointer
1035 	     type, the canonical type will match.  */
1036 	  p = TYPE_CANONICAL (p);
1037 	  while (!reference.is_empty ())
1038 	    {
1039 	      if (reference.pop ())
1040 		p = build_reference_type (p);
1041 	      else
1042 		p = build_pointer_type (p);
1043 	      gcc_checking_assert (p == TYPE_MAIN_VARIANT (p));
1044 	      /* build_pointer_type should always return the canonical type.
1045 		 For LTO TYPE_CANONICAL may be NULL, because we do not compute
1046 		 them.  Be sure that frontends do not glob canonical types of
1047 		 pointers in unexpected ways and that p == TYPE_CANONICAL (p)
1048 		 in all other cases.  */
1049 	      gcc_checking_assert (!TYPE_CANONICAL (p)
1050 				   || p == TYPE_CANONICAL (p));
1051 	    }
1052 
1053 	  /* Assign the alias set to both p and t.
1054 	     We cannot call get_alias_set (p) here as that would trigger
1055 	     infinite recursion when p == t.  In other cases it would just
1056 	     trigger unnecessary legwork of rebuilding the pointer again.  */
1057 	  gcc_checking_assert (p == TYPE_MAIN_VARIANT (p));
1058 	  if (TYPE_ALIAS_SET_KNOWN_P (p))
1059 	    set = TYPE_ALIAS_SET (p);
1060 	  else
1061 	    {
1062 	      set = new_alias_set ();
1063 	      TYPE_ALIAS_SET (p) = set;
1064 	    }
1065 	}
1066     }
1067   /* The alias set of ptr_type_node is special and serves as a universal
1068      pointer which is TBAA compatible with every other pointer type.  Be sure
1069      we have the alias set built even for LTO, which otherwise keeps all
1070      TYPE_CANONICAL of pointer types NULL.  */
1071   else if (t == ptr_type_node)
1072     set = new_alias_set ();
1073 
1074   /* Otherwise make a new alias set for this type.  */
1075   else
1076     {
1077       /* Each canonical type gets its own alias set, so canonical types
1078 	 shouldn't form a tree.  It doesn't really matter for types
1079 	 we handle specially above, so only check it where it possibly
1080 	 would result in a bogus alias set.  */
1081       gcc_checking_assert (TYPE_CANONICAL (t) == t);
1082 
1083       set = new_alias_set ();
1084     }
1085 
1086   TYPE_ALIAS_SET (t) = set;
1087 
1088   /* If this is an aggregate type or a complex type, we must record any
1089      component aliasing information.  */
1090   if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
1091     record_component_aliases (t);
1092 
1093   /* We treat pointer types specially in alias_set_subset_of.  */
1094   if (POINTER_TYPE_P (t) && set)
1095     {
1096       alias_set_entry *ase = get_alias_set_entry (set);
1097       if (!ase)
1098 	ase = init_alias_set_entry (set);
1099       ase->is_pointer = true;
1100       ase->has_pointer = true;
1101     }
1102 
1103   return set;
1104 }
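
/* A sketch of the pointer handling above (assuming C and -fstrict-aliasing):

     typedef int I;
     int *p; I *q; const int *r;	all three share one alias set
     void *v;				the universal ptr_type_node set

   p, q and r all receive the alias set of the canonical `int *' (the
   typedef and the cv-qualification of the pointed-to type are ignored),
   while v gets the special set that conflicts with every other pointer
   (see alias_sets_conflict_p).  */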
1105 
1106 /* Return a brand-new alias set.  */
1107 
1108 alias_set_type
1109 new_alias_set (void)
1110 {
1111   if (alias_sets == 0)
1112     vec_safe_push (alias_sets, (alias_set_entry *) NULL);
1113   vec_safe_push (alias_sets, (alias_set_entry *) NULL);
1114   return alias_sets->length () - 1;
1115 }
1116 
1117 /* Indicate that things in SUBSET can alias things in SUPERSET, but that
1118    not everything that aliases SUPERSET also aliases SUBSET.  For example,
1119    in C, a store to an `int' can alias a load of a structure containing an
1120    `int', and vice versa.  But it can't alias a load of a 'double' member
1121    of the same structure.  Here, the structure would be the SUPERSET and
1122    `int' the SUBSET.  This relationship is also described in the comment at
1123    the beginning of this file.
1124 
1125    This function should be called only once per SUPERSET/SUBSET pair.
1126 
1127    It is illegal for SUPERSET to be zero; everything is implicitly a
1128    subset of alias set zero.  */
1129 
1130 void
1131 record_alias_subset (alias_set_type superset, alias_set_type subset)
1132 {
1133   alias_set_entry *superset_entry;
1134   alias_set_entry *subset_entry;
1135 
1136   /* It is possible in complex type situations for both sets to be the same,
1137      in which case we can ignore this operation.  */
1138   if (superset == subset)
1139     return;
1140 
1141   gcc_assert (superset);
1142 
1143   superset_entry = get_alias_set_entry (superset);
1144   if (superset_entry == 0)
1145     {
1146       /* Create an entry for the SUPERSET, so that we have a place to
1147 	 attach the SUBSET.  */
1148       superset_entry = init_alias_set_entry (superset);
1149     }
1150 
1151   if (subset == 0)
1152     superset_entry->has_zero_child = 1;
1153   else
1154     {
1155       subset_entry = get_alias_set_entry (subset);
1156       if (!superset_entry->children)
1157 	superset_entry->children
1158 	  = hash_map<alias_set_hash, int>::create_ggc (64);
1159       /* If there is an entry for the subset, enter all of its children
1160 	 (if they are not already present) as children of the SUPERSET.  */
1161       if (subset_entry)
1162 	{
1163 	  if (subset_entry->has_zero_child)
1164 	    superset_entry->has_zero_child = true;
1165           if (subset_entry->has_pointer)
1166 	    superset_entry->has_pointer = true;
1167 
1168 	  if (subset_entry->children)
1169 	    {
1170 	      hash_map<alias_set_hash, int>::iterator iter
1171 		= subset_entry->children->begin ();
1172 	      for (; iter != subset_entry->children->end (); ++iter)
1173 		superset_entry->children->put ((*iter).first, (*iter).second);
1174 	    }
1175 	}
1176 
1177       /* Enter the SUBSET itself as a child of the SUPERSET.  */
1178       superset_entry->children->put (subset, 0);
1179     }
1180 }
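
/* For example (a sketch, continuing the `struct T' example above):

     record_alias_subset (set (struct T), set (struct S));

   enters not only `struct S' but also its existing children `int' and
   `double' into struct T's children map, so subset queries never need
   to walk more than one level.  */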
1181 
1182 /* Record that component types of TYPE, if any, are part of that type for
1183    aliasing purposes.  For record types, we only record component types
1184    for fields that are not marked non-addressable.  For array types, we
1185    only record the component type if it is not marked non-aliased.  */
1186 
1187 void
1188 record_component_aliases (tree type)
1189 {
1190   alias_set_type superset = get_alias_set (type);
1191   tree field;
1192 
1193   if (superset == 0)
1194     return;
1195 
1196   switch (TREE_CODE (type))
1197     {
1198     case RECORD_TYPE:
1199     case UNION_TYPE:
1200     case QUAL_UNION_TYPE:
1201       for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
1202 	if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
1203 	  {
1204 	    /* LTO type merging does not make any difference between
1205 	       component pointer types.  We may have
1206 
1207 	       struct foo {int *a;};
1208 
1209 	       as TYPE_CANONICAL of
1210 
1211 	       struct bar {float *a;};
1212 
1213 	       Because accesses to int * and float * do not alias, we would get
1214 	       a false negative when accessing the same memory location through
1215 	       float ** and bar *.  We thus record the canonical type as:
1216 
1217 	       struct {void *a;};
1218 
1219 	       void * is special cased and works as a universal pointer type.
1220 	       Accesses to it conflict with accesses to any other pointer
1221 	       type.  */
1222 	    tree t = TREE_TYPE (field);
1223 	    if (in_lto_p)
1224 	      {
1225 		/* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
1226 		   element type and that type has to be normalized to void *,
1227 		   too, in the case it is a pointer. */
1228 		while (!canonical_type_used_p (t) && !POINTER_TYPE_P (t))
1229 		  {
1230 		    gcc_checking_assert (TYPE_STRUCTURAL_EQUALITY_P (t));
1231 		    t = TREE_TYPE (t);
1232 		  }
1233 		if (POINTER_TYPE_P (t))
1234 		  t = ptr_type_node;
1235 		else if (flag_checking)
1236 		  gcc_checking_assert (get_alias_set (t)
1237 				       == get_alias_set (TREE_TYPE (field)));
1238 	      }
1239 
1240 	    record_alias_subset (superset, get_alias_set (t));
1241 	  }
1242       break;
1243 
1244     case COMPLEX_TYPE:
1245       record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
1246       break;
1247 
1248     /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
1249        element type.  */
1250 
1251     default:
1252       break;
1253     }
1254 }
1255 
1256 /* Allocate an alias set for use in storing and reading from the varargs
1257    spill area.  */
1258 
1259 static GTY(()) alias_set_type varargs_set = -1;
1260 
1261 alias_set_type
1262 get_varargs_alias_set (void)
1263 {
1264 #if 1
1265   /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
1266      varargs alias set to an INDIRECT_REF (FIXME!), so we can't
1267      consistently use the varargs alias set for loads from the varargs
1268      area.  So don't use it anywhere.  */
1269   return 0;
1270 #else
1271   if (varargs_set == -1)
1272     varargs_set = new_alias_set ();
1273 
1274   return varargs_set;
1275 #endif
1276 }
1277 
1278 /* Likewise, but used for the fixed portions of the frame, e.g., register
1279    save areas.  */
1280 
1281 static GTY(()) alias_set_type frame_set = -1;
1282 
1283 alias_set_type
1284 get_frame_alias_set (void)
1285 {
1286   if (frame_set == -1)
1287     frame_set = new_alias_set ();
1288 
1289   return frame_set;
1290 }
1291 
1292 /* Create a new, unique base with id ID.  */
1293 
1294 static rtx
1295 unique_base_value (HOST_WIDE_INT id)
1296 {
1297   return gen_rtx_ADDRESS (Pmode, id);
1298 }
1299 
1300 /* Return true if accesses based on any other base value cannot alias
1301    those based on X.  */
1302 
1303 static bool
1304 unique_base_value_p (rtx x)
1305 {
1306   return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode;
1307 }
1308 
1309 /* Return true if X is known to be a base value.  */
1310 
1311 static bool
1312 known_base_value_p (rtx x)
1313 {
1314   switch (GET_CODE (x))
1315     {
1316     case LABEL_REF:
1317     case SYMBOL_REF:
1318       return true;
1319 
1320     case ADDRESS:
1321       /* Arguments may or may not be bases; we don't know for sure.  */
1322       return GET_MODE (x) != VOIDmode;
1323 
1324     default:
1325       return false;
1326     }
1327 }
1328 
1329 /* Inside SRC, the source of a SET, find a base address.  */
1330 
1331 static rtx
1332 find_base_value (rtx src)
1333 {
1334   unsigned int regno;
1335 
1336 #if defined (FIND_BASE_TERM)
1337   /* Try machine-dependent ways to find the base term.  */
1338   src = FIND_BASE_TERM (src);
1339 #endif
1340 
1341   switch (GET_CODE (src))
1342     {
1343     case SYMBOL_REF:
1344     case LABEL_REF:
1345       return src;
1346 
1347     case REG:
1348       regno = REGNO (src);
1349       /* At the start of a function, argument registers have known base
1350 	 values which may be lost later.  Returning an ADDRESS
1351 	 expression here allows optimization based on argument values
1352 	 even when the argument registers are used for other purposes.  */
1353       if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
1354 	return new_reg_base_value[regno];
1355 
1356       /* If a pseudo has a known base value, return it.  Do not do this
1357 	 for non-fixed hard regs since it can result in a circular
1358 	 dependency chain for registers which have values at function entry.
1359 
1360 	 The test above is not sufficient because the scheduler may move
1361 	 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN.  */
1362       if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
1363 	  && regno < vec_safe_length (reg_base_value))
1364 	{
1365 	  /* If we're inside init_alias_analysis, use new_reg_base_value
1366 	     to reduce the number of relaxation iterations.  */
1367 	  if (new_reg_base_value && new_reg_base_value[regno]
1368 	      && DF_REG_DEF_COUNT (regno) == 1)
1369 	    return new_reg_base_value[regno];
1370 
1371 	  if ((*reg_base_value)[regno])
1372 	    return (*reg_base_value)[regno];
1373 	}
1374 
1375       return 0;
1376 
1377     case MEM:
1378       /* Check for an argument passed in memory.  Only record in the
1379 	 copying-arguments block; it is too hard to track changes
1380 	 otherwise.  */
1381       if (copying_arguments
1382 	  && (XEXP (src, 0) == arg_pointer_rtx
1383 	      || (GET_CODE (XEXP (src, 0)) == PLUS
1384 		  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
1385 	return arg_base_value;
1386       return 0;
1387 
1388     case CONST:
1389       src = XEXP (src, 0);
1390       if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
1391 	break;
1392 
1393       /* ... fall through ...  */
1394 
1395     case PLUS:
1396     case MINUS:
1397       {
1398 	rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);
1399 
1400 	/* If either operand is a REG that is a known pointer, then it
1401 	   is the base.  */
1402 	if (REG_P (src_0) && REG_POINTER (src_0))
1403 	  return find_base_value (src_0);
1404 	if (REG_P (src_1) && REG_POINTER (src_1))
1405 	  return find_base_value (src_1);
1406 
1407 	/* If either operand is a REG, then see if we already have
1408 	   a known value for it.  */
1409 	if (REG_P (src_0))
1410 	  {
1411 	    temp = find_base_value (src_0);
1412 	    if (temp != 0)
1413 	      src_0 = temp;
1414 	  }
1415 
1416 	if (REG_P (src_1))
1417 	  {
1418 	    temp = find_base_value (src_1);
1419 	    if (temp != 0)
1420 	      src_1 = temp;
1421 	  }
1422 
1423 	/* If either base is named object or a special address
1424 	   (like an argument or stack reference), then use it for the
1425 	   base term.  */
1426 	if (src_0 != 0 && known_base_value_p (src_0))
1427 	  return src_0;
1428 
1429 	if (src_1 != 0 && known_base_value_p (src_1))
1430 	  return src_1;
1431 
1432 	/* Guess which operand is the base address:
1433 	   If either operand is a symbol, then it is the base.  If
1434 	   either operand is a CONST_INT, then the other is the base.  */
1435 	if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
1436 	  return find_base_value (src_0);
1437 	else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
1438 	  return find_base_value (src_1);
1439 
1440 	return 0;
1441       }
1442 
1443     case LO_SUM:
1444       /* The standard form is (lo_sum reg sym) so look only at the
1445 	 second operand.  */
1446       return find_base_value (XEXP (src, 1));
1447 
1448     case AND:
1449       /* If the second operand is constant set the base
1450 	 address to the first operand.  */
1451       if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
1452 	return find_base_value (XEXP (src, 0));
1453       return 0;
1454 
1455     case TRUNCATE:
1456       /* As we do not know which address space the pointer is referring to, we can
1457 	 handle this only if the target does not support different pointer or
1458 	 address modes depending on the address space.  */
1459       if (!target_default_pointer_address_modes_p ())
1460 	break;
1461       if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
1462 	break;
1463       /* Fall through.  */
1464     case HIGH:
1465     case PRE_INC:
1466     case PRE_DEC:
1467     case POST_INC:
1468     case POST_DEC:
1469     case PRE_MODIFY:
1470     case POST_MODIFY:
1471       return find_base_value (XEXP (src, 0));
1472 
1473     case ZERO_EXTEND:
1474     case SIGN_EXTEND:	/* used for NT/Alpha pointers */
1475       /* As we do not know which address space the pointer is referring to, we can
1476 	 handle this only if the target does not support different pointer or
1477 	 address modes depending on the address space.  */
1478       if (!target_default_pointer_address_modes_p ())
1479 	break;
1480 
1481       {
1482 	rtx temp = find_base_value (XEXP (src, 0));
1483 
1484 	if (temp != 0 && CONSTANT_P (temp))
1485 	  temp = convert_memory_address (Pmode, temp);
1486 
1487 	return temp;
1488       }
1489 
1490     default:
1491       break;
1492     }
1493 
1494   return 0;
1495 }
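
/* For example (a sketch): for SRC = (plus (reg P) (const_int 8)), where
   pseudo P has a known base of (symbol_ref "x"), the PLUS case above
   looks through the constant offset and returns the SYMBOL_REF as the
   base value.  */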
1496 
1497 /* Called from init_alias_analysis indirectly through note_stores,
1498    or directly if DEST is a register with a REG_NOALIAS note attached.
1499    SET is null in the latter case.  */
1500 
1501 /* While scanning insns to find base values, reg_seen[N] is nonzero if
1502    register N has been set in this function.  */
1503 static sbitmap reg_seen;
1504 
1505 static void
1506 record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
1507 {
1508   unsigned regno;
1509   rtx src;
1510   int n;
1511 
1512   if (!REG_P (dest))
1513     return;
1514 
1515   regno = REGNO (dest);
1516 
1517   gcc_checking_assert (regno < reg_base_value->length ());
1518 
1519   n = REG_NREGS (dest);
1520   if (n != 1)
1521     {
1522       while (--n >= 0)
1523 	{
1524 	  bitmap_set_bit (reg_seen, regno + n);
1525 	  new_reg_base_value[regno + n] = 0;
1526 	}
1527       return;
1528     }
1529 
1530   if (set)
1531     {
1532       /* A CLOBBER wipes out any old value but does not prevent a previously
1533 	 unset register from acquiring a base address (i.e. reg_seen is not
1534 	 set).  */
1535       if (GET_CODE (set) == CLOBBER)
1536 	{
1537 	  new_reg_base_value[regno] = 0;
1538 	  return;
1539 	}
1540       src = SET_SRC (set);
1541     }
1542   else
1543     {
1544       /* There's a REG_NOALIAS note against DEST.  */
1545       if (bitmap_bit_p (reg_seen, regno))
1546 	{
1547 	  new_reg_base_value[regno] = 0;
1548 	  return;
1549 	}
1550       bitmap_set_bit (reg_seen, regno);
1551       new_reg_base_value[regno] = unique_base_value (unique_id++);
1552       return;
1553     }
1554 
1555   /* If this is not the first set of REGNO, see whether the new value
1556      is related to the old one.  There are two cases of interest:
1557 
1558 	(1) The register might be assigned an entirely new value
1559 	    that has the same base term as the original set.
1560 
1561 	(2) The set might be a simple self-modification that
1562 	    cannot change REGNO's base value.
1563 
1564      If neither case holds, reject the original base value as invalid.
1565      Note that the following situation is not detected:
1566 
1567 	 extern int x, y;  int *p = &x; p += (&y-&x);
1568 
1569      ANSI C does not allow computing the difference of addresses
1570      of distinct top level objects.  */
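  /* An illustrative (hypothetical) instance of case (2): for
     (set (reg 100) (plus (reg 100) (const_int 4))) the PLUS names DEST
     itself plus a constant that has no base value of its own, so even
     when find_base_value cannot recover the old base from the source,
     the PLUS arm below keeps new_reg_base_value[100] intact.  */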
1571   if (new_reg_base_value[regno] != 0
1572       && find_base_value (src) != new_reg_base_value[regno])
1573     switch (GET_CODE (src))
1574       {
1575       case LO_SUM:
1576       case MINUS:
1577 	if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
1578 	  new_reg_base_value[regno] = 0;
1579 	break;
1580       case PLUS:
1581 	/* If the value we add in the PLUS is also a valid base value,
1582 	   this might be the actual base value, and the original value
1583 	   an index.  */
1584 	{
1585 	  rtx other = NULL_RTX;
1586 
1587 	  if (XEXP (src, 0) == dest)
1588 	    other = XEXP (src, 1);
1589 	  else if (XEXP (src, 1) == dest)
1590 	    other = XEXP (src, 0);
1591 
1592 	  if (! other || find_base_value (other))
1593 	    new_reg_base_value[regno] = 0;
1594 	  break;
1595 	}
1596       case AND:
1597 	if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
1598 	  new_reg_base_value[regno] = 0;
1599 	break;
1600       default:
1601 	new_reg_base_value[regno] = 0;
1602 	break;
1603       }
1604   /* If this is the first set of a register, record the value.  */
1605   else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
1606 	   && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0)
1607     new_reg_base_value[regno] = find_base_value (src);
1608 
1609   bitmap_set_bit (reg_seen, regno);
1610 }
1611 
1612 /* Return REG_BASE_VALUE for REGNO.  The selective scheduler uses this to
1613    avoid using hard registers with non-null REG_BASE_VALUE for renaming.  */
1614 rtx
1615 get_reg_base_value (unsigned int regno)
1616 {
1617   return (*reg_base_value)[regno];
1618 }
1619 
1620 /* If a value is known for REGNO, return it.  */
1621 
1622 rtx
1623 get_reg_known_value (unsigned int regno)
1624 {
1625   if (regno >= FIRST_PSEUDO_REGISTER)
1626     {
1627       regno -= FIRST_PSEUDO_REGISTER;
1628       if (regno < vec_safe_length (reg_known_value))
1629 	return (*reg_known_value)[regno];
1630     }
1631   return NULL;
1632 }
1633 
1634 /* Set the known value for REGNO.  */
1635 
1636 static void
1637 set_reg_known_value (unsigned int regno, rtx val)
1638 {
1639   if (regno >= FIRST_PSEUDO_REGISTER)
1640     {
1641       regno -= FIRST_PSEUDO_REGISTER;
1642       if (regno < vec_safe_length (reg_known_value))
1643 	(*reg_known_value)[regno] = val;
1644     }
1645 }
1646 
1647 /* Similarly for reg_known_equiv_p.  */
1648 
1649 bool
1650 get_reg_known_equiv_p (unsigned int regno)
1651 {
1652   if (regno >= FIRST_PSEUDO_REGISTER)
1653     {
1654       regno -= FIRST_PSEUDO_REGISTER;
1655       if (regno < vec_safe_length (reg_known_value))
1656 	return bitmap_bit_p (reg_known_equiv_p, regno);
1657     }
1658   return false;
1659 }
1660 
1661 static void
1662 set_reg_known_equiv_p (unsigned int regno, bool val)
1663 {
1664   if (regno >= FIRST_PSEUDO_REGISTER)
1665     {
1666       regno -= FIRST_PSEUDO_REGISTER;
1667       if (regno < vec_safe_length (reg_known_value))
1668 	{
1669 	  if (val)
1670 	    bitmap_set_bit (reg_known_equiv_p, regno);
1671 	  else
1672 	    bitmap_clear_bit (reg_known_equiv_p, regno);
1673 	}
1674     }
1675 }
1676 
1677 
1678 /* Returns a canonical version of X, from the point of view of alias
1679    analysis.  (For example, if X is a MEM whose address is a register,
1680    and the register has a known value (say a SYMBOL_REF), then a MEM
1681    whose address is the SYMBOL_REF is returned.)  */
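/* For instance (a sketch, assuming pseudo 100 has the known value
   (symbol_ref "x")): canon_rtx applied to (plus:SI (reg:SI 100)
   (const_int 8)) first canonicalizes the register to the SYMBOL_REF and
   then folds the constant via plus_constant, typically yielding
   (const:SI (plus:SI (symbol_ref "x") (const_int 8))).  */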
1682 
1683 rtx
1684 canon_rtx (rtx x)
1685 {
1686   /* Recursively look for equivalences.  */
1687   if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1688     {
1689       rtx t = get_reg_known_value (REGNO (x));
1690       if (t == x)
1691 	return x;
1692       if (t)
1693 	return canon_rtx (t);
1694     }
1695 
1696   if (GET_CODE (x) == PLUS)
1697     {
1698       rtx x0 = canon_rtx (XEXP (x, 0));
1699       rtx x1 = canon_rtx (XEXP (x, 1));
1700 
1701       if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
1702 	{
1703 	  if (CONST_INT_P (x0))
1704 	    return plus_constant (GET_MODE (x), x1, INTVAL (x0));
1705 	  else if (CONST_INT_P (x1))
1706 	    return plus_constant (GET_MODE (x), x0, INTVAL (x1));
1707 	  return gen_rtx_PLUS (GET_MODE (x), x0, x1);
1708 	}
1709     }
1710 
1711   /* This gives us much better alias analysis when called from
1712      the loop optimizer.   Note we want to leave the original
1713      MEM alone, but need to return the canonicalized MEM with
1714      all the flags with their original values.  */
1715   else if (MEM_P (x))
1716     x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));
1717 
1718   return x;
1719 }
1720 
1721 /* Return 1 if X and Y are identical-looking rtx's.
1722    Expect that X and Y have already been canonicalized.
1723 
1724    We use the data in reg_known_value above to see if two registers with
1725    different numbers are, in fact, equivalent.  */
1726 
1727 static int
1728 rtx_equal_for_memref_p (const_rtx x, const_rtx y)
1729 {
1730   int i;
1731   int j;
1732   enum rtx_code code;
1733   const char *fmt;
1734 
1735   if (x == 0 && y == 0)
1736     return 1;
1737   if (x == 0 || y == 0)
1738     return 0;
1739 
1740   if (x == y)
1741     return 1;
1742 
1743   code = GET_CODE (x);
1744   /* Rtx's of different codes cannot be equal.  */
1745   if (code != GET_CODE (y))
1746     return 0;
1747 
1748   /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1749      (REG:SI x) and (REG:HI x) are NOT equivalent.  */
1750 
1751   if (GET_MODE (x) != GET_MODE (y))
1752     return 0;
1753 
1754   /* Some RTL can be compared without a recursive examination.  */
1755   switch (code)
1756     {
1757     case REG:
1758       return REGNO (x) == REGNO (y);
1759 
1760     case LABEL_REF:
1761       return LABEL_REF_LABEL (x) == LABEL_REF_LABEL (y);
1762 
1763     case SYMBOL_REF:
1764       return compare_base_symbol_refs (x, y) == 1;
1765 
1766     case ENTRY_VALUE:
1767       /* This is magic, don't go through canonicalization et al.  */
1768       return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));
1769 
1770     case VALUE:
1771     CASE_CONST_UNIQUE:
1772       /* Pointer equality guarantees equality for these nodes.  */
1773       return 0;
1774 
1775     default:
1776       break;
1777     }
1778 
1779   /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
1780   if (code == PLUS)
1781     return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
1782 	     && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
1783 	    || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
1784 		&& rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
1785   /* For commutative operations, the RTXs match if the operands match in any
1786      order.  Also handle the simple binary and unary cases without a loop.  */
1787   if (COMMUTATIVE_P (x))
1788     {
1789       rtx xop0 = canon_rtx (XEXP (x, 0));
1790       rtx yop0 = canon_rtx (XEXP (y, 0));
1791       rtx yop1 = canon_rtx (XEXP (y, 1));
1792 
1793       return ((rtx_equal_for_memref_p (xop0, yop0)
1794 	       && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
1795 	      || (rtx_equal_for_memref_p (xop0, yop1)
1796 		  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
1797     }
1798   else if (NON_COMMUTATIVE_P (x))
1799     {
1800       return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1801 				      canon_rtx (XEXP (y, 0)))
1802 	      && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
1803 					 canon_rtx (XEXP (y, 1))));
1804     }
1805   else if (UNARY_P (x))
1806     return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1807 				   canon_rtx (XEXP (y, 0)));
1808 
1809   /* Compare the elements.  If any pair of corresponding elements
1810      fails to match, return 0 for the whole thing.
1811 
1812      Limit cases to types which actually appear in addresses.  */
1813 
1814   fmt = GET_RTX_FORMAT (code);
1815   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1816     {
1817       switch (fmt[i])
1818 	{
1819 	case 'i':
1820 	  if (XINT (x, i) != XINT (y, i))
1821 	    return 0;
1822 	  break;
1823 
1824 	case 'E':
1825 	  /* Two vectors must have the same length.  */
1826 	  if (XVECLEN (x, i) != XVECLEN (y, i))
1827 	    return 0;
1828 
1829 	  /* And the corresponding elements must match.  */
1830 	  for (j = 0; j < XVECLEN (x, i); j++)
1831 	    if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
1832 					canon_rtx (XVECEXP (y, i, j))) == 0)
1833 	      return 0;
1834 	  break;
1835 
1836 	case 'e':
1837 	  if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
1838 				      canon_rtx (XEXP (y, i))) == 0)
1839 	    return 0;
1840 	  break;
1841 
1842 	  /* This can happen for asm operands.  */
1843 	case 's':
1844 	  if (strcmp (XSTR (x, i), XSTR (y, i)))
1845 	    return 0;
1846 	  break;
1847 
1848 	/* This can happen for an asm which clobbers memory.  */
1849 	case '0':
1850 	  break;
1851 
1852 	  /* It is believed that rtx's at this level will never
1853 	     contain anything but integers and other rtx's,
1854 	     except within LABEL_REFs and SYMBOL_REFs.  */
1855 	default:
1856 	  gcc_unreachable ();
1857 	}
1858     }
1859   return 1;
1860 }
1861 
1862 static rtx
1863 find_base_term (rtx x)
1864 {
1865   cselib_val *val;
1866   struct elt_loc_list *l, *f;
1867   rtx ret;
1868 
1869 #if defined (FIND_BASE_TERM)
1870   /* Try machine-dependent ways to find the base term.  */
1871   x = FIND_BASE_TERM (x);
1872 #endif
1873 
1874   switch (GET_CODE (x))
1875     {
1876     case REG:
1877       return REG_BASE_VALUE (x);
1878 
1879     case TRUNCATE:
1880       /* As we do not know which address space the pointer is referring to, we can
1881 	 handle this only if the target does not support different pointer or
1882 	 address modes depending on the address space.  */
1883       if (!target_default_pointer_address_modes_p ())
1884 	return 0;
1885       if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
1886 	return 0;
1887       /* Fall through.  */
1888     case HIGH:
1889     case PRE_INC:
1890     case PRE_DEC:
1891     case POST_INC:
1892     case POST_DEC:
1893     case PRE_MODIFY:
1894     case POST_MODIFY:
1895       return find_base_term (XEXP (x, 0));
1896 
1897     case ZERO_EXTEND:
1898     case SIGN_EXTEND:	/* Used for Alpha/NT pointers */
1899       /* As we do not know which address space the pointer is referring to, we can
1900 	 handle this only if the target does not support different pointer or
1901 	 address modes depending on the address space.  */
1902       if (!target_default_pointer_address_modes_p ())
1903 	return 0;
1904 
1905       {
1906 	rtx temp = find_base_term (XEXP (x, 0));
1907 
1908 	if (temp != 0 && CONSTANT_P (temp))
1909 	  temp = convert_memory_address (Pmode, temp);
1910 
1911 	return temp;
1912       }
1913 
1914     case VALUE:
1915       val = CSELIB_VAL_PTR (x);
1916       ret = NULL_RTX;
1917 
1918       if (!val)
1919 	return ret;
1920 
1921       if (cselib_sp_based_value_p (val))
1922 	return static_reg_base_value[STACK_POINTER_REGNUM];
1923 
1924       f = val->locs;
1925       /* Temporarily reset val->locs to avoid infinite recursion.  */
1926       val->locs = NULL;
1927 
1928       for (l = f; l; l = l->next)
1929 	if (GET_CODE (l->loc) == VALUE
1930 	    && CSELIB_VAL_PTR (l->loc)->locs
1931 	    && !CSELIB_VAL_PTR (l->loc)->locs->next
1932 	    && CSELIB_VAL_PTR (l->loc)->locs->loc == x)
1933 	  continue;
1934 	else if ((ret = find_base_term (l->loc)) != 0)
1935 	  break;
1936 
1937       val->locs = f;
1938       return ret;
1939 
1940     case LO_SUM:
1941       /* The standard form is (lo_sum reg sym) so look only at the
1942          second operand.  */
1943       return find_base_term (XEXP (x, 1));
1944 
1945     case CONST:
1946       x = XEXP (x, 0);
1947       if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
1948 	return 0;
1949       /* Fall through.  */
1950     case PLUS:
1951     case MINUS:
1952       {
1953 	rtx tmp1 = XEXP (x, 0);
1954 	rtx tmp2 = XEXP (x, 1);
1955 
1956 	/* This is a little bit tricky since we have to determine which of
1957 	   the two operands represents the real base address.  Otherwise this
1958 	   routine may return the index register instead of the base register.
1959 
1960 	   That may cause us to believe no aliasing was possible, when in
1961 	   fact aliasing is possible.
1962 
1963 	   We use a few simple tests to guess the base register.  Additional
1964 	   tests can certainly be added.  For example, if one of the operands
1965 	   is a shift or multiply, then it must be the index register and the
1966 	   other operand is the base register.  */
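	/* For example (hypothetically), in (plus (reg 100) (reg 101))
	   where only reg 101 has REG_POINTER set, the operands are
	   swapped below so the pointer register's base term is tried
	   first; if neither heuristic applies, we give up and return 0
	   rather than risk picking the index register.  */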
1967 
1968 	if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
1969 	  return find_base_term (tmp2);
1970 
1971 	/* If either operand is known to be a pointer, then prefer it
1972 	   to determine the base term.  */
1973 	if (REG_P (tmp1) && REG_POINTER (tmp1))
1974 	  ;
1975 	else if (REG_P (tmp2) && REG_POINTER (tmp2))
1976 	  std::swap (tmp1, tmp2);
1977 	/* If the second argument is a constant that has a base term, prefer
1978 	   it over the variable tmp1.  See PR64025.  */
1979 	else if (CONSTANT_P (tmp2) && !CONST_INT_P (tmp2))
1980 	  std::swap (tmp1, tmp2);
1981 
1982 	/* Go ahead and find the base term for both operands.  If either base
1983 	   term is from a pointer or is a named object or a special address
1984 	   (like an argument or stack reference), then use it for the
1985 	   base term.  */
1986 	rtx base = find_base_term (tmp1);
1987 	if (base != NULL_RTX
1988 	    && ((REG_P (tmp1) && REG_POINTER (tmp1))
1989 		 || known_base_value_p (base)))
1990 	  return base;
1991 	base = find_base_term (tmp2);
1992 	if (base != NULL_RTX
1993 	    && ((REG_P (tmp2) && REG_POINTER (tmp2))
1994 		 || known_base_value_p (base)))
1995 	  return base;
1996 
1997 	/* We could not determine which of the two operands was the
1998 	   base register and which was the index.  So we can determine
1999 	   nothing from the base alias check.  */
2000 	return 0;
2001       }
2002 
2003     case AND:
2004       if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
2005 	return find_base_term (XEXP (x, 0));
2006       return 0;
2007 
2008     case SYMBOL_REF:
2009     case LABEL_REF:
2010       return x;
2011 
2012     default:
2013       return 0;
2014     }
2015 }
2016 
2017 /* Return true if accesses to address X may alias accesses based
2018    on the stack pointer.  */
2019 
2020 bool
2021 may_be_sp_based_p (rtx x)
2022 {
2023   rtx base = find_base_term (x);
2024   return !base || base == static_reg_base_value[STACK_POINTER_REGNUM];
2025 }
2026 
2027 /* BASE1 and BASE2 are decls.  Return 1 if they refer to same object, 0
2028    if they refer to different objects and -1 if we cannot decide.  */
2029 
2030 int
2031 compare_base_decls (tree base1, tree base2)
2032 {
2033   int ret;
2034   gcc_checking_assert (DECL_P (base1) && DECL_P (base2));
2035   if (base1 == base2)
2036     return 1;
2037 
2038   /* If we have two register decls with a register specification, we
2039      cannot decide unless their assembler names are the same.  */
2040   if (DECL_REGISTER (base1)
2041       && DECL_REGISTER (base2)
2042       && DECL_ASSEMBLER_NAME_SET_P (base1)
2043       && DECL_ASSEMBLER_NAME_SET_P (base2))
2044     {
2045       if (DECL_ASSEMBLER_NAME (base1) == DECL_ASSEMBLER_NAME (base2))
2046 	return 1;
2047       return -1;
2048     }
2049 
2050   /* Declarations of non-automatic variables may have aliases.  All other
2051      decls are unique.  */
2052   if (!decl_in_symtab_p (base1)
2053       || !decl_in_symtab_p (base2))
2054     return 0;
2055 
2056   /* Don't cause symbols to be inserted by the act of checking.  */
2057   symtab_node *node1 = symtab_node::get (base1);
2058   if (!node1)
2059     return 0;
2060   symtab_node *node2 = symtab_node::get (base2);
2061   if (!node2)
2062     return 0;
2063 
2064   ret = node1->equal_address_to (node2, true);
2065   return ret;
2066 }
2067 
2068 /* Same as compare_base_decls but for SYMBOL_REF.  */
2069 
2070 static int
2071 compare_base_symbol_refs (const_rtx x_base, const_rtx y_base)
2072 {
2073   tree x_decl = SYMBOL_REF_DECL (x_base);
2074   tree y_decl = SYMBOL_REF_DECL (y_base);
2075   bool binds_def = true;
2076 
2077   if (XSTR (x_base, 0) == XSTR (y_base, 0))
2078     return 1;
2079   if (x_decl && y_decl)
2080     return compare_base_decls (x_decl, y_decl);
2081   if (x_decl || y_decl)
2082     {
2083       if (!x_decl)
2084 	{
2085 	  std::swap (x_decl, y_decl);
2086 	  std::swap (x_base, y_base);
2087 	}
2088       /* We handle only section anchors specially and assume that other
2089  	 labels may overlap with user variables in an arbitrary way.  */
2090       if (!SYMBOL_REF_HAS_BLOCK_INFO_P (y_base))
2091         return -1;
2092       /* Anchors contain static VAR_DECLs and CONST_DECLs.  We are safe
2093 	 to ignore CONST_DECLs because they are readonly.  */
2094       if (TREE_CODE (x_decl) != VAR_DECL
2095 	  || (!TREE_STATIC (x_decl) && !TREE_PUBLIC (x_decl)))
2096 	return 0;
2097 
2098       symtab_node *x_node = symtab_node::get_create (x_decl)
2099 			    ->ultimate_alias_target ();
2100       /* An external variable cannot be in a section anchor.  */
2101       if (!x_node->definition)
2102 	return 0;
2103       x_base = XEXP (DECL_RTL (x_node->decl), 0);
2104       /* If not in anchor, we can disambiguate.  */
2105       if (!SYMBOL_REF_HAS_BLOCK_INFO_P (x_base))
2106 	return 0;
2107 
2108       /* We have an alias of an anchored variable.  If it can be interposed,
2109 	 we must assume it may or may not alias its anchor.  */
2110       binds_def = decl_binds_to_current_def_p (x_decl);
2111     }
2112   /* If we have a variable in a section anchor, we can compare by offset.  */
2113   if (SYMBOL_REF_HAS_BLOCK_INFO_P (x_base)
2114       && SYMBOL_REF_HAS_BLOCK_INFO_P (y_base))
2115     {
2116       if (SYMBOL_REF_BLOCK (x_base) != SYMBOL_REF_BLOCK (y_base))
2117 	return 0;
2118       if (SYMBOL_REF_BLOCK_OFFSET (x_base) == SYMBOL_REF_BLOCK_OFFSET (y_base))
2119 	return binds_def ? 1 : -1;
2120       if (SYMBOL_REF_ANCHOR_P (x_base) != SYMBOL_REF_ANCHOR_P (y_base))
2121 	return -1;
2122       return 0;
2123     }
2124   /* In general we assume that memory locations pointed to by different labels
2125      may overlap in undefined ways.  */
2126   return -1;
2127 }
2128 
2129 /* Return 0 if the addresses X and Y are known to point to different
2130    objects, 1 if they might be pointers to the same object.  */
2131 
2132 static int
2133 base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base,
2134 		  machine_mode x_mode, machine_mode y_mode)
2135 {
2136   /* If the address itself has no known base, see if a known equivalent
2137      value has one.  If either address still has no known base, nothing
2138      is known about aliasing.  */
2139   if (x_base == 0)
2140     {
2141       rtx x_c;
2142 
2143       if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
2144 	return 1;
2145 
2146       x_base = find_base_term (x_c);
2147       if (x_base == 0)
2148 	return 1;
2149     }
2150 
2151   if (y_base == 0)
2152     {
2153       rtx y_c;
2154       if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
2155 	return 1;
2156 
2157       y_base = find_base_term (y_c);
2158       if (y_base == 0)
2159 	return 1;
2160     }
2161 
2162   /* If the base addresses are equal nothing is known about aliasing.  */
2163   if (rtx_equal_p (x_base, y_base))
2164     return 1;
2165 
2166   /* The base addresses are different expressions.  If they are not accessed
2167      via AND, there is no conflict.  We can bring knowledge of object
2168      alignment into play here.  For example, on alpha, "char a, b;" can
2169      alias one another, though "char a; long b;" cannot.  AND addresses may
2170      implicitly alias surrounding objects; i.e. unaligned access in DImode
2171      via AND address can alias all surrounding object types except those
2172      with alignment 8 or higher.  */
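  /* For instance (illustrative only): an Alpha-style address
     (and (reg) (const_int -8)) used for a DImode access is treated below
     as possibly aliasing any access whose mode unit size is smaller than
     8 bytes, while accesses of unit size 8 or more fall through to the
     base-term checks that follow.  */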
2173   if (GET_CODE (x) == AND && GET_CODE (y) == AND)
2174     return 1;
2175   if (GET_CODE (x) == AND
2176       && (!CONST_INT_P (XEXP (x, 1))
2177 	  || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
2178     return 1;
2179   if (GET_CODE (y) == AND
2180       && (!CONST_INT_P (XEXP (y, 1))
2181 	  || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
2182     return 1;
2183 
2184   /* Differing symbols not accessed via AND never alias.  */
2185   if (GET_CODE (x_base) == SYMBOL_REF && GET_CODE (y_base) == SYMBOL_REF)
2186     return compare_base_symbol_refs (x_base, y_base) != 0;
2187 
2188   if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
2189     return 0;
2190 
2191   if (unique_base_value_p (x_base) || unique_base_value_p (y_base))
2192     return 0;
2193 
2194   return 1;
2195 }
2196 
2197 /* Return TRUE if EXPR refers to a VALUE whose uid is greater than
2198    (or equal to) that of V.  */
2199 
2200 static bool
2201 refs_newer_value_p (const_rtx expr, rtx v)
2202 {
2203   int minuid = CSELIB_VAL_PTR (v)->uid;
2204   subrtx_iterator::array_type array;
2205   FOR_EACH_SUBRTX (iter, array, expr, NONCONST)
2206     if (GET_CODE (*iter) == VALUE && CSELIB_VAL_PTR (*iter)->uid >= minuid)
2207       return true;
2208   return false;
2209 }
2210 
2211 /* Convert the address X into something we can use.  This is done by returning
2212    it unchanged unless it is a VALUE or VALUE +/- constant; for VALUE
2213    we call cselib to get a more useful rtx.  */
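/* As a small example (assuming cselib has recorded such a location): if X
   is a VALUE and one of its locations is the constant (symbol_ref "x"),
   the loop over v->locs below returns that SYMBOL_REF, which later passes
   can compare directly.  */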
2214 
2215 rtx
2216 get_addr (rtx x)
2217 {
2218   cselib_val *v;
2219   struct elt_loc_list *l;
2220 
2221   if (GET_CODE (x) != VALUE)
2222     {
2223       if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2224 	  && GET_CODE (XEXP (x, 0)) == VALUE
2225 	  && CONST_SCALAR_INT_P (XEXP (x, 1)))
2226 	{
2227 	  rtx op0 = get_addr (XEXP (x, 0));
2228 	  if (op0 != XEXP (x, 0))
2229 	    {
2230 	      if (GET_CODE (x) == PLUS
2231 		  && GET_CODE (XEXP (x, 1)) == CONST_INT)
2232 		return plus_constant (GET_MODE (x), op0, INTVAL (XEXP (x, 1)));
2233 	      return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
2234 					  op0, XEXP (x, 1));
2235 	    }
2236 	}
2237       return x;
2238     }
2239   v = CSELIB_VAL_PTR (x);
2240   if (v)
2241     {
2242       bool have_equivs = cselib_have_permanent_equivalences ();
2243       if (have_equivs)
2244 	v = canonical_cselib_val (v);
2245       for (l = v->locs; l; l = l->next)
2246 	if (CONSTANT_P (l->loc))
2247 	  return l->loc;
2248       for (l = v->locs; l; l = l->next)
2249 	if (!REG_P (l->loc) && !MEM_P (l->loc)
2250 	    /* Avoid infinite recursion when potentially dealing with
2251 	       var-tracking artificial equivalences, by skipping the
2252 	       equivalences themselves, and not choosing expressions
2253 	       that refer to newer VALUEs.  */
2254 	    && (!have_equivs
2255 		|| (GET_CODE (l->loc) != VALUE
2256 		    && !refs_newer_value_p (l->loc, x))))
2257 	  return l->loc;
2258       if (have_equivs)
2259 	{
2260 	  for (l = v->locs; l; l = l->next)
2261 	    if (REG_P (l->loc)
2262 		|| (GET_CODE (l->loc) != VALUE
2263 		    && !refs_newer_value_p (l->loc, x)))
2264 	      return l->loc;
2265 	  /* Return the canonical value.  */
2266 	  return v->val_rtx;
2267 	}
2268       if (v->locs)
2269 	return v->locs->loc;
2270     }
2271   return x;
2272 }
2273 
2274 /*  Return the address of the (N_REFS + 1)th memory reference to ADDR
2275     where SIZE is the size in bytes of the memory reference.  If ADDR
2276     is not modified by the memory reference then ADDR is returned.  */
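/* For instance: with SIZE 4 and N_REFS 0, (pre_dec (reg sp)) evaluates to
   the canonicalized form of (plus (reg sp) (const_int -4)), whereas
   (post_inc (reg sp)) evaluates to (reg sp) itself, since the first
   reference through a POST_INC still uses the unmodified address.  */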
2277 
2278 static rtx
2279 addr_side_effect_eval (rtx addr, int size, int n_refs)
2280 {
2281   int offset = 0;
2282 
2283   switch (GET_CODE (addr))
2284     {
2285     case PRE_INC:
2286       offset = (n_refs + 1) * size;
2287       break;
2288     case PRE_DEC:
2289       offset = -(n_refs + 1) * size;
2290       break;
2291     case POST_INC:
2292       offset = n_refs * size;
2293       break;
2294     case POST_DEC:
2295       offset = -n_refs * size;
2296       break;
2297 
2298     default:
2299       return addr;
2300     }
2301 
2302   if (offset)
2303     addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
2304 			 gen_int_mode (offset, GET_MODE (addr)));
2305   else
2306     addr = XEXP (addr, 0);
2307   addr = canon_rtx (addr);
2308 
2309   return addr;
2310 }
2311 
2312 /* Return TRUE if an object X sized at XSIZE bytes and another object
2313    Y sized at YSIZE bytes, starting C bytes after X, may overlap.  If
2314    any of the sizes is zero, assume an overlap, otherwise use the
2315    absolute value of the sizes as the actual sizes.  */
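/* E.g. with XSIZE = 4 and YSIZE = 4: an offset of C = 2 overlaps (4 > 2),
   while C = 4 does not (the accesses are exactly adjacent); a zero size
   always reports an overlap.  */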
2316 
2317 static inline bool
2318 offset_overlap_p (HOST_WIDE_INT c, int xsize, int ysize)
2319 {
2320   return (xsize == 0 || ysize == 0
2321 	  || (c >= 0
2322 	      ? (abs (xsize) > c)
2323 	      : (abs (ysize) > -c)));
2324 }
2325 
2326 /* Return one if X and Y (memory addresses) reference the
2327    same location in memory or if the references overlap.
2328    Return zero if they do not overlap, else return
2329    minus one in which case they still might reference the same location.
2330 
2331    C is an offset accumulator.  When
2332    C is nonzero, we are testing aliases between X and Y + C.
2333    XSIZE is the size in bytes of the X reference,
2334    similarly YSIZE is the size in bytes for Y.
2335    Expect that canon_rtx has been already called for X and Y.
2336 
2337    If XSIZE or YSIZE is zero, we do not know the amount of memory being
2338    referenced (the reference was BLKmode), so make the most pessimistic
2339    assumptions.
2340 
2341    If XSIZE or YSIZE is negative, we may access memory outside the object
2342    being referenced as a side effect.  This can happen when using AND to
2343    align memory references, as is done on the Alpha.
2344 
2345    It would be nice to notice that varying addresses cannot conflict with
2346    fp if no local variables had their addresses taken, but that's too hard now.
2347 
2348    ???  Contrary to the tree alias oracle this does not return
2349    one for X + non-constant and Y + non-constant when X and Y are equal.
2350    If that is fixed the TBAA hack for union type-punning can be removed.  */
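/* A worked instance (illustrative): X = (plus (reg 100) (const_int 8)),
   XSIZE = 4, against Y = (plus (reg 100) (const_int 4)), YSIZE = 4, C = 0.
   The equal (reg 100) operands are stripped, the remaining constants
   adjust C to -4, and offset_overlap_p (-4, 4, 4) reports that the two
   4-byte accesses are adjacent but do not overlap, so 0 is returned.  */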
2351 
2352 static int
2353 memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
2354 {
2355   if (GET_CODE (x) == VALUE)
2356     {
2357       if (REG_P (y))
2358 	{
2359 	  struct elt_loc_list *l = NULL;
2360 	  if (CSELIB_VAL_PTR (x))
2361 	    for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs;
2362 		 l; l = l->next)
2363 	      if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
2364 		break;
2365 	  if (l)
2366 	    x = y;
2367 	  else
2368 	    x = get_addr (x);
2369 	}
2370       /* Don't call get_addr if y is the same VALUE.  */
2371       else if (x != y)
2372 	x = get_addr (x);
2373     }
2374   if (GET_CODE (y) == VALUE)
2375     {
2376       if (REG_P (x))
2377 	{
2378 	  struct elt_loc_list *l = NULL;
2379 	  if (CSELIB_VAL_PTR (y))
2380 	    for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs;
2381 		 l; l = l->next)
2382 	      if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
2383 		break;
2384 	  if (l)
2385 	    y = x;
2386 	  else
2387 	    y = get_addr (y);
2388 	}
2389       /* Don't call get_addr if x is the same VALUE.  */
2390       else if (y != x)
2391 	y = get_addr (y);
2392     }
2393   if (GET_CODE (x) == HIGH)
2394     x = XEXP (x, 0);
2395   else if (GET_CODE (x) == LO_SUM)
2396     x = XEXP (x, 1);
2397   else
2398     x = addr_side_effect_eval (x, abs (xsize), 0);
2399   if (GET_CODE (y) == HIGH)
2400     y = XEXP (y, 0);
2401   else if (GET_CODE (y) == LO_SUM)
2402     y = XEXP (y, 1);
2403   else
2404     y = addr_side_effect_eval (y, abs (ysize), 0);
2405 
2406   if (GET_CODE (x) == SYMBOL_REF && GET_CODE (y) == SYMBOL_REF)
2407     {
2408       int cmp = compare_base_symbol_refs (x,y);
2409 
2410       /* If both decls are the same, decide by offsets.  */
2411       if (cmp == 1)
2412         return offset_overlap_p (c, xsize, ysize);
2413       /* Assume a potential overlap for symbolic addresses that went
2414 	 through alignment adjustments (i.e., that have negative
2415 	 sizes), because we can't know how far they are from each
2416 	 other.  */
2417       if (xsize < 0 || ysize < 0)
2418 	return -1;
2419       /* If decls are different or we know by offsets that there is no overlap,
2420 	 we win.  */
2421       if (!cmp || !offset_overlap_p (c, xsize, ysize))
2422 	return 0;
2423       /* Decls may or may not be different and the offsets overlap.  */
2424       return -1;
2425     }
2426   else if (rtx_equal_for_memref_p (x, y))
2427     {
2428       return offset_overlap_p (c, xsize, ysize);
2429     }
2430 
2431   /* This code used to check for conflicts involving stack references and
2432      globals but the base address alias code now handles these cases.  */
2433 
2434   if (GET_CODE (x) == PLUS)
2435     {
2436       /* The fact that X is canonicalized means that this
2437 	 PLUS rtx is canonicalized.  */
2438       rtx x0 = XEXP (x, 0);
2439       rtx x1 = XEXP (x, 1);
2440 
2441       /* However, VALUEs might end up in different positions even in
2442 	 canonical PLUSes.  Comparing their addresses is enough.  */
2443       if (x0 == y)
2444 	return memrefs_conflict_p (xsize, x1, ysize, const0_rtx, c);
2445       else if (x1 == y)
2446 	return memrefs_conflict_p (xsize, x0, ysize, const0_rtx, c);
2447 
2448       if (GET_CODE (y) == PLUS)
2449 	{
2450 	  /* The fact that Y is canonicalized means that this
2451 	     PLUS rtx is canonicalized.  */
2452 	  rtx y0 = XEXP (y, 0);
2453 	  rtx y1 = XEXP (y, 1);
2454 
2455 	  if (x0 == y1)
2456 	    return memrefs_conflict_p (xsize, x1, ysize, y0, c);
2457 	  if (x1 == y0)
2458 	    return memrefs_conflict_p (xsize, x0, ysize, y1, c);
2459 
2460 	  if (rtx_equal_for_memref_p (x1, y1))
2461 	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2462 	  if (rtx_equal_for_memref_p (x0, y0))
2463 	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
2464 	  if (CONST_INT_P (x1))
2465 	    {
2466 	      if (CONST_INT_P (y1))
2467 		return memrefs_conflict_p (xsize, x0, ysize, y0,
2468 					   c - INTVAL (x1) + INTVAL (y1));
2469 	      else
2470 		return memrefs_conflict_p (xsize, x0, ysize, y,
2471 					   c - INTVAL (x1));
2472 	    }
2473 	  else if (CONST_INT_P (y1))
2474 	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2475 
2476 	  return -1;
2477 	}
2478       else if (CONST_INT_P (x1))
2479 	return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
2480     }
2481   else if (GET_CODE (y) == PLUS)
2482     {
2483       /* The fact that Y is canonicalized means that this
2484 	 PLUS rtx is canonicalized.  */
2485       rtx y0 = XEXP (y, 0);
2486       rtx y1 = XEXP (y, 1);
2487 
2488       if (x == y0)
2489 	return memrefs_conflict_p (xsize, const0_rtx, ysize, y1, c);
2490       if (x == y1)
2491 	return memrefs_conflict_p (xsize, const0_rtx, ysize, y0, c);
2492 
2493       if (CONST_INT_P (y1))
2494 	return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2495       else
2496 	return -1;
2497     }
2498 
2499   if (GET_CODE (x) == GET_CODE (y))
2500     switch (GET_CODE (x))
2501       {
2502       case MULT:
2503 	{
2504 	  /* Handle cases where we expect the second operands to be the
2505 	     same, and check only whether the first operand would conflict
2506 	     or not.  */
2507 	  rtx x0, y0;
2508 	  rtx x1 = canon_rtx (XEXP (x, 1));
2509 	  rtx y1 = canon_rtx (XEXP (y, 1));
2510 	  if (! rtx_equal_for_memref_p (x1, y1))
2511 	    return -1;
2512 	  x0 = canon_rtx (XEXP (x, 0));
2513 	  y0 = canon_rtx (XEXP (y, 0));
2514 	  if (rtx_equal_for_memref_p (x0, y0))
2515 	    return offset_overlap_p (c, xsize, ysize);
2516 
2517 	  /* Can't properly adjust our sizes.  */
2518 	  if (!CONST_INT_P (x1))
2519 	    return -1;
2520 	  xsize /= INTVAL (x1);
2521 	  ysize /= INTVAL (x1);
2522 	  c /= INTVAL (x1);
2523 	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2524 	}
2525 
2526       default:
2527 	break;
2528       }
2529 
2530   /* Deal with alignment ANDs by adjusting offset and size so as to
2531      cover the maximum range, without taking any previously known
2532      alignment into account.  Make a size negative after such an
2533      adjustment, so that, if we end up with e.g. two SYMBOL_REFs, we
2534      assume a potential overlap, because they may end up in contiguous
2535      memory locations and the stricter-alignment access may span over
2536      part of both.  */
2537   if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
2538     {
2539       HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
2540       unsigned HOST_WIDE_INT uc = sc;
2541       if (sc < 0 && -uc == (uc & -uc))
2542 	{
2543 	  if (xsize > 0)
2544 	    xsize = -xsize;
2545 	  if (xsize)
2546 	    xsize += sc + 1;
2547 	  c -= sc + 1;
2548 	  return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2549 				     ysize, y, c);
2550 	}
2551     }
2552   if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
2553     {
2554       HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
2555       unsigned HOST_WIDE_INT uc = sc;
2556       if (sc < 0 && -uc == (uc & -uc))
2557 	{
2558 	  if (ysize > 0)
2559 	    ysize = -ysize;
2560 	  if (ysize)
2561 	    ysize += sc + 1;
2562 	  c += sc + 1;
2563 	  return memrefs_conflict_p (xsize, x,
2564 				     ysize, canon_rtx (XEXP (y, 0)), c);
2565 	}
2566     }
2567 
2568   if (CONSTANT_P (x))
2569     {
2570       if (CONST_INT_P (x) && CONST_INT_P (y))
2571 	{
2572 	  c += (INTVAL (y) - INTVAL (x));
2573 	  return offset_overlap_p (c, xsize, ysize);
2574 	}
2575 
2576       if (GET_CODE (x) == CONST)
2577 	{
2578 	  if (GET_CODE (y) == CONST)
2579 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2580 				       ysize, canon_rtx (XEXP (y, 0)), c);
2581 	  else
2582 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2583 				       ysize, y, c);
2584 	}
2585       if (GET_CODE (y) == CONST)
2586 	return memrefs_conflict_p (xsize, x, ysize,
2587 				   canon_rtx (XEXP (y, 0)), c);
2588 
2589       /* Assume a potential overlap for symbolic addresses that went
2590 	 through alignment adjustments (i.e., that have negative
2591 	 sizes), because we can't know how far they are from each
2592 	 other.  */
2593       if (CONSTANT_P (y))
2594 	return (xsize < 0 || ysize < 0 || offset_overlap_p (c, xsize, ysize));
2595 
2596       return -1;
2597     }
2598 
2599   return -1;
2600 }
2601 
2602 /* Functions to compute memory dependencies.
2603 
2604    Since we process the insns in execution order, we can build tables
2605    to keep track of what registers are fixed (and not aliased), what registers
2606    are varying in known ways, and what registers are varying in unknown
2607    ways.
2608 
2609    If both memory references are volatile, then there must always be a
2610    dependence between the two references, since their order cannot be
2611    changed.  A volatile and non-volatile reference can be interchanged
2612    though.
2613 
2614    We also must allow AND addresses, because they may generate accesses
2615    outside the object being referenced.  This is used to generate aligned
2616    addresses from unaligned addresses, for instance, the alpha
2617    storeqi_unaligned pattern.  */
2618 
2619 /* Read dependence: X is read after read in MEM takes place.  There can
2620    only be a dependence here if both reads are volatile, or if either is
2621    an explicit barrier.  */
2622 
2623 int
2624 read_dependence (const_rtx mem, const_rtx x)
2625 {
2626   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2627     return true;
2628   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2629       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2630     return true;
2631   return false;
2632 }
2633 
2634 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it.  */
2635 
2636 static tree
2637 decl_for_component_ref (tree x)
2638 {
2639   do
2640     {
2641       x = TREE_OPERAND (x, 0);
2642     }
2643   while (x && TREE_CODE (x) == COMPONENT_REF);
2644 
2645   return x && DECL_P (x) ? x : NULL_TREE;
2646 }
2647 
2648 /* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate
2649    for the offset of the field reference.  *KNOWN_P says whether the
2650    offset is known.  */
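/* For example (hypothetical layout): for a reference a.b.c where field b
   sits at byte offset 4 within a and field c at byte offset 8 within b,
   the loop below adds 8 and then 4, increasing *OFFSET by 12; any
   variable field offset instead clears *KNOWN_P.  */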
2651 
2652 static void
2653 adjust_offset_for_component_ref (tree x, bool *known_p,
2654 				 HOST_WIDE_INT *offset)
2655 {
2656   if (!*known_p)
2657     return;
2658   do
2659     {
2660       tree xoffset = component_ref_field_offset (x);
2661       tree field = TREE_OPERAND (x, 1);
2662       if (TREE_CODE (xoffset) != INTEGER_CST)
2663 	{
2664 	  *known_p = false;
2665 	  return;
2666 	}
2667 
2668       offset_int woffset
2669 	= (wi::to_offset (xoffset)
2670 	   + wi::lrshift (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)),
2671 			  LOG2_BITS_PER_UNIT));
2672       if (!wi::fits_uhwi_p (woffset))
2673 	{
2674 	  *known_p = false;
2675 	  return;
2676 	}
2677       *offset += woffset.to_uhwi ();
2678 
2679       x = TREE_OPERAND (x, 0);
2680     }
2681   while (x && TREE_CODE (x) == COMPONENT_REF);
2682 }
2683 
2684 /* Return nonzero if we can determine the exprs corresponding to memrefs
2685    X and Y and they do not overlap.
2686    If LOOP_INVARIANT is set, skip offset-based disambiguation.  */
2687 
2688 int
2689 nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
2690 {
2691   tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2692   rtx rtlx, rtly;
2693   rtx basex, basey;
2694   bool moffsetx_known_p, moffsety_known_p;
2695   HOST_WIDE_INT moffsetx = 0, moffsety = 0;
2696   HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey;
2697 
2698   /* Unless both have exprs, we can't tell anything.  */
2699   if (exprx == 0 || expry == 0)
2700     return 0;
2701 
2702   /* For spill-slot accesses make sure we have valid offsets.  */
2703   if ((exprx == get_spill_slot_decl (false)
2704        && ! MEM_OFFSET_KNOWN_P (x))
2705       || (expry == get_spill_slot_decl (false)
2706 	  && ! MEM_OFFSET_KNOWN_P (y)))
2707     return 0;
2708 
2709   /* If the field reference test failed, look at the DECLs involved.  */
2710   moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
2711   if (moffsetx_known_p)
2712     moffsetx = MEM_OFFSET (x);
2713   if (TREE_CODE (exprx) == COMPONENT_REF)
2714     {
2715       tree t = decl_for_component_ref (exprx);
2716       if (! t)
2717 	return 0;
2718       adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
2719       exprx = t;
2720     }
2721 
2722   moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
2723   if (moffsety_known_p)
2724     moffsety = MEM_OFFSET (y);
2725   if (TREE_CODE (expry) == COMPONENT_REF)
2726     {
2727       tree t = decl_for_component_ref (expry);
2728       if (! t)
2729 	return 0;
2730       adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
2731       expry = t;
2732     }
2733 
2734   if (! DECL_P (exprx) || ! DECL_P (expry))
2735     return 0;
2736 
2737   /* If we refer to different gimple registers, or one gimple register
2738      and one non-gimple-register, we know they can't overlap.  First,
2739      gimple registers don't have their addresses taken.  Now, there
2740      could be more than one stack slot for (different versions of) the
2741      same gimple register, but we can presumably tell they don't
2742      overlap based on offsets from stack base addresses elsewhere.
2743      It's important that we don't proceed to DECL_RTL, because gimple
2744      registers may not pass DECL_RTL_SET_P, and make_decl_rtl won't be
2745      able to do anything about them since no SSA information will have
2746      remained to guide it.  */
2747   if (is_gimple_reg (exprx) || is_gimple_reg (expry))
2748     return exprx != expry
2749       || (moffsetx_known_p && moffsety_known_p
2750 	  && MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y)
2751 	  && !offset_overlap_p (moffsety - moffsetx,
2752 				MEM_SIZE (x), MEM_SIZE (y)));
2753 
2754   /* With invalid code we can end up storing into the constant pool.
2755      Bail out to avoid ICEing when creating RTL for this.
2756      See gfortran.dg/lto/20091028-2_0.f90.  */
2757   if (TREE_CODE (exprx) == CONST_DECL
2758       || TREE_CODE (expry) == CONST_DECL)
2759     return 1;
2760 
2761   /* If either of the decls doesn't have DECL_RTL set (e.g. marked as
2762      living in multiple places), we can't tell anything.  Exception
2763      are FUNCTION_DECLs for which we can create DECL_RTL on demand.  */
2764   if ((!DECL_RTL_SET_P (exprx) && TREE_CODE (exprx) != FUNCTION_DECL)
2765       || (!DECL_RTL_SET_P (expry) && TREE_CODE (expry) != FUNCTION_DECL))
2766     return 0;
2767 
2768   rtlx = DECL_RTL (exprx);
2769   rtly = DECL_RTL (expry);
2770 
2771   /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2772      can't overlap unless they are the same because we never reuse that part
2773      of the stack frame used for locals for spilled pseudos.  */
2774   if ((!MEM_P (rtlx) || !MEM_P (rtly))
2775       && ! rtx_equal_p (rtlx, rtly))
2776     return 1;
2777 
2778   /* If we have MEMs referring to different address spaces (which can
2779      potentially overlap), we cannot easily tell from the addresses
2780      whether the references overlap.  */
2781   if (MEM_P (rtlx) && MEM_P (rtly)
2782       && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2783     return 0;
2784 
2785   /* Get the base and offsets of both decls.  If either is a register, we
2786      know both are and are the same, so use that as the base.  The only way
2787      we can avoid overlap is if we can deduce that they are nonoverlapping
2788      pieces of that decl, which is very rare.  */
2789   basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2790   if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
2791     offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2792 
2793   basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2794   if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
2795     offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2796 
2797   /* If the bases are different, we know they do not overlap if both
2798      are constants or if one is a constant and the other a pointer into the
2799      stack frame.  Otherwise a different base means we can't tell if they
2800      overlap or not.  */
2801   if (compare_base_decls (exprx, expry) == 0)
2802     return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2803 	    || (CONSTANT_P (basex) && REG_P (basey)
2804 		&& REGNO_PTR_FRAME_P (REGNO (basey)))
2805 	    || (CONSTANT_P (basey) && REG_P (basex)
2806 		&& REGNO_PTR_FRAME_P (REGNO (basex))));
2807 
2808   /* Offset-based disambiguation is not appropriate for loop-invariant checks.  */
2809   if (loop_invariant)
2810     return 0;
2811 
2812   /* Offset-based disambiguation is OK even if we do not know that the
2813      declarations are necessarily different
2814      (i.e. compare_base_decls (exprx, expry) == -1).  */
2815 
2816   sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2817 	   : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
2818 	   : -1);
2819   sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2820 	   : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
2821 	   : -1);
2822 
2823   /* If we have an offset for either memref, it can update the values computed
2824      above.  */
2825   if (moffsetx_known_p)
2826     offsetx += moffsetx, sizex -= moffsetx;
2827   if (moffsety_known_p)
2828     offsety += moffsety, sizey -= moffsety;
2829 
2830   /* If a memref has both a size and an offset, we can use the smaller size.
2831      We can't do this if the offset isn't known because we must view this
2832      memref as being anywhere inside the DECL's MEM.  */
2833   if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p)
2834     sizex = MEM_SIZE (x);
2835   if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
2836     sizey = MEM_SIZE (y);
2837 
2838   /* Put the values of the memref with the lower offset in X's values.  */
2839   if (offsetx > offsety)
2840     {
2841       std::swap (offsetx, offsety);
2842       std::swap (sizex, sizey);
2843     }
2844 
2845   /* If we don't know the size of the lower-offset value, we can't tell
2846      if they conflict.  Otherwise, we do the test.  */
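  /* For instance (after the swap above): a 4-byte access at offset 0 and
     another at offset 8 into the same decl give sizex = 4, offsety = 8,
     and 8 >= 0 + 4, so the references are known not to overlap.  */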
2847   return sizex >= 0 && offsety >= offsetx + sizex;
2848 }
2849 
2850 /* Helper for true_dependence and canon_true_dependence.
2851    Checks for true dependence: X is read after store in MEM takes place.
2852 
2853    If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
2854    NULL_RTX, and the canonical addresses of MEM and X are both computed
2855    here.  If MEM_CANONICALIZED, then MEM must be already canonicalized.
2856 
2857    If X_ADDR is non-NULL, it is used in preference of XEXP (x, 0).
2858 
2859    Returns 1 if there is a true dependence, 0 otherwise.  */
2860 
2861 static int
2862 true_dependence_1 (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
2863 		   const_rtx x, rtx x_addr, bool mem_canonicalized)
2864 {
2865   rtx true_mem_addr;
2866   rtx base;
2867   int ret;
2868 
2869   gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
2870 		       : (mem_addr == NULL_RTX && x_addr == NULL_RTX));
2871 
2872   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2873     return 1;
2874 
2875   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2876      This is used in epilogue deallocation functions, and in cselib.  */
2877   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2878     return 1;
2879   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2880     return 1;
2881   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2882       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2883     return 1;
2884 
2885   if (! x_addr)
2886     x_addr = XEXP (x, 0);
2887   x_addr = get_addr (x_addr);
2888 
2889   if (! mem_addr)
2890     {
2891       mem_addr = XEXP (mem, 0);
2892       if (mem_mode == VOIDmode)
2893 	mem_mode = GET_MODE (mem);
2894     }
2895   true_mem_addr = get_addr (mem_addr);
2896 
2897   /* Read-only memory is by definition never modified, and therefore can't
2898      conflict with anything.  However, don't assume anything when AND
2899      addresses are involved and leave it to the code below to determine
2900      dependence.  We don't expect to find read-only set on MEM, but
2901      stupid user tricks can produce them, so don't die.  */
2902   if (MEM_READONLY_P (x)
2903       && GET_CODE (x_addr) != AND
2904       && GET_CODE (true_mem_addr) != AND)
2905     return 0;
2906 
2907   /* If we have MEMs referring to different address spaces (which can
2908      potentially overlap), we cannot easily tell from the addresses
2909      whether the references overlap.  */
2910   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2911     return 1;
2912 
2913   base = find_base_term (x_addr);
2914   if (base && (GET_CODE (base) == LABEL_REF
2915 	       || (GET_CODE (base) == SYMBOL_REF
2916 		   && CONSTANT_POOL_ADDRESS_P (base))))
2917     return 0;
2918 
2919   rtx mem_base = find_base_term (true_mem_addr);
2920   if (! base_alias_check (x_addr, base, true_mem_addr, mem_base,
2921 			  GET_MODE (x), mem_mode))
2922     return 0;
2923 
2924   x_addr = canon_rtx (x_addr);
2925   if (!mem_canonicalized)
2926     mem_addr = canon_rtx (true_mem_addr);
2927 
2928   if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2929 				 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2930     return ret;
2931 
2932   if (mems_in_disjoint_alias_sets_p (x, mem))
2933     return 0;
2934 
2935   if (nonoverlapping_memrefs_p (mem, x, false))
2936     return 0;
2937 
2938   return rtx_refs_may_alias_p (x, mem, true);
2939 }
2940 
2941 /* True dependence: X is read after store in MEM takes place.  */
2942 
2943 int
2944 true_dependence (const_rtx mem, machine_mode mem_mode, const_rtx x)
2945 {
2946   return true_dependence_1 (mem, mem_mode, NULL_RTX,
2947 			    x, NULL_RTX, /*mem_canonicalized=*/false);
2948 }
2949 
2950 /* Canonical true dependence: X is read after store in MEM takes place.
2951    Variant of true_dependence which assumes MEM has already been
2952    canonicalized (hence we no longer do that here).
2953    The mem_addr argument has been added, since true_dependence_1 computed
2954    this value prior to canonicalizing.  */
2955 
2956 int
2957 canon_true_dependence (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
2958 		       const_rtx x, rtx x_addr)
2959 {
2960   return true_dependence_1 (mem, mem_mode, mem_addr,
2961 			    x, x_addr, /*mem_canonicalized=*/true);
2962 }
2963 
2964 /* Returns nonzero if a write to X might alias a previous read from
2965    (or, if WRITEP is true, a write to) MEM.
2966    If X_CANONICALIZED is true, then X_ADDR is the canonicalized address of X,
2967    and X_MODE the mode for that access.
2968    If MEM_CANONICALIZED is true, MEM is canonicalized.  */
2969 
2970 static int
2971 write_dependence_p (const_rtx mem,
2972 		    const_rtx x, machine_mode x_mode, rtx x_addr,
2973 		    bool mem_canonicalized, bool x_canonicalized, bool writep)
2974 {
2975   rtx mem_addr;
2976   rtx true_mem_addr, true_x_addr;
2977   rtx base;
2978   int ret;
2979 
2980   gcc_checking_assert (x_canonicalized
2981 		       ? (x_addr != NULL_RTX && x_mode != VOIDmode)
2982 		       : (x_addr == NULL_RTX && x_mode == VOIDmode));
2983 
2984   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2985     return 1;
2986 
2987   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2988      This is used in epilogue deallocation functions.  */
2989   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2990     return 1;
2991   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2992     return 1;
2993   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2994       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2995     return 1;
2996 
2997   if (!x_addr)
2998     x_addr = XEXP (x, 0);
2999   true_x_addr = get_addr (x_addr);
3000 
3001   mem_addr = XEXP (mem, 0);
3002   true_mem_addr = get_addr (mem_addr);
3003 
3004   /* A read from read-only memory can't conflict with read-write memory.
3005      Don't assume anything when AND addresses are involved and leave to
3006      Don't assume anything when AND addresses are involved and leave it to
3007   if (!writep
3008       && MEM_READONLY_P (mem)
3009       && GET_CODE (true_x_addr) != AND
3010       && GET_CODE (true_mem_addr) != AND)
3011     return 0;
3012 
3013   /* If we have MEMs referring to different address spaces (which can
3014      potentially overlap), we cannot easily tell from the addresses
3015      whether the references overlap.  */
3016   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
3017     return 1;
3018 
3019   base = find_base_term (true_mem_addr);
3020   if (! writep
3021       && base
3022       && (GET_CODE (base) == LABEL_REF
3023 	  || (GET_CODE (base) == SYMBOL_REF
3024 	      && CONSTANT_POOL_ADDRESS_P (base))))
3025     return 0;
3026 
3027   rtx x_base = find_base_term (true_x_addr);
3028   if (! base_alias_check (true_x_addr, x_base, true_mem_addr, base,
3029 			  GET_MODE (x), GET_MODE (mem)))
3030     return 0;
3031 
3032   if (!x_canonicalized)
3033     {
3034       x_addr = canon_rtx (true_x_addr);
3035       x_mode = GET_MODE (x);
3036     }
3037   if (!mem_canonicalized)
3038     mem_addr = canon_rtx (true_mem_addr);
3039 
3040   if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
3041 				 GET_MODE_SIZE (x_mode), x_addr, 0)) != -1)
3042     return ret;
3043 
3044   if (nonoverlapping_memrefs_p (x, mem, false))
3045     return 0;
3046 
3047   return rtx_refs_may_alias_p (x, mem, false);
3048 }
3049 
3050 /* Anti dependence: X is written after read in MEM takes place.  */
3051 
3052 int
3053 anti_dependence (const_rtx mem, const_rtx x)
3054 {
3055   return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
3056 			     /*mem_canonicalized=*/false,
3057 			     /*x_canonicalized*/false, /*writep=*/false);
3058 }
3059 
3060 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
3061    Also, consider X in X_MODE (which might be from an enclosing
3062    STRICT_LOW_PART / ZERO_EXTRACT).
3063    If MEM_CANONICALIZED is true, MEM is canonicalized.  */
3064 
3065 int
3066 canon_anti_dependence (const_rtx mem, bool mem_canonicalized,
3067 		       const_rtx x, machine_mode x_mode, rtx x_addr)
3068 {
3069   return write_dependence_p (mem, x, x_mode, x_addr,
3070 			     mem_canonicalized, /*x_canonicalized=*/true,
3071 			     /*writep=*/false);
3072 }
3073 
3074 /* Output dependence: X is written after store in MEM takes place.  */
3075 
3076 int
3077 output_dependence (const_rtx mem, const_rtx x)
3078 {
3079   return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
3080 			     /*mem_canonicalized=*/false,
3081 			     /*x_canonicalized*/false, /*writep=*/true);
3082 }
3083 
3084 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
3085    Also, consider X in X_MODE (which might be from an enclosing
3086    STRICT_LOW_PART / ZERO_EXTRACT).
3087    If MEM_CANONICALIZED is true, MEM is canonicalized.  */
3088 
3089 int
3090 canon_output_dependence (const_rtx mem, bool mem_canonicalized,
3091 			 const_rtx x, machine_mode x_mode, rtx x_addr)
3092 {
3093   return write_dependence_p (mem, x, x_mode, x_addr,
3094 			     mem_canonicalized, /*x_canonicalized=*/true,
3095 			     /*writep=*/true);
3096 }
3097 
3098 
3099 
3100 /* Check whether X may be aliased with MEM.  Don't do offset-based
3101    memory disambiguation or TBAA.  */
3102 int
3103 may_alias_p (const_rtx mem, const_rtx x)
3104 {
3105   rtx x_addr, mem_addr;
3106 
3107   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
3108     return 1;
3109 
3110   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
3111      This is used in epilogue deallocation functions.  */
3112   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
3113     return 1;
3114   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
3115     return 1;
3116   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
3117       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
3118     return 1;
3119 
3120   x_addr = XEXP (x, 0);
3121   x_addr = get_addr (x_addr);
3122 
3123   mem_addr = XEXP (mem, 0);
3124   mem_addr = get_addr (mem_addr);
3125 
3126   /* Read-only memory is by definition never modified, and therefore can't
3127      conflict with anything.  However, don't assume anything when AND
3128      addresses are involved and leave to the code below to determine
3129      dependence.  We don't expect to find read-only set on MEM, but
3130      stupid user tricks can produce them, so don't die.  */
3131   if (MEM_READONLY_P (x)
3132       && GET_CODE (x_addr) != AND
3133       && GET_CODE (mem_addr) != AND)
3134     return 0;
3135 
3136   /* If we have MEMs referring to different address spaces (which can
3137      potentially overlap), we cannot easily tell from the addresses
3138      whether the references overlap.  */
3139   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
3140     return 1;
3141 
3142   rtx x_base = find_base_term (x_addr);
3143   rtx mem_base = find_base_term (mem_addr);
3144   if (! base_alias_check (x_addr, x_base, mem_addr, mem_base,
3145 			  GET_MODE (x), GET_MODE (mem_addr)))
3146     return 0;
3147 
3148   if (nonoverlapping_memrefs_p (mem, x, true))
3149     return 0;
3150 
3151   /* TBAA not valid for loop_invarint */
3152   return rtx_refs_may_alias_p (x, mem, false);
3153 }
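
/* For example (illustrative only): given two frame references such as
   (mem:SI (plus (reg fp) (const_int -4))) and
   (mem:SI (plus (reg fp) (const_int -8))),
   true_dependence can use the constant offsets to prove they are disjoint,
   whereas may_alias_p deliberately skips offset-based disambiguation and
   TBAA and may therefore answer 1 for them.  */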

/* Set up the target-dependent parts of the aliasing machinery: record
   which hard registers may hold incoming pointer arguments and give the
   stack, argument and frame pointers their unique base values.  */

void
init_alias_target (void)
{
  int i;

  if (!arg_base_value)
    arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0);

  memset (static_reg_base_value, 0, sizeof static_reg_base_value);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    /* Check whether this register can hold an incoming pointer
       argument.  FUNCTION_ARG_REGNO_P tests outgoing register
       numbers, so translate if necessary due to register windows.  */
    if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
	&& HARD_REGNO_MODE_OK (i, Pmode))
      static_reg_base_value[i] = arg_base_value;

  static_reg_base_value[STACK_POINTER_REGNUM]
    = unique_base_value (UNIQUE_BASE_VALUE_SP);
  static_reg_base_value[ARG_POINTER_REGNUM]
    = unique_base_value (UNIQUE_BASE_VALUE_ARGP);
  static_reg_base_value[FRAME_POINTER_REGNUM]
    = unique_base_value (UNIQUE_BASE_VALUE_FP);
  if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
    static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
      = unique_base_value (UNIQUE_BASE_VALUE_HFP);
}
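
/* For example (a hypothetical target, not any particular port): if hard
   registers 0-3 can carry incoming pointer arguments, their entries in
   static_reg_base_value all become arg_base_value, while the stack,
   argument and frame pointers each get their own unique ADDRESS base
   value, so that base_alias_check can distinguish pointers derived from
   them from pointers derived from incoming arguments.  */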

/* Set MEMORY_MODIFIED when X modifies DATA (which is assumed to be a
   memory reference).  */
static bool memory_modified;
static void
memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  if (MEM_P (x))
    {
      if (anti_dependence (x, (const_rtx) data)
	  || output_dependence (x, (const_rtx) data))
	memory_modified = true;
    }
}


/* Return true when INSN may modify the memory contents of MEM
   (i.e. the location MEM addresses may be written by INSN).  */
bool
memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
{
  if (!INSN_P (insn))
    return false;
  memory_modified = false;
  note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX (mem));
  return memory_modified;
}
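
/* A minimal usage sketch (an assumed caller, not code from this file):

     if (!memory_modified_in_insn_p (mem, insn))
       the value previously loaded from MEM is still valid after INSN;

   Only stores visible to note_stores in INSN's pattern are considered, so
   other side effects of e.g. call insns must be handled by the caller.  */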

/* Return TRUE if the destination of SET is an rtx identical to
   ITEM.  */
static inline bool
set_dest_equal_p (const_rtx set, const_rtx item)
{
  rtx dest = SET_DEST (set);
  return rtx_equal_p (dest, item);
}

/* Initialize the aliasing machinery.  Initialize the REG_KNOWN_VALUE
   array.  */

void
init_alias_analysis (void)
{
  unsigned int maxreg = max_reg_num ();
  int changed, pass;
  int i;
  unsigned int ui;
  rtx_insn *insn;
  rtx val;
  int rpo_cnt;
  int *rpo;

  timevar_push (TV_ALIAS_ANALYSIS);

  vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
  reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
  bitmap_clear (reg_known_equiv_p);

  /* If we have memory allocated from the previous run, use it.  */
  if (old_reg_base_value)
    reg_base_value = old_reg_base_value;

  if (reg_base_value)
    reg_base_value->truncate (0);

  vec_safe_grow_cleared (reg_base_value, maxreg);

  new_reg_base_value = XNEWVEC (rtx, maxreg);
  reg_seen = sbitmap_alloc (maxreg);

  /* The basic idea is that each pass through this loop will use the
     "constant" information from the previous pass to propagate alias
     information through another level of assignments.

     The propagation is done on the CFG in reverse post-order, to propagate
     things forward as far as possible in each iteration.

     This could get expensive if the assignment chains are long.  Maybe
     we should throttle the number of iterations, possibly based on
     the optimization level or flag_expensive_optimizations.

     We could propagate more information in the first pass by making use
     of DF_REG_DEF_COUNT to determine immediately that the alias information
     for a pseudo is "constant".

     A program with an uninitialized variable can cause an infinite loop
     here.  Instead of doing a full dataflow analysis to detect such problems
     we just cap the number of iterations for the loop.

     The state of the arrays for the set chain in question does not matter
     since the program has undefined behavior.  */
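
  /* A worked example (illustrative pseudo-RTL; register numbers are made
     up):

	p60 = &a		base of p60 becomes the symbol's base
	p61 = p60 + 4		pass 1: p60's base is already recorded, so
				p61 inherits it in the same pass
	p62 = p61 + 8		if this set were visited before p61's base
				was recorded, it would be picked up on the
				next pass; RPO order makes that rare

     The loop below stops when no reg_base_value entry changes, or after
     MAX_ALIAS_LOOP_PASSES iterations.  */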

  rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);

  /* The prologue/epilogue insns are not threaded onto the
     insn chain until after reload has completed.  Thus,
     there is no sense wasting time checking if INSN is in
     the prologue/epilogue until after reload has completed.  */
  bool could_be_prologue_epilogue = ((targetm.have_prologue ()
				      || targetm.have_epilogue ())
				     && reload_completed);

  pass = 0;
  do
    {
      /* Assume nothing will change this iteration of the loop.  */
      changed = 0;

      /* We want to assign the same IDs each iteration of this loop, so
	 start counting from one each iteration of the loop.  */
      unique_id = 1;

      /* We're at the start of the function each iteration through the
	 loop, so we're copying arguments.  */
      copying_arguments = true;

      /* Wipe the potential alias information clean for this pass.  */
      memset (new_reg_base_value, 0, maxreg * sizeof (rtx));

      /* Wipe the reg_seen array clean.  */
      bitmap_clear (reg_seen);

      /* Initialize the alias information for this pass.  */
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (static_reg_base_value[i])
	  {
	    new_reg_base_value[i] = static_reg_base_value[i];
	    bitmap_set_bit (reg_seen, i);
	  }

      /* Walk the insns adding values to the new_reg_base_value array.  */
      for (i = 0; i < rpo_cnt; i++)
	{
	  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
	  FOR_BB_INSNS (bb, insn)
	    {
	      if (NONDEBUG_INSN_P (insn))
		{
		  rtx note, set;

		  if (could_be_prologue_epilogue
		      && prologue_epilogue_contains (insn))
		    continue;

		  /* If this insn has a noalias note, process it.  Otherwise,
		     scan for sets.  A simple set will have no side effects
		     which could change the base value of any other register.  */

		  if (GET_CODE (PATTERN (insn)) == SET
		      && REG_NOTES (insn) != 0
		      && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
		    record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
		  else
		    note_stores (PATTERN (insn), record_set, NULL);

		  set = single_set (insn);

		  if (set != 0
		      && REG_P (SET_DEST (set))
		      && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
		    {
		      unsigned int regno = REGNO (SET_DEST (set));
		      rtx src = SET_SRC (set);
		      rtx t;

		      note = find_reg_equal_equiv_note (insn);
		      if (note && REG_NOTE_KIND (note) == REG_EQUAL
			  && DF_REG_DEF_COUNT (regno) != 1)
			note = NULL_RTX;

		      if (note != NULL_RTX
			  && GET_CODE (XEXP (note, 0)) != EXPR_LIST
			  && ! rtx_varies_p (XEXP (note, 0), 1)
			  && ! reg_overlap_mentioned_p (SET_DEST (set),
							XEXP (note, 0)))
			{
			  set_reg_known_value (regno, XEXP (note, 0));
			  set_reg_known_equiv_p (regno,
						 REG_NOTE_KIND (note) == REG_EQUIV);
			}
		      else if (DF_REG_DEF_COUNT (regno) == 1
			       && GET_CODE (src) == PLUS
			       && REG_P (XEXP (src, 0))
			       && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
			       && CONST_INT_P (XEXP (src, 1)))
			{
			  t = plus_constant (GET_MODE (src), t,
					     INTVAL (XEXP (src, 1)));
			  set_reg_known_value (regno, t);
			  set_reg_known_equiv_p (regno, false);
			}
		      else if (DF_REG_DEF_COUNT (regno) == 1
			       && ! rtx_varies_p (src, 1))
			{
			  set_reg_known_value (regno, src);
			  set_reg_known_equiv_p (regno, false);
			}
		    }
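
		  /* Illustrative example (register numbers made up): for
		     (set (reg 101) (plus (reg 100) (const_int 8))), where
		     reg 101 has a single definition and reg 100 already has
		     a known value, the code above records reg 101's known
		     value as reg 100's value plus 8; the REG_EQUAL and
		     REG_EQUIV paths record note values analogously.  */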
		}
	      else if (NOTE_P (insn)
		       && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
		copying_arguments = false;
	    }
	}

      /* Now propagate values from new_reg_base_value to reg_base_value.  */
      gcc_assert (maxreg == (unsigned int) max_reg_num ());

      for (ui = 0; ui < maxreg; ui++)
	{
	  if (new_reg_base_value[ui]
	      && new_reg_base_value[ui] != (*reg_base_value)[ui]
	      && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui]))
	    {
	      (*reg_base_value)[ui] = new_reg_base_value[ui];
	      changed = 1;
	    }
	}
    }
  while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
  XDELETEVEC (rpo);

  /* Fill in the remaining entries.  */
  FOR_EACH_VEC_ELT (*reg_known_value, i, val)
    {
      int regno = i + FIRST_PSEUDO_REGISTER;
      if (! val)
	set_reg_known_value (regno, regno_reg_rtx[regno]);
    }

  /* Clean up.  */
  free (new_reg_base_value);
  new_reg_base_value = 0;
  sbitmap_free (reg_seen);
  reg_seen = 0;
  timevar_pop (TV_ALIAS_ANALYSIS);
}

/* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
   Special API for var-tracking pass purposes.  */

void
vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
{
  (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
}

/* Tear down the per-function alias analysis data.  reg_base_value is kept
   (as old_reg_base_value) so the next init_alias_analysis call can reuse
   its storage.  */

void
end_alias_analysis (void)
{
  old_reg_base_value = reg_base_value;
  vec_free (reg_known_value);
  sbitmap_free (reg_known_equiv_p);
}
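
/* Typical calling sequence for an RTL pass (a sketch of usual usage, not
   a quotation of any specific caller):

     init_alias_analysis ();
     ... dependence queries such as true_dependence, anti_dependence
	 and output_dependence on the pass's MEMs ...
     end_alias_analysis ();

   repeated for each function the pass processes.  */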

/* Dump alias statistics gathered in this file to S.  */

void
dump_alias_stats_in_alias_c (FILE *s)
{
  fprintf (s, "  TBAA oracle: %llu disambiguations %llu queries\n"
	      "               %llu are in alias set 0\n"
	      "               %llu queries asked about the same object\n"
	      "               %llu queries asked about the same alias set\n"
	      "               %llu access volatile\n"
	      "               %llu are dependent in the DAG\n"
	      "               %llu are artificially in conflict with void *\n",
	   alias_stats.num_disambiguated,
	   alias_stats.num_alias_zero + alias_stats.num_same_alias_set
	   + alias_stats.num_same_objects + alias_stats.num_volatile
	   + alias_stats.num_dag + alias_stats.num_disambiguated
	   + alias_stats.num_universal,
	   alias_stats.num_alias_zero, alias_stats.num_same_alias_set,
	   alias_stats.num_same_objects, alias_stats.num_volatile,
	   alias_stats.num_dag, alias_stats.num_universal);
}
#include "gt-alias.h"