1 /* Alias analysis for GNU C
2    Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
3    Free Software Foundation, Inc.
4    Contributed by John Carr (jfc@mit.edu).
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING.  If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA.  */
22 
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "function.h"
31 #include "alias.h"
32 #include "emit-rtl.h"
33 #include "regs.h"
34 #include "hard-reg-set.h"
35 #include "basic-block.h"
36 #include "flags.h"
37 #include "output.h"
38 #include "toplev.h"
39 #include "cselib.h"
40 #include "splay-tree.h"
41 #include "ggc.h"
42 #include "langhooks.h"
43 #include "timevar.h"
44 #include "target.h"
45 #include "cgraph.h"
46 #include "varray.h"
47 #include "tree-pass.h"
48 #include "ipa-type-escape.h"
49 
50 /* The aliasing API provided here solves related but different problems:
51 
52    Say there exists (in c)
53 
54    struct X {
55      struct Y y1;
56      struct Z z2;
57    } x1, *px1,  *px2;
58 
59    struct Y y2, *py;
60    struct Z z2, *pz;
61 
62 
63    py = &px1->y1;
64    px2 = &x1;
65 
66    Consider the four questions:
67 
68    Can a store to x1 interfere with px2->y1?
69    Can a store to x1 interfere with px2->z2
70    (that is, with (*px2).z2)?
71    Can a store to x1 change the value pointed to by py?
72    Can a store to x1 change the value pointed to by pz?
73 
74    The answers to these questions can be yes, yes, yes, and maybe.
75 
76    The first two questions can be answered with a simple examination
77    of the type system.  If structure X contains a field of type Y then
78    a store through a pointer to an X can overwrite any field that is
79    contained (recursively) in an X (unless we know that px1 != px2).
80 
81    The last two of the questions can be solved in the same way as the
82    first two questions but this is too conservative.  The observation
83    is that in some cases the analysis can determine which (if any) fields
84    are addressed and whether those addresses are used in bad ways.  This
85    analysis may be language specific.  In C, arbitrary operations may
86    be applied to pointers.  However, there is some indication that
87    this may be too conservative for some C++ types.
88 
89    The pass ipa-type-escape does this analysis for the types whose
90    instances do not escape across the compilation boundary.
91 
92    Historically in GCC, these two problems were combined and a single
93    data structure was used to represent the solution to these
94    problems.  We now have two similar but different data structures.
95    The data structure used to solve the last two questions is similar
96    to the first, but does not contain the fields whose addresses are
97    never taken.  For types that do escape the compilation unit, the
98    two data structures will hold identical information.
99 */
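/* Editor's addition -- an illustrative sketch, not part of the
   original comment; the helper names `make_x' and `use' are
   hypothetical.  The first question is concrete in code such as:

     px2 = &x1;
     x1 = make_x ();        this store overwrites x1.y1, and hence
     use (px2->y1);         the object px2->y1 refers to, so the two
                            accesses must be assumed to interfere.  */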
100 
101 /* The alias sets assigned to MEMs assist the back-end in determining
102    which MEMs can alias which other MEMs.  In general, two MEMs in
103    different alias sets cannot alias each other, with one important
104    exception.  Consider something like:
105 
106      struct S { int i; double d; };
107 
108    a store to an `S' can alias something of either type `int' or type
109    `double'.  (However, a store to an `int' cannot alias a `double'
110    and vice versa.)  We indicate this via a tree structure that looks
111    like:
112 	   struct S
113 	    /   \
114 	   /     \
115 	 |/_     _\|
116 	 int    double
117 
118    (The arrows are directed and point downwards.)
119     In this situation we say the alias set for `struct S' is the
120    `superset' and that those for `int' and `double' are `subsets'.
121 
122    To see whether two alias sets can point to the same memory, we must
123    see if either alias set is a subset of the other. We need not trace
124    past immediate descendants, however, since we propagate all
125    grandchildren up one level.
126 
127    Alias set zero is implicitly a superset of all other alias sets.
128    However, there is no actual entry for alias set zero.  It is an
129    error to attempt to explicitly construct a subset of zero.  */
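/* Editor's addition -- an illustrative sketch of the tree above.
   Given

     struct S *ps;  int *pi;  double *pd;

   a store through `ps' may alias both *pi and *pd, because the alias
   sets of `int' and `double' are subsets of the set of `struct S';
   but a store through `pd' can never alias *pi, because the two leaf
   sets are disjoint.  */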
130 
131 struct alias_set_entry GTY(())
132 {
133   /* The alias set number, as stored in MEM_ALIAS_SET.  */
134   HOST_WIDE_INT alias_set;
135 
136   /* The children of the alias set.  These are not just the immediate
137      children, but, in fact, all descendants.  So, if we have:
138 
139        struct T { struct S s; float f; }
140 
141      continuing our example above, the children here will be all of
142      `int', `double', `float', and `struct S'.  */
143   splay_tree GTY((param1_is (int), param2_is (int))) children;
144 
145   /* Nonzero if this set would have a child of alias set zero: this
146      effectively makes this alias set the same as alias set zero.  */
147   int has_zero_child;
148 };
149 typedef struct alias_set_entry *alias_set_entry;
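/* Editor's addition -- a sketch of what the entry for `struct T'
   (from the comment above) would contain once both types have been
   recorded:

     alias_set      = the set of `struct T'
     children       = { `int', `double', `float', `struct S' }
     has_zero_child = 0

   Note that the grandchildren `int' and `double' (reached via
   `struct S') appear directly in T's children tree.  */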
150 
151 static int rtx_equal_for_memref_p (rtx, rtx);
152 static rtx find_symbolic_term (rtx);
153 static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
154 static void record_set (rtx, rtx, void *);
155 static int base_alias_check (rtx, rtx, enum machine_mode,
156 			     enum machine_mode);
157 static rtx find_base_value (rtx);
158 static int mems_in_disjoint_alias_sets_p (rtx, rtx);
159 static int insert_subset_children (splay_tree_node, void*);
160 static tree find_base_decl (tree);
161 static alias_set_entry get_alias_set_entry (HOST_WIDE_INT);
162 static rtx fixed_scalar_and_varying_struct_p (rtx, rtx, rtx, rtx,
163 					      int (*) (rtx, int));
164 static int aliases_everything_p (rtx);
165 static bool nonoverlapping_component_refs_p (tree, tree);
166 static tree decl_for_component_ref (tree);
167 static rtx adjust_offset_for_component_ref (tree, rtx);
168 static int nonoverlapping_memrefs_p (rtx, rtx);
169 static int write_dependence_p (rtx, rtx, int);
170 
171 static void memory_modified_1 (rtx, rtx, void *);
172 static void record_alias_subset (HOST_WIDE_INT, HOST_WIDE_INT);
173 
174 /* Set up all info needed to perform alias analysis on memory references.  */
175 
176 /* Returns the size in bytes of the mode of X.  */
177 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
178 
179 /* Returns nonzero if MEM1 and MEM2 do not alias because they are in
180    different alias sets.  We ignore alias sets in functions making use
181    of variable arguments because the va_arg macros on some systems are
182    not legal ANSI C.  */
183 #define DIFFERENT_ALIAS_SETS_P(MEM1, MEM2)			\
184   mems_in_disjoint_alias_sets_p (MEM1, MEM2)
185 
186 /* Cap the number of passes we make over the insns propagating alias
187    information through set chains.   10 is a completely arbitrary choice.  */
188 #define MAX_ALIAS_LOOP_PASSES 10
189 
190 /* reg_base_value[N] gives an address to which register N is related.
191    If all sets after the first add or subtract to the current value
192    or otherwise modify it so it does not point to a different top level
193    object, reg_base_value[N] is equal to the address part of the source
194    of the first set.
195 
196    A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
197    expressions represent certain special values: function arguments and
198    the stack, frame, and argument pointers.
199 
200    The contents of an ADDRESS are not normally used; the mode of the
201    ADDRESS determines whether the ADDRESS is a function argument or some
202    other special value.  Pointer equality, not rtx_equal_p, determines whether
203    two ADDRESS expressions refer to the same base address.
204 
205    The only use of the contents of an ADDRESS is for determining if the
206    current function performs nonlocal memory references for the
207    purposes of marking the function as a constant function.  */
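/* Editor's addition -- an illustrative RTL sketch (the register
   number is hypothetical):

     (set (reg 100) (symbol_ref "x"))
     (set (reg 100) (plus (reg 100) (const_int 4)))

   After the first set, reg_base_value[100] is (symbol_ref "x").  The
   second set merely offsets the register within the same top-level
   object, so the recorded base value is retained.  */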
208 
209 static GTY(()) VEC(rtx,gc) *reg_base_value;
210 static rtx *new_reg_base_value;
211 
212 /* We preserve a copy of the old array to reduce the amount of garbage
213    produced.  About 8% of the garbage produced was attributed to this
214    array.  */
215 static GTY((deletable)) VEC(rtx,gc) *old_reg_base_value;
216 
217 /* Static hunks of RTL used by the aliasing code; these are initialized
218    once per function to avoid unnecessary RTL allocations.  */
219 static GTY (()) rtx static_reg_base_value[FIRST_PSEUDO_REGISTER];
220 
221 #define REG_BASE_VALUE(X)				\
222   (REGNO (X) < VEC_length (rtx, reg_base_value)		\
223    ? VEC_index (rtx, reg_base_value, REGNO (X)) : 0)
224 
225 /* Vector indexed by N giving the initial (unchanging) value known for
226    pseudo-register N.  This array is initialized in init_alias_analysis,
227    and does not change until end_alias_analysis is called.  */
228 static GTY((length("reg_known_value_size"))) rtx *reg_known_value;
229 
230 /* Indicates number of valid entries in reg_known_value.  */
231 static GTY(()) unsigned int reg_known_value_size;
232 
233 /* Vector recording for each reg_known_value whether it is due to a
234    REG_EQUIV note.  Future passes (viz., reload) may replace the
235    pseudo with the equivalent expression and so we account for the
236    dependences that would be introduced if that happens.
237 
238    The REG_EQUIV notes created in assign_parms may mention the arg
239    pointer, and there are explicit insns in the RTL that modify the
240    arg pointer.  Thus we must ensure that such insns don't get
241    scheduled across each other because that would invalidate the
242    REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
243    wrong, but solving the problem in the scheduler will likely give
244    better code, so we do it here.  */
245 static bool *reg_known_equiv_p;
246 
247 /* True when scanning insns from the start of the rtl to the
248    NOTE_INSN_FUNCTION_BEG note.  */
249 static bool copying_arguments;
250 
251 DEF_VEC_P(alias_set_entry);
252 DEF_VEC_ALLOC_P(alias_set_entry,gc);
253 
254 /* The splay-tree used to store the various alias set entries.  */
255 static GTY (()) VEC(alias_set_entry,gc) *alias_sets;
256 
257 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is
258    such an entry, or NULL otherwise.  */
259 
260 static inline alias_set_entry
261 get_alias_set_entry (HOST_WIDE_INT alias_set)
262 {
263   return VEC_index (alias_set_entry, alias_sets, alias_set);
264 }
265 
266 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
267    the two MEMs cannot alias each other.  */
268 
269 static inline int
270 mems_in_disjoint_alias_sets_p (rtx mem1, rtx mem2)
271 {
272 /* Perform a basic sanity check.  Namely, that there are no alias sets
273    if we're not using strict aliasing.  This helps to catch bugs
274    whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or
275    where a MEM is allocated in some way other than by the use of
276    gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared.  If we begin to
277    use alias sets to indicate that spilled registers cannot alias each
278    other, we might need to remove this check.  */
279   gcc_assert (flag_strict_aliasing
280 	      || (!MEM_ALIAS_SET (mem1) && !MEM_ALIAS_SET (mem2)));
281 
282   return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2));
283 }
284 
285 /* Insert the NODE into the splay tree given by DATA.  Used by
286    record_alias_subset via splay_tree_foreach.  */
287 
288 static int
289 insert_subset_children (splay_tree_node node, void *data)
290 {
291   splay_tree_insert ((splay_tree) data, node->key, node->value);
292 
293   return 0;
294 }
295 
296 /* Return 1 if the two specified alias sets may conflict.  */
297 
298 int
299 alias_sets_conflict_p (HOST_WIDE_INT set1, HOST_WIDE_INT set2)
300 {
301   alias_set_entry ase;
302 
303   /* If we have no alias set information for one of the operands, we have
304      to assume it can alias anything.  */
305   if (set1 == 0 || set2 == 0
306       /* If the two alias sets are the same, they may alias.  */
307       || set1 == set2)
308     return 1;
309 
310   /* See if the first alias set is a subset of the second.  */
311   ase = get_alias_set_entry (set1);
312   if (ase != 0
313       && (ase->has_zero_child
314 	  || splay_tree_lookup (ase->children,
315 				(splay_tree_key) set2)))
316     return 1;
317 
318   /* Now do the same, but with the alias sets reversed.  */
319   ase = get_alias_set_entry (set2);
320   if (ase != 0
321       && (ase->has_zero_child
322 	  || splay_tree_lookup (ase->children,
323 				(splay_tree_key) set1)))
324     return 1;
325 
326   /* The two alias sets are distinct and neither one is the
327      child of the other.  Therefore, they cannot alias.  */
328   return 0;
329 }
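/* Editor's addition -- an illustrative sketch, with hypothetical
   variables holding the alias sets of the `struct S' example from the
   head of this file:

     alias_sets_conflict_p (s_set, int_set)       returns 1 (subset)
     alias_sets_conflict_p (int_set, double_set)  returns 0 (disjoint)
     alias_sets_conflict_p (0, double_set)        returns 1 (set zero)  */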
330 
331 /* Return 1 if the two specified alias sets might conflict, or if any subtype
332    of these alias sets might conflict.  */
333 
334 int
335 alias_sets_might_conflict_p (HOST_WIDE_INT set1, HOST_WIDE_INT set2)
336 {
337   if (set1 == 0 || set2 == 0 || set1 == set2)
338     return 1;
339 
340   return 0;
341 }
342 
343 
344 /* Return 1 if any MEM object of type T1 will always conflict (using the
345    dependency routines in this file) with any MEM object of type T2.
346    This is used when allocating temporary storage.  If T1 and/or T2 are
347    NULL_TREE, it means we know nothing about the storage.  */
348 
349 int
350 objects_must_conflict_p (tree t1, tree t2)
351 {
352   HOST_WIDE_INT set1, set2;
353 
354   /* If neither has a type specified, we don't know if they'll conflict
355      because we may be using them to store objects of various types, for
356      example the argument and local variables areas of inlined functions.  */
357   if (t1 == 0 && t2 == 0)
358     return 0;
359 
360   /* If they are the same type, they must conflict.  */
361   if (t1 == t2
362       /* Likewise if both are volatile.  */
363       || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
364     return 1;
365 
366   set1 = t1 ? get_alias_set (t1) : 0;
367   set2 = t2 ? get_alias_set (t2) : 0;
368 
369   /* Otherwise they conflict if they have no alias set or the same one.  We
370      can't simply use alias_sets_conflict_p here, because we must make
371      sure that every subtype of t1 will conflict with every subtype of
372      t2 for which a pair of subobjects of these respective subtypes
373      overlaps on the stack.  */
374   return set1 == 0 || set2 == 0 || set1 == set2;
375 }
376 
377 /* T is an expression with pointer type.  Find the DECL on which this
378    expression is based.  (For example, in `a[i]' this would be `a'.)
379    If there is no such DECL, or a unique decl cannot be determined,
380    NULL_TREE is returned.  */
381 
382 static tree
383 find_base_decl (tree t)
384 {
385   tree d0, d1;
386 
387   if (t == 0 || t == error_mark_node || ! POINTER_TYPE_P (TREE_TYPE (t)))
388     return 0;
389 
390   /* If this is a declaration, return it.  If T is based on a restrict
391      qualified decl, return that decl.  */
392   if (DECL_P (t))
393     {
394       if (TREE_CODE (t) == VAR_DECL && DECL_BASED_ON_RESTRICT_P (t))
395 	t = DECL_GET_RESTRICT_BASE (t);
396       return t;
397     }
398 
399   /* Handle general expressions.  It would be nice to deal with
400      COMPONENT_REFs here.  If we could tell that `a' and `b' were the
401      same, then `a->f' and `b->f' are also the same.  */
402   switch (TREE_CODE_CLASS (TREE_CODE (t)))
403     {
404     case tcc_unary:
405       return find_base_decl (TREE_OPERAND (t, 0));
406 
407     case tcc_binary:
408       /* Return 0 if found in neither, or in both with different decls.  */
409       d0 = find_base_decl (TREE_OPERAND (t, 0));
410       d1 = find_base_decl (TREE_OPERAND (t, 1));
411       if (d0 == d1)
412 	return d0;
413       else if (d0 == 0)
414 	return d1;
415       else if (d1 == 0)
416 	return d0;
417       else
418 	return 0;
419 
420     default:
421       return 0;
422     }
423 }
424 
425 /* Return true if all nested component references handled by
426    get_inner_reference in T are such that we should use the alias set
427    provided by the object at the heart of T.
428 
429    This is true for non-addressable components (which don't have their
430    own alias set), as well as components of objects in alias set zero.
431    This latter point is a special case wherein we wish to override the
432    alias set used by the component, but we don't have per-FIELD_DECL
433    assignable alias sets.  */
434 
435 bool
436 component_uses_parent_alias_set (tree t)
437 {
438   while (1)
439     {
440       /* If we're at the end, it vacuously uses its own alias set.  */
441       if (!handled_component_p (t))
442 	return false;
443 
444       switch (TREE_CODE (t))
445 	{
446 	case COMPONENT_REF:
447 	  if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
448 	    return true;
449 	  break;
450 
451 	case ARRAY_REF:
452 	case ARRAY_RANGE_REF:
453 	  if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
454 	    return true;
455 	  break;
456 
457 	case REALPART_EXPR:
458 	case IMAGPART_EXPR:
459 	  break;
460 
461 	default:
462 	  /* Bitfields and casts are never addressable.  */
463 	  return true;
464 	}
465 
466       t = TREE_OPERAND (t, 0);
467       if (get_alias_set (TREE_TYPE (t)) == 0)
468 	return true;
469     }
470 }
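/* Editor's addition -- an illustrative sketch.  For

     struct B { int bits : 3; } b;

   a reference to `b.bits' goes through a bit-field, which is never
   addressable, so component_uses_parent_alias_set returns true and
   the reference uses the alias set of `struct B' rather than that of
   `int'.  */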
471 
472 /* Return the alias set for T, which may be either a type or an
473    expression.  Call language-specific routine for help, if needed.  */
474 
475 HOST_WIDE_INT
476 get_alias_set (tree t)
477 {
478   HOST_WIDE_INT set;
479 
480   /* If we're not doing any alias analysis, just assume everything
481      aliases everything else.  Also return 0 if this or its type is
482      an error.  */
483   if (! flag_strict_aliasing || t == error_mark_node
484       || (! TYPE_P (t)
485 	  && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
486     return 0;
487 
488   /* We can be passed either an expression or a type.  This and the
489      language-specific routine may make mutually-recursive calls to each other
490      to figure out what to do.  At each juncture, we see if this is a tree
491      that the language may need to handle specially.  First handle things that
492      aren't types.  */
493   if (! TYPE_P (t))
494     {
495       tree inner = t;
496 
497       /* Remove any nops, then give the language a chance to do
498 	 something with this tree before we look at it.  */
499       STRIP_NOPS (t);
500       set = lang_hooks.get_alias_set (t);
501       if (set != -1)
502 	return set;
503 
504       /* First see if the actual object referenced is an INDIRECT_REF from a
505 	 restrict-qualified pointer or a "void *".  */
506       while (handled_component_p (inner))
507 	{
508 	  inner = TREE_OPERAND (inner, 0);
509 	  STRIP_NOPS (inner);
510 	}
511 
512       /* Check for accesses through restrict-qualified pointers.  */
513       if (INDIRECT_REF_P (inner))
514 	{
515 	  tree decl = find_base_decl (TREE_OPERAND (inner, 0));
516 
517 	  if (decl && DECL_POINTER_ALIAS_SET_KNOWN_P (decl))
518 	    {
519 	      /* If we haven't computed the actual alias set, do it now.  */
520 	      if (DECL_POINTER_ALIAS_SET (decl) == -2)
521 		{
522 		  tree pointed_to_type = TREE_TYPE (TREE_TYPE (decl));
523 
524 		  /* No two restricted pointers can point at the same thing.
525 		     However, a restricted pointer can point at the same thing
526 		     as an unrestricted pointer, if that unrestricted pointer
527 		     is based on the restricted pointer.  So, we make the
528 		     alias set for the restricted pointer a subset of the
529 		     alias set for the type pointed to by the type of the
530 		     decl.  */
531 		  HOST_WIDE_INT pointed_to_alias_set
532 		    = get_alias_set (pointed_to_type);
533 
534 		  if (pointed_to_alias_set == 0)
535 		    /* It's not legal to make a subset of alias set zero.  */
536 		    DECL_POINTER_ALIAS_SET (decl) = 0;
537 		  else if (AGGREGATE_TYPE_P (pointed_to_type))
538 		    /* For an aggregate, we must treat the restricted
539 		       pointer the same as an ordinary pointer.  If we
540 		       were to make the type pointed to by the
541 		       restricted pointer a subset of the pointed-to
542 		       type, then we would believe that other subsets
543 		       of the pointed-to type (such as fields of that
544 		       type) do not conflict with the type pointed to
545 		       by the restricted pointer.  */
546 		    DECL_POINTER_ALIAS_SET (decl)
547 		      = pointed_to_alias_set;
548 		  else
549 		    {
550 		      DECL_POINTER_ALIAS_SET (decl) = new_alias_set ();
551 		      record_alias_subset (pointed_to_alias_set,
552 					   DECL_POINTER_ALIAS_SET (decl));
553 		    }
554 		}
555 
556 	      /* We use the alias set indicated in the declaration.  */
557 	      return DECL_POINTER_ALIAS_SET (decl);
558 	    }
559 
560 	  /* If we have an INDIRECT_REF via a void pointer, we don't
561 	     know anything about what that might alias.  Likewise if the
562 	     pointer is marked that way.  */
563 	  else if (TREE_CODE (TREE_TYPE (inner)) == VOID_TYPE
564 		   || (TYPE_REF_CAN_ALIAS_ALL
565 		       (TREE_TYPE (TREE_OPERAND (inner, 0)))))
566 	    return 0;
567 	}
568 
569       /* Otherwise, pick up the outermost object that we could have a pointer
570 	 to, processing conversions as above.  */
571       while (component_uses_parent_alias_set (t))
572 	{
573 	  t = TREE_OPERAND (t, 0);
574 	  STRIP_NOPS (t);
575 	}
576 
577       /* If we've already determined the alias set for a decl, just return
578 	 it.  This is necessary for C++ anonymous unions, whose component
579 	 variables don't look like union members (boo!).  */
580       if (TREE_CODE (t) == VAR_DECL
581 	  && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
582 	return MEM_ALIAS_SET (DECL_RTL (t));
583 
584       /* Now all we care about is the type.  */
585       t = TREE_TYPE (t);
586     }
587 
588   /* Variant qualifiers don't affect the alias set, so get the main
589      variant. If this is a type with a known alias set, return it.  */
590   t = TYPE_MAIN_VARIANT (t);
591   if (TYPE_ALIAS_SET_KNOWN_P (t))
592     return TYPE_ALIAS_SET (t);
593 
594   /* See if the language has special handling for this type.  */
595   set = lang_hooks.get_alias_set (t);
596   if (set != -1)
597     return set;
598 
599   /* There are no objects of FUNCTION_TYPE, so there's no point in
600      using up an alias set for them.  (There are, of course, pointers
601      and references to functions, but that's different.)  */
602   else if (TREE_CODE (t) == FUNCTION_TYPE)
603     set = 0;
604 
605   /* Unless the language specifies otherwise, let vector types alias
606      their components.  This avoids some nasty type punning issues in
607      normal usage.  And indeed lets vectors be treated more like an
608      array slice.  */
609   else if (TREE_CODE (t) == VECTOR_TYPE)
610     set = get_alias_set (TREE_TYPE (t));
611 
612   else
613     /* Otherwise make a new alias set for this type.  */
614     set = new_alias_set ();
615 
616   TYPE_ALIAS_SET (t) = set;
617 
618   /* If this is an aggregate type, we must record any component aliasing
619      information.  */
620   if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
621     record_component_aliases (t);
622 
623   return set;
624 }
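/* Editor's addition -- an illustrative sketch.  Because get_alias_set
   reduces a type to its main variant, one would expect, e.g.:

     get_alias_set (integer_type_node)
       == get_alias_set (build_qualified_type (integer_type_node,
					       TYPE_QUAL_CONST))

   while unrelated types normally receive fresh sets via
   new_alias_set.  */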
625 
626 /* Return a brand-new alias set.  */
627 
628 HOST_WIDE_INT
629 new_alias_set (void)
630 {
631   if (flag_strict_aliasing)
632     {
633       if (alias_sets == 0)
634 	VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
635       VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
636       return VEC_length (alias_set_entry, alias_sets) - 1;
637     }
638   else
639     return 0;
640 }
641 
642 /* Indicate that things in SUBSET can alias things in SUPERSET, but that
643    not everything that aliases SUPERSET also aliases SUBSET.  For example,
644    in C, a store to an `int' can alias a load of a structure containing an
645    `int', and vice versa.  But it can't alias a load of a 'double' member
646    of the same structure.  Here, the structure would be the SUPERSET and
647    `int' the SUBSET.  This relationship is also described in the comment at
648    the beginning of this file.
649 
650    This function should be called only once per SUPERSET/SUBSET pair.
651 
652    It is illegal for SUPERSET to be zero; everything is implicitly a
653    subset of alias set zero.  */
654 
655 static void
656 record_alias_subset (HOST_WIDE_INT superset, HOST_WIDE_INT subset)
657 {
658   alias_set_entry superset_entry;
659   alias_set_entry subset_entry;
660 
661   /* It is possible in complex type situations for both sets to be the same,
662      in which case we can ignore this operation.  */
663   if (superset == subset)
664     return;
665 
666   gcc_assert (superset);
667 
668   superset_entry = get_alias_set_entry (superset);
669   if (superset_entry == 0)
670     {
671       /* Create an entry for the SUPERSET, so that we have a place to
672 	 attach the SUBSET.  */
673       superset_entry = ggc_alloc (sizeof (struct alias_set_entry));
674       superset_entry->alias_set = superset;
675       superset_entry->children
676 	= splay_tree_new_ggc (splay_tree_compare_ints);
677       superset_entry->has_zero_child = 0;
678       VEC_replace (alias_set_entry, alias_sets, superset, superset_entry);
679     }
680 
681   if (subset == 0)
682     superset_entry->has_zero_child = 1;
683   else
684     {
685       subset_entry = get_alias_set_entry (subset);
686       /* If there is an entry for the subset, enter all of its children
687 	 (if they are not already present) as children of the SUPERSET.  */
688       if (subset_entry)
689 	{
690 	  if (subset_entry->has_zero_child)
691 	    superset_entry->has_zero_child = 1;
692 
693 	  splay_tree_foreach (subset_entry->children, insert_subset_children,
694 			      superset_entry->children);
695 	}
696 
697       /* Enter the SUBSET itself as a child of the SUPERSET.  */
698       splay_tree_insert (superset_entry->children,
699 			 (splay_tree_key) subset, 0);
700     }
701 }
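/* Editor's addition -- an illustrative sketch (the *_set variables
   are hypothetical).  For the `struct S' from the head of this file,
   record_component_aliases below effectively performs

     record_alias_subset (s_set, int_set);
     record_alias_subset (s_set, double_set);

   after which S's children tree contains both leaf sets.  */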
702 
703 /* Record that component types of TYPE, if any, are part of that type for
704    aliasing purposes.  For record types, we only record component types
705    for fields that are marked addressable.  For array types, we always
706    record the component types, so the front end should not call this
707    function if the individual components aren't addressable.  */
708 
709 void
710 record_component_aliases (tree type)
711 {
712   HOST_WIDE_INT superset = get_alias_set (type);
713   tree field;
714 
715   if (superset == 0)
716     return;
717 
718   switch (TREE_CODE (type))
719     {
720     case ARRAY_TYPE:
721       if (! TYPE_NONALIASED_COMPONENT (type))
722 	record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
723       break;
724 
725     case RECORD_TYPE:
726     case UNION_TYPE:
727     case QUAL_UNION_TYPE:
728       /* Recursively record aliases for the base classes, if there are any.  */
729       if (TYPE_BINFO (type))
730 	{
731 	  int i;
732 	  tree binfo, base_binfo;
733 
734 	  for (binfo = TYPE_BINFO (type), i = 0;
735 	       BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
736 	    record_alias_subset (superset,
737 				 get_alias_set (BINFO_TYPE (base_binfo)));
738 	}
739       for (field = TYPE_FIELDS (type); field != 0; field = TREE_CHAIN (field))
740 	if (TREE_CODE (field) == FIELD_DECL && ! DECL_NONADDRESSABLE_P (field))
741 	  record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
742       break;
743 
744     case COMPLEX_TYPE:
745       record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
746       break;
747 
748     default:
749       break;
750     }
751 }
752 
753 /* Allocate an alias set for use in storing and reading from the varargs
754    spill area.  */
755 
756 static GTY(()) HOST_WIDE_INT varargs_set = -1;
757 
758 HOST_WIDE_INT
759 get_varargs_alias_set (void)
760 {
761 #if 1
762   /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
763      varargs alias set to an INDIRECT_REF (FIXME!), so we can't
764      consistently use the varargs alias set for loads from the varargs
765      area.  So don't use it anywhere.  */
766   return 0;
767 #else
768   if (varargs_set == -1)
769     varargs_set = new_alias_set ();
770 
771   return varargs_set;
772 #endif
773 }
774 
775 /* Likewise, but used for the fixed portions of the frame, e.g., register
776    save areas.  */
777 
778 static GTY(()) HOST_WIDE_INT frame_set = -1;
779 
780 HOST_WIDE_INT
781 get_frame_alias_set (void)
782 {
783   if (frame_set == -1)
784     frame_set = new_alias_set ();
785 
786   return frame_set;
787 }
788 
789 /* Inside SRC, the source of a SET, find a base address.  */
790 
791 static rtx
792 find_base_value (rtx src)
793 {
794   unsigned int regno;
795 
796   switch (GET_CODE (src))
797     {
798     case SYMBOL_REF:
799     case LABEL_REF:
800       return src;
801 
802     case REG:
803       regno = REGNO (src);
804       /* At the start of a function, argument registers have known base
805 	 values which may be lost later.  Returning an ADDRESS
806 	 expression here allows optimization based on argument values
807 	 even when the argument registers are used for other purposes.  */
808       if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
809 	return new_reg_base_value[regno];
810 
811       /* If a pseudo has a known base value, return it.  Do not do this
812 	 for non-fixed hard regs since it can result in a circular
813 	 dependency chain for registers which have values at function entry.
814 
815 	 The test above is not sufficient because the scheduler may move
816 	 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEG note.  */
817       if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
818 	  && regno < VEC_length (rtx, reg_base_value))
819 	{
820 	  /* If we're inside init_alias_analysis, use new_reg_base_value
821 	     to reduce the number of relaxation iterations.  */
822 	  if (new_reg_base_value && new_reg_base_value[regno]
823 	      && REG_N_SETS (regno) == 1)
824 	    return new_reg_base_value[regno];
825 
826 	  if (VEC_index (rtx, reg_base_value, regno))
827 	    return VEC_index (rtx, reg_base_value, regno);
828 	}
829 
830       return 0;
831 
832     case MEM:
833       /* Check for an argument passed in memory.  Only record in the
834 	 copying-arguments block; it is too hard to track changes
835 	 otherwise.  */
836       if (copying_arguments
837 	  && (XEXP (src, 0) == arg_pointer_rtx
838 	      || (GET_CODE (XEXP (src, 0)) == PLUS
839 		  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
840 	return gen_rtx_ADDRESS (VOIDmode, src);
841       return 0;
842 
843     case CONST:
844       src = XEXP (src, 0);
845       if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
846 	break;
847 
848       /* ... fall through ...  */
849 
850     case PLUS:
851     case MINUS:
852       {
853 	rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);
854 
855 	/* If either operand is a REG that is a known pointer, then it
856 	   is the base.  */
857 	if (REG_P (src_0) && REG_POINTER (src_0))
858 	  return find_base_value (src_0);
859 	if (REG_P (src_1) && REG_POINTER (src_1))
860 	  return find_base_value (src_1);
861 
862 	/* If either operand is a REG, then see if we already have
863 	   a known value for it.  */
864 	if (REG_P (src_0))
865 	  {
866 	    temp = find_base_value (src_0);
867 	    if (temp != 0)
868 	      src_0 = temp;
869 	  }
870 
871 	if (REG_P (src_1))
872 	  {
873 	    temp = find_base_value (src_1);
874 	    if (temp != 0)
875 	      src_1 = temp;
876 	  }
877 
878 	/* If either base is a named object or a special address
879 	   (like an argument or stack reference), then use it for the
880 	   base term.  */
881 	if (src_0 != 0
882 	    && (GET_CODE (src_0) == SYMBOL_REF
883 		|| GET_CODE (src_0) == LABEL_REF
884 		|| (GET_CODE (src_0) == ADDRESS
885 		    && GET_MODE (src_0) != VOIDmode)))
886 	  return src_0;
887 
888 	if (src_1 != 0
889 	    && (GET_CODE (src_1) == SYMBOL_REF
890 		|| GET_CODE (src_1) == LABEL_REF
891 		|| (GET_CODE (src_1) == ADDRESS
892 		    && GET_MODE (src_1) != VOIDmode)))
893 	  return src_1;
894 
895 	/* Guess which operand is the base address:
896 	   If either operand is a symbol, then it is the base.  If
897 	   either operand is a CONST_INT, then the other is the base.  */
898 	if (GET_CODE (src_1) == CONST_INT || CONSTANT_P (src_0))
899 	  return find_base_value (src_0);
900 	else if (GET_CODE (src_0) == CONST_INT || CONSTANT_P (src_1))
901 	  return find_base_value (src_1);
902 
903 	return 0;
904       }
905 
906     case LO_SUM:
907       /* The standard form is (lo_sum reg sym) so look only at the
908 	 second operand.  */
909       return find_base_value (XEXP (src, 1));
910 
911     case AND:
912       /* If the second operand is a nonzero constant, set the base
913 	 address to the first operand.  */
914       if (GET_CODE (XEXP (src, 1)) == CONST_INT && INTVAL (XEXP (src, 1)) != 0)
915 	return find_base_value (XEXP (src, 0));
916       return 0;
917 
918     case TRUNCATE:
919       if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
920 	break;
921       /* Fall through.  */
922     case HIGH:
923     case PRE_INC:
924     case PRE_DEC:
925     case POST_INC:
926     case POST_DEC:
927     case PRE_MODIFY:
928     case POST_MODIFY:
929       return find_base_value (XEXP (src, 0));
930 
931     case ZERO_EXTEND:
932     case SIGN_EXTEND:	/* used for NT/Alpha pointers */
933       {
934 	rtx temp = find_base_value (XEXP (src, 0));
935 
936 	if (temp != 0 && CONSTANT_P (temp))
937 	  temp = convert_memory_address (Pmode, temp);
938 
939 	return temp;
940       }
941 
942     default:
943       break;
944     }
945 
946   return 0;
947 }
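/* Editor's addition -- an illustrative sketch (register numbers are
   hypothetical).  Applied to

     (plus (reg 100) (const_int 8))

   find_base_value recurses into the REG operand, since a CONST_INT
   cannot be a base, and returns e.g. (symbol_ref "x") if register
   100 is known to have been set from `&x'.  */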
948 
949 /* Called from init_alias_analysis indirectly through note_stores.  */
950 
951 /* While scanning insns to find base values, reg_seen[N] is nonzero if
952    register N has been set in this function.  */
953 static char *reg_seen;
954 
955 /* Addresses which are known not to alias anything else are identified
956    by a unique integer.  */
957 static int unique_id;
958 
959 static void
960 record_set (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
961 {
962   unsigned regno;
963   rtx src;
964   int n;
965 
966   if (!REG_P (dest))
967     return;
968 
969   regno = REGNO (dest);
970 
971   gcc_assert (regno < VEC_length (rtx, reg_base_value));
972 
973   /* If this spans multiple hard registers, then we must indicate that every
974      register has an unusable value.  */
975   if (regno < FIRST_PSEUDO_REGISTER)
976     n = hard_regno_nregs[regno][GET_MODE (dest)];
977   else
978     n = 1;
979   if (n != 1)
980     {
981       while (--n >= 0)
982 	{
983 	  reg_seen[regno + n] = 1;
984 	  new_reg_base_value[regno + n] = 0;
985 	}
986       return;
987     }
988 
989   if (set)
990     {
991       /* A CLOBBER wipes out any old value but does not prevent a previously
992 	 unset register from acquiring a base address (i.e. reg_seen is not
993 	 set).  */
994       if (GET_CODE (set) == CLOBBER)
995 	{
996 	  new_reg_base_value[regno] = 0;
997 	  return;
998 	}
999       src = SET_SRC (set);
1000     }
1001   else
1002     {
1003       if (reg_seen[regno])
1004 	{
1005 	  new_reg_base_value[regno] = 0;
1006 	  return;
1007 	}
1008       reg_seen[regno] = 1;
1009       new_reg_base_value[regno] = gen_rtx_ADDRESS (Pmode,
1010 						   GEN_INT (unique_id++));
1011       return;
1012     }
1013 
1014   /* If this is not the first set of REGNO, see whether the new value
1015      is related to the old one.  There are two cases of interest:
1016 
1017 	(1) The register might be assigned an entirely new value
1018 	    that has the same base term as the original set.
1019 
1020 	(2) The set might be a simple self-modification that
1021 	    cannot change REGNO's base value.
1022 
1023      If neither case holds, reject the original base value as invalid.
1024      Note that the following situation is not detected:
1025 
1026 	 extern int x, y;  int *p = &x; p += (&y-&x);
1027 
1028      ANSI C does not allow computing the difference of addresses
1029      of distinct top level objects.  */
1030   if (new_reg_base_value[regno] != 0
1031       && find_base_value (src) != new_reg_base_value[regno])
1032     switch (GET_CODE (src))
1033       {
1034       case LO_SUM:
1035       case MINUS:
1036 	if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
1037 	  new_reg_base_value[regno] = 0;
1038 	break;
1039       case PLUS:
1040 	/* If the value we add in the PLUS is also a valid base value,
1041 	   this might be the actual base value, and the original value
1042 	   an index.  */
1043 	{
1044 	  rtx other = NULL_RTX;
1045 
1046 	  if (XEXP (src, 0) == dest)
1047 	    other = XEXP (src, 1);
1048 	  else if (XEXP (src, 1) == dest)
1049 	    other = XEXP (src, 0);
1050 
1051 	  if (! other || find_base_value (other))
1052 	    new_reg_base_value[regno] = 0;
1053 	  break;
1054 	}
1055       case AND:
1056 	if (XEXP (src, 0) != dest || GET_CODE (XEXP (src, 1)) != CONST_INT)
1057 	  new_reg_base_value[regno] = 0;
1058 	break;
1059       default:
1060 	new_reg_base_value[regno] = 0;
1061 	break;
1062       }
1063   /* If this is the first set of a register, record the value.  */
1064   else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
1065 	   && ! reg_seen[regno] && new_reg_base_value[regno] == 0)
1066     new_reg_base_value[regno] = find_base_value (src);
1067 
1068   reg_seen[regno] = 1;
1069 }
1070 
1071 /* Clear alias info for a register.  This is used if an RTL transformation
1072    changes the value of a register.  It is used in flow by AUTO_INC_DEC
1073    optimizations.  We don't need to clear reg_base_value, since flow only
1074    changes the offset.  */
1075 
1076 void
1077 clear_reg_alias_info (rtx reg)
1078 {
1079   unsigned int regno = REGNO (reg);
1080 
1081   if (regno >= FIRST_PSEUDO_REGISTER)
1082     {
1083       regno -= FIRST_PSEUDO_REGISTER;
1084       if (regno < reg_known_value_size)
1085 	{
1086 	  reg_known_value[regno] = reg;
1087 	  reg_known_equiv_p[regno] = false;
1088 	}
1089     }
1090 }
1091 
1092 /* If a value is known for REGNO, return it.  */
1093 
1094 rtx
1095 get_reg_known_value (unsigned int regno)
1096 {
1097   if (regno >= FIRST_PSEUDO_REGISTER)
1098     {
1099       regno -= FIRST_PSEUDO_REGISTER;
1100       if (regno < reg_known_value_size)
1101 	return reg_known_value[regno];
1102     }
1103   return NULL;
1104 }
1105 
1106 /* Set it.  */
1107 
1108 static void
1109 set_reg_known_value (unsigned int regno, rtx val)
1110 {
1111   if (regno >= FIRST_PSEUDO_REGISTER)
1112     {
1113       regno -= FIRST_PSEUDO_REGISTER;
1114       if (regno < reg_known_value_size)
1115 	reg_known_value[regno] = val;
1116     }
1117 }
1118 
1119 /* Similarly for reg_known_equiv_p.  */
1120 
1121 bool
1122 get_reg_known_equiv_p (unsigned int regno)
1123 {
1124   if (regno >= FIRST_PSEUDO_REGISTER)
1125     {
1126       regno -= FIRST_PSEUDO_REGISTER;
1127       if (regno < reg_known_value_size)
1128 	return reg_known_equiv_p[regno];
1129     }
1130   return false;
1131 }
1132 
1133 static void
1134 set_reg_known_equiv_p (unsigned int regno, bool val)
1135 {
1136   if (regno >= FIRST_PSEUDO_REGISTER)
1137     {
1138       regno -= FIRST_PSEUDO_REGISTER;
1139       if (regno < reg_known_value_size)
1140 	reg_known_equiv_p[regno] = val;
1141     }
1142 }
1143 
1144 
1145 /* Returns a canonical version of X, from the point of view of alias
1146    analysis.  (For example, if X is a MEM whose address is a register,
1147    and the register has a known value (say a SYMBOL_REF), then a MEM
1148    whose address is the SYMBOL_REF is returned.)  */
1149 
1150 rtx
1151 canon_rtx (rtx x)
1152 {
1153   /* Recursively look for equivalences.  */
1154   if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1155     {
1156       rtx t = get_reg_known_value (REGNO (x));
1157       if (t == x)
1158 	return x;
1159       if (t)
1160 	return canon_rtx (t);
1161     }
1162 
1163   if (GET_CODE (x) == PLUS)
1164     {
1165       rtx x0 = canon_rtx (XEXP (x, 0));
1166       rtx x1 = canon_rtx (XEXP (x, 1));
1167 
1168       if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
1169 	{
1170 	  if (GET_CODE (x0) == CONST_INT)
1171 	    return plus_constant (x1, INTVAL (x0));
1172 	  else if (GET_CODE (x1) == CONST_INT)
1173 	    return plus_constant (x0, INTVAL (x1));
1174 	  return gen_rtx_PLUS (GET_MODE (x), x0, x1);
1175 	}
1176     }
1177 
1178   /* This gives us much better alias analysis when called from
1179      the loop optimizer.   Note we want to leave the original
1180      MEM alone, but need to return the canonicalized MEM with
1181      all the flags with their original values.  */
1182   else if (MEM_P (x))
1183     x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));
1184 
1185   return x;
1186 }
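/* Editor's addition -- an illustrative sketch (the register number is
   hypothetical).  If pseudo 100 has the known value (symbol_ref "x"),
   then canon_rtx applied to

     (plus (reg 100) (const_int 4))

   folds the known value in via plus_constant, yielding the constant
   address x+4, so that references through different pseudos with the
   same known value compare equal.  */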
1187 
1188 /* Return 1 if X and Y are identical-looking rtx's.
1189    Expect that X and Y have already been canonicalized.
1190 
1191    We use the data in reg_known_value above to see if two registers with
1192    different numbers are, in fact, equivalent.  */
1193 
1194 static int
1195 rtx_equal_for_memref_p (rtx x, rtx y)
1196 {
1197   int i;
1198   int j;
1199   enum rtx_code code;
1200   const char *fmt;
1201 
1202   if (x == 0 && y == 0)
1203     return 1;
1204   if (x == 0 || y == 0)
1205     return 0;
1206 
1207   if (x == y)
1208     return 1;
1209 
1210   code = GET_CODE (x);
1211   /* Rtx's of different codes cannot be equal.  */
1212   if (code != GET_CODE (y))
1213     return 0;
1214 
1215   /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1216      (REG:SI x) and (REG:HI x) are NOT equivalent.  */
1217 
1218   if (GET_MODE (x) != GET_MODE (y))
1219     return 0;
1220 
1221   /* Some RTL can be compared without a recursive examination.  */
1222   switch (code)
1223     {
1224     case REG:
1225       return REGNO (x) == REGNO (y);
1226 
1227     case LABEL_REF:
1228       return XEXP (x, 0) == XEXP (y, 0);
1229 
1230     case SYMBOL_REF:
1231       return XSTR (x, 0) == XSTR (y, 0);
1232 
1233     case VALUE:
1234     case CONST_INT:
1235     case CONST_DOUBLE:
1236       /* There's no need to compare the contents of CONST_DOUBLEs or
1237 	 CONST_INTs because pointer equality is a good enough
1238 	 comparison for these nodes.  */
1239       return 0;
1240 
1241     default:
1242       break;
1243     }
1244 
1245   /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
1246   if (code == PLUS)
1247     return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
1248 	     && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
1249 	    || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
1250 		&& rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
1251   /* For commutative operations, the RTXs match if the operands match in either
1252      order.  Also handle the simple binary and unary cases without a loop.  */
1253   if (COMMUTATIVE_P (x))
1254     {
1255       rtx xop0 = canon_rtx (XEXP (x, 0));
1256       rtx yop0 = canon_rtx (XEXP (y, 0));
1257       rtx yop1 = canon_rtx (XEXP (y, 1));
1258 
1259       return ((rtx_equal_for_memref_p (xop0, yop0)
1260 	       && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
1261 	      || (rtx_equal_for_memref_p (xop0, yop1)
1262 		  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
1263     }
1264   else if (NON_COMMUTATIVE_P (x))
1265     {
1266       return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1267 				      canon_rtx (XEXP (y, 0)))
1268 	      && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
1269 					 canon_rtx (XEXP (y, 1))));
1270     }
1271   else if (UNARY_P (x))
1272     return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1273 				   canon_rtx (XEXP (y, 0)));
1274 
1275   /* Compare the elements.  If any pair of corresponding elements
1276      fail to match, return 0 for the whole thing.
1277 
1278      Limit cases to types which actually appear in addresses.  */
1279 
1280   fmt = GET_RTX_FORMAT (code);
1281   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1282     {
1283       switch (fmt[i])
1284 	{
1285 	case 'i':
1286 	  if (XINT (x, i) != XINT (y, i))
1287 	    return 0;
1288 	  break;
1289 
1290 	case 'E':
1291 	  /* Two vectors must have the same length.  */
1292 	  if (XVECLEN (x, i) != XVECLEN (y, i))
1293 	    return 0;
1294 
1295 	  /* And the corresponding elements must match.  */
1296 	  for (j = 0; j < XVECLEN (x, i); j++)
1297 	    if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
1298 					canon_rtx (XVECEXP (y, i, j))) == 0)
1299 	      return 0;
1300 	  break;
1301 
1302 	case 'e':
1303 	  if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
1304 				      canon_rtx (XEXP (y, i))) == 0)
1305 	    return 0;
1306 	  break;
1307 
1308 	  /* This can happen for asm operands.  */
1309 	case 's':
1310 	  if (strcmp (XSTR (x, i), XSTR (y, i)))
1311 	    return 0;
1312 	  break;
1313 
1314 	/* This can happen for an asm which clobbers memory.  */
1315 	case '0':
1316 	  break;
1317 
1318 	  /* It is believed that rtx's at this level will never
1319 	     contain anything but integers and other rtx's,
1320 	     except for within LABEL_REFs and SYMBOL_REFs.  */
1321 	default:
1322 	  gcc_unreachable ();
1323 	}
1324     }
1325   return 1;
1326 }
1327 
1328 /* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
1329    X and return it, or return 0 if none found.  */
1330 
1331 static rtx
1332 find_symbolic_term (rtx x)
1333 {
1334   int i;
1335   enum rtx_code code;
1336   const char *fmt;
1337 
1338   code = GET_CODE (x);
1339   if (code == SYMBOL_REF || code == LABEL_REF)
1340     return x;
1341   if (OBJECT_P (x))
1342     return 0;
1343 
1344   fmt = GET_RTX_FORMAT (code);
1345   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1346     {
1347       rtx t;
1348 
1349       if (fmt[i] == 'e')
1350 	{
1351 	  t = find_symbolic_term (XEXP (x, i));
1352 	  if (t != 0)
1353 	    return t;
1354 	}
1355       else if (fmt[i] == 'E')
1356 	break;
1357     }
1358   return 0;
1359 }
1360 
1361 rtx
1362 find_base_term (rtx x)
1363 {
1364   cselib_val *val;
1365   struct elt_loc_list *l;
1366 
1367 #if defined (FIND_BASE_TERM)
1368   /* Try machine-dependent ways to find the base term.  */
1369   x = FIND_BASE_TERM (x);
1370 #endif
1371 
1372   switch (GET_CODE (x))
1373     {
1374     case REG:
1375       return REG_BASE_VALUE (x);
1376 
1377     case TRUNCATE:
1378       if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
1379 	return 0;
1380       /* Fall through.  */
1381     case HIGH:
1382     case PRE_INC:
1383     case PRE_DEC:
1384     case POST_INC:
1385     case POST_DEC:
1386     case PRE_MODIFY:
1387     case POST_MODIFY:
1388       return find_base_term (XEXP (x, 0));
1389 
1390     case ZERO_EXTEND:
1391     case SIGN_EXTEND:	/* Used for Alpha/NT pointers */
1392       {
1393 	rtx temp = find_base_term (XEXP (x, 0));
1394 
1395 	if (temp != 0 && CONSTANT_P (temp))
1396 	  temp = convert_memory_address (Pmode, temp);
1397 
1398 	return temp;
1399       }
1400 
1401     case VALUE:
1402       val = CSELIB_VAL_PTR (x);
1403       if (!val)
1404 	return 0;
1405       for (l = val->locs; l; l = l->next)
1406 	if ((x = find_base_term (l->loc)) != 0)
1407 	  return x;
1408       return 0;
1409 
1410     case CONST:
1411       x = XEXP (x, 0);
1412       if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
1413 	return 0;
1414       /* Fall through.  */
1415     case LO_SUM:
1416     case PLUS:
1417     case MINUS:
1418       {
1419 	rtx tmp1 = XEXP (x, 0);
1420 	rtx tmp2 = XEXP (x, 1);
1421 
1422 	/* This is a little bit tricky since we have to determine which of
1423 	   the two operands represents the real base address.  Otherwise this
1424 	   routine may return the index register instead of the base register.
1425 
1426 	   That may cause us to believe no aliasing was possible, when in
1427 	   fact aliasing is possible.
1428 
1429 	   We use a few simple tests to guess the base register.  Additional
1430 	   tests can certainly be added.  For example, if one of the operands
1431 	   is a shift or multiply, then it must be the index register and the
1432 	   other operand is the base register.  */
1433 
1434 	if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
1435 	  return find_base_term (tmp2);
1436 
1437 	/* If either operand is known to be a pointer, then use it
1438 	   to determine the base term.  */
1439 	if (REG_P (tmp1) && REG_POINTER (tmp1))
1440 	  return find_base_term (tmp1);
1441 
1442 	if (REG_P (tmp2) && REG_POINTER (tmp2))
1443 	  return find_base_term (tmp2);
1444 
1445 	/* Neither operand was known to be a pointer.  Go ahead and find the
1446 	   base term for both operands.  */
1447 	tmp1 = find_base_term (tmp1);
1448 	tmp2 = find_base_term (tmp2);
1449 
1450 	/* If either base term is a named object or a special address
1451 	   (like an argument or stack reference), then use it for the
1452 	   base term.  */
1453 	if (tmp1 != 0
1454 	    && (GET_CODE (tmp1) == SYMBOL_REF
1455 		|| GET_CODE (tmp1) == LABEL_REF
1456 		|| (GET_CODE (tmp1) == ADDRESS
1457 		    && GET_MODE (tmp1) != VOIDmode)))
1458 	  return tmp1;
1459 
1460 	if (tmp2 != 0
1461 	    && (GET_CODE (tmp2) == SYMBOL_REF
1462 		|| GET_CODE (tmp2) == LABEL_REF
1463 		|| (GET_CODE (tmp2) == ADDRESS
1464 		    && GET_MODE (tmp2) != VOIDmode)))
1465 	  return tmp2;
1466 
1467 	/* We could not determine which of the two operands was the
1468 	   base register and which was the index.  So we can determine
1469 	   nothing from the base alias check.  */
1470 	return 0;
1471       }
1472 
1473     case AND:
1474       if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) != 0)
1475 	return find_base_term (XEXP (x, 0));
1476       return 0;
1477 
1478     case SYMBOL_REF:
1479     case LABEL_REF:
1480       return x;
1481 
1482     default:
1483       return 0;
1484     }
1485 }
1486 
1487 /* Return 0 if the addresses X and Y are known to point to different
1488    objects, 1 if they might be pointers to the same object.  */
1489 
1490 static int
1491 base_alias_check (rtx x, rtx y, enum machine_mode x_mode,
1492 		  enum machine_mode y_mode)
1493 {
1494   rtx x_base = find_base_term (x);
1495   rtx y_base = find_base_term (y);
1496 
1497   /* If the address itself has no known base, see if a known equivalent
1498      value has one.  If either address still has no known base, nothing
1499      is known about aliasing.  */
1500   if (x_base == 0)
1501     {
1502       rtx x_c;
1503 
1504       if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
1505 	return 1;
1506 
1507       x_base = find_base_term (x_c);
1508       if (x_base == 0)
1509 	return 1;
1510     }
1511 
1512   if (y_base == 0)
1513     {
1514       rtx y_c;
1515       if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
1516 	return 1;
1517 
1518       y_base = find_base_term (y_c);
1519       if (y_base == 0)
1520 	return 1;
1521     }
1522 
1523   /* If the base addresses are equal nothing is known about aliasing.  */
1524   if (rtx_equal_p (x_base, y_base))
1525     return 1;
1526 
1527   /* The base addresses of the read and write are different expressions.
1528      If they are both symbols and they are not accessed via AND, there is
1529      no conflict.  We can bring knowledge of object alignment into play
1530      here.  For example, on alpha, "char a, b;" can alias one another,
1531      though "char a; long b;" cannot.  */
1532   if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
1533     {
1534       if (GET_CODE (x) == AND && GET_CODE (y) == AND)
1535 	return 1;
1536       if (GET_CODE (x) == AND
1537 	  && (GET_CODE (XEXP (x, 1)) != CONST_INT
1538 	      || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
1539 	return 1;
1540       if (GET_CODE (y) == AND
1541 	  && (GET_CODE (XEXP (y, 1)) != CONST_INT
1542 	      || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
1543 	return 1;
1544       /* Differing symbols never alias.  */
1545       return 0;
1546     }
1547 
1548   /* If one address is a stack reference, there can be no alias:
1549      stack references using different base registers do not alias,
1550      a stack reference cannot alias a parameter, and a stack reference
1551      cannot alias a global.  */
1552   if ((GET_CODE (x_base) == ADDRESS && GET_MODE (x_base) == Pmode)
1553       || (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode))
1554     return 0;
1555 
1556   if (! flag_argument_noalias)
1557     return 1;
1558 
1559   if (flag_argument_noalias > 1)
1560     return 0;
1561 
1562   /* Weak noalias assertion (arguments are distinct, but may match globals).  */
1563   return ! (GET_MODE (x_base) == VOIDmode && GET_MODE (y_base) == VOIDmode);
1564 }
1565 
1566 /* Convert the address X into something we can use.  This is done by returning
1567    it unchanged unless it is a value; in the latter case we call cselib to get
1568    a more useful rtx.  */
1569 
1570 rtx
1571 get_addr (rtx x)
1572 {
1573   cselib_val *v;
1574   struct elt_loc_list *l;
1575 
1576   if (GET_CODE (x) != VALUE)
1577     return x;
1578   v = CSELIB_VAL_PTR (x);
1579   if (v)
1580     {
1581       for (l = v->locs; l; l = l->next)
1582 	if (CONSTANT_P (l->loc))
1583 	  return l->loc;
1584       for (l = v->locs; l; l = l->next)
1585 	if (!REG_P (l->loc) && !MEM_P (l->loc))
1586 	  return l->loc;
1587       if (v->locs)
1588 	return v->locs->loc;
1589     }
1590   return x;
1591 }
1592 
1593 /*  Return the address of the (N_REFS + 1)th memory reference to ADDR
1594     where SIZE is the size in bytes of the memory reference.  If ADDR
1595     is not modified by the memory reference then ADDR is returned.  */
1596 
1597 static rtx
1598 addr_side_effect_eval (rtx addr, int size, int n_refs)
1599 {
1600   int offset = 0;
1601 
1602   switch (GET_CODE (addr))
1603     {
1604     case PRE_INC:
1605       offset = (n_refs + 1) * size;
1606       break;
1607     case PRE_DEC:
1608       offset = -(n_refs + 1) * size;
1609       break;
1610     case POST_INC:
1611       offset = n_refs * size;
1612       break;
1613     case POST_DEC:
1614       offset = -n_refs * size;
1615       break;
1616 
1617     default:
1618       return addr;
1619     }
1620 
1621   if (offset)
1622     addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
1623 			 GEN_INT (offset));
1624   else
1625     addr = XEXP (addr, 0);
1626   addr = canon_rtx (addr);
1627 
1628   return addr;
1629 }
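/* Editor's addition -- an illustrative sketch.  For a 4-byte
   reference through (post_inc (reg 100)):

     addr_side_effect_eval (addr, 4, 0)   yields (reg 100)
     addr_side_effect_eval (addr, 4, 1)   yields (plus (reg 100)
					          (const_int 4))

   i.e. the first reference uses the unmodified address, while a
   second reference would see the incremented one.  */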
1630 
1631 /* Return nonzero if X and Y (memory addresses) could reference the
1632    same location in memory.  C is an offset accumulator.  When
1633    C is nonzero, we are testing aliases between X and Y + C.
1634    XSIZE is the size in bytes of the X reference,
1635    similarly YSIZE is the size in bytes for Y.
1636    Expect that canon_rtx has been already called for X and Y.
1637 
1638    If XSIZE or YSIZE is zero, we do not know the amount of memory being
1639    referenced (the reference was BLKmode), so make the most pessimistic
1640    assumptions.
1641 
1642    If XSIZE or YSIZE is negative, we may access memory outside the object
1643    being referenced as a side effect.  This can happen when using AND to
1644    align memory references, as is done on the Alpha.
1645 
1646    It would be nice to notice that varying addresses cannot conflict with
1647    fp if no local variables had their addresses taken, but that's too hard now.  */
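/* A worked example with hypothetical values: for X == Y == (reg),
   XSIZE = YSIZE = 8 and C = 4, the X bytes are 0..7 and the Y reference
   starts at byte 4, so a conflict is reported; with C = 8 the references
   are disjoint and 0 is returned.  */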
1648 
1649 static int
1650 memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
1651 {
1652   if (GET_CODE (x) == VALUE)
1653     x = get_addr (x);
1654   if (GET_CODE (y) == VALUE)
1655     y = get_addr (y);
1656   if (GET_CODE (x) == HIGH)
1657     x = XEXP (x, 0);
1658   else if (GET_CODE (x) == LO_SUM)
1659     x = XEXP (x, 1);
1660   else
1661     x = addr_side_effect_eval (x, xsize, 0);
1662   if (GET_CODE (y) == HIGH)
1663     y = XEXP (y, 0);
1664   else if (GET_CODE (y) == LO_SUM)
1665     y = XEXP (y, 1);
1666   else
1667     y = addr_side_effect_eval (y, ysize, 0);
1668 
1669   if (rtx_equal_for_memref_p (x, y))
1670     {
1671       if (xsize <= 0 || ysize <= 0)
1672 	return 1;
1673       if (c >= 0 && xsize > c)
1674 	return 1;
1675       if (c < 0 && ysize+c > 0)
1676 	return 1;
1677       return 0;
1678     }
1679 
1680   /* This code used to check for conflicts involving stack references and
1681      globals but the base address alias code now handles these cases.  */
1682 
1683   if (GET_CODE (x) == PLUS)
1684     {
1685       /* The fact that X is canonicalized means that this
1686 	 PLUS rtx is canonicalized.  */
1687       rtx x0 = XEXP (x, 0);
1688       rtx x1 = XEXP (x, 1);
1689 
1690       if (GET_CODE (y) == PLUS)
1691 	{
1692 	  /* The fact that Y is canonicalized means that this
1693 	     PLUS rtx is canonicalized.  */
1694 	  rtx y0 = XEXP (y, 0);
1695 	  rtx y1 = XEXP (y, 1);
1696 
1697 	  if (rtx_equal_for_memref_p (x1, y1))
1698 	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
1699 	  if (rtx_equal_for_memref_p (x0, y0))
1700 	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
1701 	  if (GET_CODE (x1) == CONST_INT)
1702 	    {
1703 	      if (GET_CODE (y1) == CONST_INT)
1704 		return memrefs_conflict_p (xsize, x0, ysize, y0,
1705 					   c - INTVAL (x1) + INTVAL (y1));
1706 	      else
1707 		return memrefs_conflict_p (xsize, x0, ysize, y,
1708 					   c - INTVAL (x1));
1709 	    }
1710 	  else if (GET_CODE (y1) == CONST_INT)
1711 	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
1712 
1713 	  return 1;
1714 	}
1715       else if (GET_CODE (x1) == CONST_INT)
1716 	return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
1717     }
1718   else if (GET_CODE (y) == PLUS)
1719     {
1720       /* The fact that Y is canonicalized means that this
1721 	 PLUS rtx is canonicalized.  */
1722       rtx y0 = XEXP (y, 0);
1723       rtx y1 = XEXP (y, 1);
1724 
1725       if (GET_CODE (y1) == CONST_INT)
1726 	return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
1727       else
1728 	return 1;
1729     }
1730 
1731   if (GET_CODE (x) == GET_CODE (y))
1732     switch (GET_CODE (x))
1733       {
1734       case MULT:
1735 	{
1736 	  /* Handle cases where we expect the second operands to be the
1737 	     same, and check only whether the first operand would conflict
1738 	     or not.  */
1739 	  rtx x0, y0;
1740 	  rtx x1 = canon_rtx (XEXP (x, 1));
1741 	  rtx y1 = canon_rtx (XEXP (y, 1));
1742 	  if (! rtx_equal_for_memref_p (x1, y1))
1743 	    return 1;
1744 	  x0 = canon_rtx (XEXP (x, 0));
1745 	  y0 = canon_rtx (XEXP (y, 0));
1746 	  if (rtx_equal_for_memref_p (x0, y0))
1747 	    return (xsize == 0 || ysize == 0
1748 		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
1749 
1750 	  /* Can't properly adjust our sizes.  */
1751 	  if (GET_CODE (x1) != CONST_INT)
1752 	    return 1;
1753 	  xsize /= INTVAL (x1);
1754 	  ysize /= INTVAL (x1);
1755 	  c /= INTVAL (x1);
1756 	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
1757 	}
1758 
1759       default:
1760 	break;
1761       }
1762 
1763   /* Treat an access through an AND (e.g. a subword access on an Alpha)
1764      as an access with indeterminate size.  Assume that references
1765      besides AND are aligned, so if the size of the other reference is
1766      at least as large as the alignment, assume no other overlap.  */
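  /* E.g. (Alpha-style masking, hypothetical operands): for
     X = (and (reg) (const_int -8)) the low three address bits are
     cleared, so the access may touch any byte of the enclosing aligned
     8-byte word; if the other reference is smaller than 8 bytes, XSIZE
     is therefore forced to -1 (indeterminate) below.  */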
1767   if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT)
1768     {
1769       if (GET_CODE (y) == AND || ysize < -INTVAL (XEXP (x, 1)))
1770 	xsize = -1;
1771       return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ysize, y, c);
1772     }
1773   if (GET_CODE (y) == AND && GET_CODE (XEXP (y, 1)) == CONST_INT)
1774     {
1775       /* ??? If we are indexing far enough into the array/structure, we
1776 	 may yet be able to determine that we can not overlap.  But we
1777 	 also need to know that we are far enough from the end not to overlap
1778 	 a following reference, so we do nothing with that for now.  */
1779       if (GET_CODE (x) == AND || xsize < -INTVAL (XEXP (y, 1)))
1780 	ysize = -1;
1781       return memrefs_conflict_p (xsize, x, ysize, canon_rtx (XEXP (y, 0)), c);
1782     }
1783 
1784   if (CONSTANT_P (x))
1785     {
1786       if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
1787 	{
1788 	  c += (INTVAL (y) - INTVAL (x));
1789 	  return (xsize <= 0 || ysize <= 0
1790 		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
1791 	}
1792 
1793       if (GET_CODE (x) == CONST)
1794 	{
1795 	  if (GET_CODE (y) == CONST)
1796 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
1797 				       ysize, canon_rtx (XEXP (y, 0)), c);
1798 	  else
1799 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
1800 				       ysize, y, c);
1801 	}
1802       if (GET_CODE (y) == CONST)
1803 	return memrefs_conflict_p (xsize, x, ysize,
1804 				   canon_rtx (XEXP (y, 0)), c);
1805 
1806       if (CONSTANT_P (y))
1807 	return (xsize <= 0 || ysize <= 0
1808 		|| (rtx_equal_for_memref_p (x, y)
1809 		    && ((c >= 0 && xsize > c) || (c < 0 && ysize+c > 0))));
1810 
1811       return 1;
1812     }
1813   return 1;
1814 }
1815 
1816 /* Functions to compute memory dependencies.
1817 
1818    Since we process the insns in execution order, we can build tables
1819    to keep track of what registers are fixed (and not aliased), what registers
1820    are varying in known ways, and what registers are varying in unknown
1821    ways.
1822 
1823    If both memory references are volatile, then there must always be a
1824    dependence between the two references, since their order can not be
1825    changed.  A volatile and non-volatile reference can be interchanged
1826    though.
1827 
1828    A MEM_IN_STRUCT reference at a non-AND varying address can never
1829    conflict with a non-MEM_IN_STRUCT reference at a fixed address.  We
1830    also must allow AND addresses, because they may generate accesses
1831    outside the object being referenced.  This is used to generate
1832    aligned addresses from unaligned addresses, for instance, the alpha
1833    storeqi_unaligned pattern.  */
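/* For instance, a volatile load may be interchanged with a non-volatile
   store to a provably distinct object, but never with another volatile
   access, whatever the addresses involved.  */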
1834 
1835 /* Read dependence: X is read after read in MEM takes place.  There can
1836    only be a dependence here if both reads are volatile.  */
1837 
1838 int
1839 read_dependence (rtx mem, rtx x)
1840 {
1841   return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
1842 }
1843 
1844 /* Returns MEM1 if and only if MEM1 is a scalar at a fixed address and
1845    MEM2 is a reference to a structure at a varying address, or returns
1846    MEM2 if vice versa.  Otherwise, returns NULL_RTX.  If a non-NULL
1847    value is returned, MEM1 and MEM2 can never alias.  VARIES_P is used
1848    to decide whether or not an address may vary; it should return
1849    nonzero whenever variation is possible.
1850    MEM1_ADDR and MEM2_ADDR are the addresses of MEM1 and MEM2.  */
1851 
1852 static rtx
1853 fixed_scalar_and_varying_struct_p (rtx mem1, rtx mem2, rtx mem1_addr,
1854 				   rtx mem2_addr,
1855 				   int (*varies_p) (rtx, int))
1856 {
1857   if (! flag_strict_aliasing)
1858     return NULL_RTX;
1859 
1860   if (MEM_SCALAR_P (mem1) && MEM_IN_STRUCT_P (mem2)
1861       && !varies_p (mem1_addr, 1) && varies_p (mem2_addr, 1))
1862     /* MEM1 is a scalar at a fixed address; MEM2 is a struct at a
1863        varying address.  */
1864     return mem1;
1865 
1866   if (MEM_IN_STRUCT_P (mem1) && MEM_SCALAR_P (mem2)
1867       && varies_p (mem1_addr, 1) && !varies_p (mem2_addr, 1))
1868     /* MEM2 is a scalar at a fixed address; MEM1 is a struct at a
1869        varying address.  */
1870     return mem2;
1871 
1872   return NULL_RTX;
1873 }
1874 
1875 /* Returns nonzero if something about the mode or address format of MEM
1876    indicates that it might well alias *anything*.  */
1877 
1878 static int
1879 aliases_everything_p (rtx mem)
1880 {
1881   if (GET_CODE (XEXP (mem, 0)) == AND)
1882     /* If the address is an AND, it's very hard to know what it is
1883        actually pointing at.  */
1884     return 1;
1885 
1886   return 0;
1887 }
1888 
1889 /* Return true if we can determine that the fields referenced cannot
1890    overlap for any pair of objects.  */
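/* For example, given (in C)

     struct S { int a; int b; } *p, *q;

   the references p->a and q->b name different FIELD_DECLs of the same
   RECORD_TYPE, so they cannot overlap no matter what P and Q point to.  */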
1891 
1892 static bool
1893 nonoverlapping_component_refs_p (tree x, tree y)
1894 {
1895   tree fieldx, fieldy, typex, typey, orig_y;
1896 
1897   do
1898     {
1899       /* The comparison has to be done at a common type, since we don't
1900 	 know how the inheritance hierarchy works.  */
1901       orig_y = y;
1902       do
1903 	{
1904 	  fieldx = TREE_OPERAND (x, 1);
1905 	  typex = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldx));
1906 
1907 	  y = orig_y;
1908 	  do
1909 	    {
1910 	      fieldy = TREE_OPERAND (y, 1);
1911 	      typey = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldy));
1912 
1913 	      if (typex == typey)
1914 		goto found;
1915 
1916 	      y = TREE_OPERAND (y, 0);
1917 	    }
1918 	  while (y && TREE_CODE (y) == COMPONENT_REF);
1919 
1920 	  x = TREE_OPERAND (x, 0);
1921 	}
1922       while (x && TREE_CODE (x) == COMPONENT_REF);
1923       /* Never found a common type.  */
1924       return false;
1925 
1926     found:
1927       /* If we're left with accessing different fields of a structure,
1928 	 then no overlap.  */
1929       if (TREE_CODE (typex) == RECORD_TYPE
1930 	  && fieldx != fieldy)
1931 	return true;
1932 
1933       /* The comparison on the current field failed.  If we're accessing
1934 	 a deeply nested structure, look at the next outer level.  */
1935       x = TREE_OPERAND (x, 0);
1936       y = TREE_OPERAND (y, 0);
1937     }
1938   while (x && y
1939 	 && TREE_CODE (x) == COMPONENT_REF
1940 	 && TREE_CODE (y) == COMPONENT_REF);
1941 
1942   return false;
1943 }
1944 
1945 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it.  */
1946 
1947 static tree
1948 decl_for_component_ref (tree x)
1949 {
1950   do
1951     {
1952       x = TREE_OPERAND (x, 0);
1953     }
1954   while (x && TREE_CODE (x) == COMPONENT_REF);
1955 
1956   return x && DECL_P (x) ? x : NULL_TREE;
1957 }
1958 
1959 /* Walk up the COMPONENT_REF list and adjust OFFSET to compensate for the
1960    offset of the field reference.  */
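/* For example (hypothetical layout): for X = a.b.c, where field b sits
   at byte offset 8 within a and field c at byte offset 4 within b, an
   OFFSET of 0 for the innermost reference is adjusted to GEN_INT (12).  */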
1961 
1962 static rtx
1963 adjust_offset_for_component_ref (tree x, rtx offset)
1964 {
1965   HOST_WIDE_INT ioffset;
1966 
1967   if (! offset)
1968     return NULL_RTX;
1969 
1970   ioffset = INTVAL (offset);
1971   do
1972     {
1973       tree offset = component_ref_field_offset (x);
1974       tree field = TREE_OPERAND (x, 1);
1975 
1976       if (! host_integerp (offset, 1))
1977 	return NULL_RTX;
1978       ioffset += (tree_low_cst (offset, 1)
1979 		  + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
1980 		     / BITS_PER_UNIT));
1981 
1982       x = TREE_OPERAND (x, 0);
1983     }
1984   while (x && TREE_CODE (x) == COMPONENT_REF);
1985 
1986   return GEN_INT (ioffset);
1987 }
1988 
1989 /* Return nonzero if we can determine the exprs corresponding to memrefs
1990    X and Y and they do not overlap.  */
1991 
1992 static int
1993 nonoverlapping_memrefs_p (rtx x, rtx y)
1994 {
1995   tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
1996   rtx rtlx, rtly;
1997   rtx basex, basey;
1998   rtx moffsetx, moffsety;
1999   HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;
2000 
2001   /* Unless both have exprs, we can't tell anything.  */
2002   if (exprx == 0 || expry == 0)
2003     return 0;
2004 
2005   /* If both are field references, we may be able to determine something.  */
2006   if (TREE_CODE (exprx) == COMPONENT_REF
2007       && TREE_CODE (expry) == COMPONENT_REF
2008       && nonoverlapping_component_refs_p (exprx, expry))
2009     return 1;
2010 
2011 
2012   /* If the field reference test failed, look at the DECLs involved.  */
2013   moffsetx = MEM_OFFSET (x);
2014   if (TREE_CODE (exprx) == COMPONENT_REF)
2015     {
2016       if (TREE_CODE (expry) == VAR_DECL
2017 	  && POINTER_TYPE_P (TREE_TYPE (expry)))
2018 	{
2019 	 tree field = TREE_OPERAND (exprx, 1);
2020 	 tree fieldcontext = DECL_FIELD_CONTEXT (field);
2021 	 if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
2022 						       TREE_TYPE (field)))
2023 	   return 1;
2024 	}
2025       {
2026 	tree t = decl_for_component_ref (exprx);
2027 	if (! t)
2028 	  return 0;
2029 	moffsetx = adjust_offset_for_component_ref (exprx, moffsetx);
2030 	exprx = t;
2031       }
2032     }
2033   else if (INDIRECT_REF_P (exprx))
2034     {
2035       exprx = TREE_OPERAND (exprx, 0);
2036       if (flag_argument_noalias < 2
2037 	  || TREE_CODE (exprx) != PARM_DECL)
2038 	return 0;
2039     }
2040 
2041   moffsety = MEM_OFFSET (y);
2042   if (TREE_CODE (expry) == COMPONENT_REF)
2043     {
2044       if (TREE_CODE (exprx) == VAR_DECL
2045 	  && POINTER_TYPE_P (TREE_TYPE (exprx)))
2046 	{
2047 	 tree field = TREE_OPERAND (expry, 1);
2048 	 tree fieldcontext = DECL_FIELD_CONTEXT (field);
2049 	 if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
2050 						       TREE_TYPE (field)))
2051 	   return 1;
2052 	}
2053       {
2054 	tree t = decl_for_component_ref (expry);
2055 	if (! t)
2056 	  return 0;
2057 	moffsety = adjust_offset_for_component_ref (expry, moffsety);
2058 	expry = t;
2059       }
2060     }
2061   else if (INDIRECT_REF_P (expry))
2062     {
2063       expry = TREE_OPERAND (expry, 0);
2064       if (flag_argument_noalias < 2
2065 	  || TREE_CODE (expry) != PARM_DECL)
2066 	return 0;
2067     }
2068 
2069   if (! DECL_P (exprx) || ! DECL_P (expry))
2070     return 0;
2071 
2072   rtlx = DECL_RTL (exprx);
2073   rtly = DECL_RTL (expry);
2074 
2075   /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2076      can't overlap unless they are the same because we never reuse that part
2077      of the stack frame used for locals for spilled pseudos.  */
2078   if ((!MEM_P (rtlx) || !MEM_P (rtly))
2079       && ! rtx_equal_p (rtlx, rtly))
2080     return 1;
2081 
2082   /* Get the base and offsets of both decls.  If either is a register, we
2083      know both are and are the same, so use that as the base.  The only way
2084      we can avoid overlap is if we can deduce that they are nonoverlapping
2085      pieces of that decl, which is very rare.  */
2086   basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2087   if (GET_CODE (basex) == PLUS && GET_CODE (XEXP (basex, 1)) == CONST_INT)
2088     offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2089 
2090   basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2091   if (GET_CODE (basey) == PLUS && GET_CODE (XEXP (basey, 1)) == CONST_INT)
2092     offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2093 
2094   /* If the bases are different, we know they do not overlap if both
2095      are constants or if one is a constant and the other a pointer into the
2096      stack frame.  Otherwise a different base means we can't tell if they
2097      overlap or not.  */
2098   if (! rtx_equal_p (basex, basey))
2099     return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2100 	    || (CONSTANT_P (basex) && REG_P (basey)
2101 		&& REGNO_PTR_FRAME_P (REGNO (basey)))
2102 	    || (CONSTANT_P (basey) && REG_P (basex)
2103 		&& REGNO_PTR_FRAME_P (REGNO (basex))));
2104 
2105   sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2106 	   : MEM_SIZE (rtlx) ? INTVAL (MEM_SIZE (rtlx))
2107 	   : -1);
2108   sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2109 	   : MEM_SIZE (rtly) ? INTVAL (MEM_SIZE (rtly))
2110 	   : -1);
2111 
2112   /* If we have an offset for either memref, it can update the values computed
2113      above.  */
2114   if (moffsetx)
2115     offsetx += INTVAL (moffsetx), sizex -= INTVAL (moffsetx);
2116   if (moffsety)
2117     offsety += INTVAL (moffsety), sizey -= INTVAL (moffsety);
2118 
2119   /* If a memref has both a size and an offset, we can use the smaller size.
2120      We can't do this if the offset isn't known because we must view this
2121      memref as being anywhere inside the DECL's MEM.  */
2122   if (MEM_SIZE (x) && moffsetx)
2123     sizex = INTVAL (MEM_SIZE (x));
2124   if (MEM_SIZE (y) && moffsety)
2125     sizey = INTVAL (MEM_SIZE (y));
2126 
2127   /* Put the values of the memref with the lower offset in X's values.  */
2128   if (offsetx > offsety)
2129     {
2130       tem = offsetx, offsetx = offsety, offsety = tem;
2131       tem = sizex, sizex = sizey, sizey = tem;
2132     }
2133 
2134   /* If we don't know the size of the lower-offset value, we can't tell
2135      if they conflict.  Otherwise, we do the test.  */
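  /* For example (hypothetical values): OFFSETX = 0, SIZEX = 4 and
     OFFSETY = 4 are nonoverlapping (4 >= 0 + 4), while OFFSETY = 2
     would leave a possible conflict.  */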
2136   return sizex >= 0 && offsety >= offsetx + sizex;
2137 }
2138 
2139 /* True dependence: X is read after store in MEM takes place.  */
2140 
2141 int
2142 true_dependence (rtx mem, enum machine_mode mem_mode, rtx x,
2143 		 int (*varies) (rtx, int))
2144 {
2145   rtx x_addr, mem_addr;
2146   rtx base;
2147 
2148   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2149     return 1;
2150 
2151   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2152      This is used in epilogue deallocation functions.  */
2153   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2154     return 1;
2155   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2156     return 1;
2157   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2158       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2159     return 1;
2160 
2161   if (DIFFERENT_ALIAS_SETS_P (x, mem))
2162     return 0;
2163 
2164   /* Read-only memory is by definition never modified, and therefore can't
2165      conflict with anything.  We don't expect to find read-only set on MEM,
2166      but stupid user tricks can produce them, so don't die.  */
2167   if (MEM_READONLY_P (x))
2168     return 0;
2169 
2170   if (nonoverlapping_memrefs_p (mem, x))
2171     return 0;
2172 
2173   if (mem_mode == VOIDmode)
2174     mem_mode = GET_MODE (mem);
2175 
2176   x_addr = get_addr (XEXP (x, 0));
2177   mem_addr = get_addr (XEXP (mem, 0));
2178 
2179   base = find_base_term (x_addr);
2180   if (base && (GET_CODE (base) == LABEL_REF
2181 	       || (GET_CODE (base) == SYMBOL_REF
2182 		   && CONSTANT_POOL_ADDRESS_P (base))))
2183     return 0;
2184 
2185   if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
2186     return 0;
2187 
2188   x_addr = canon_rtx (x_addr);
2189   mem_addr = canon_rtx (mem_addr);
2190 
2191   if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2192 			    SIZE_FOR_MODE (x), x_addr, 0))
2193     return 0;
2194 
2195   if (aliases_everything_p (x))
2196     return 1;
2197 
2198   /* We cannot use aliases_everything_p to test MEM, since we must look
2199      at MEM_MODE, rather than GET_MODE (MEM).  */
2200   if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
2201     return 1;
2202 
2203   /* In true_dependence we also allow BLKmode to alias anything.  Why
2204      don't we do this in anti_dependence and output_dependence?  */
2205   if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
2206     return 1;
2207 
2208   return ! fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
2209 					      varies);
2210 }
2211 
2212 /* Canonical true dependence: X is read after store in MEM takes place.
2213    Variant of true_dependence which assumes MEM has already been
2214    canonicalized (hence we no longer do that here).
2215    The mem_addr argument has been added, since true_dependence computed
2216    this value prior to canonicalizing.  */
2217 
2218 int
2219 canon_true_dependence (rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2220 		       rtx x, int (*varies) (rtx, int))
2221 {
2222   rtx x_addr;
2223 
2224   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2225     return 1;
2226 
2227   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2228      This is used in epilogue deallocation functions.  */
2229   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2230     return 1;
2231   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2232     return 1;
2233   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2234       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2235     return 1;
2236 
2237   if (DIFFERENT_ALIAS_SETS_P (x, mem))
2238     return 0;
2239 
2240   /* Read-only memory is by definition never modified, and therefore can't
2241      conflict with anything.  We don't expect to find read-only set on MEM,
2242      but stupid user tricks can produce them, so don't die.  */
2243   if (MEM_READONLY_P (x))
2244     return 0;
2245 
2246   if (nonoverlapping_memrefs_p (x, mem))
2247     return 0;
2248 
2249   x_addr = get_addr (XEXP (x, 0));
2250 
2251   if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
2252     return 0;
2253 
2254   x_addr = canon_rtx (x_addr);
2255   if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2256 			    SIZE_FOR_MODE (x), x_addr, 0))
2257     return 0;
2258 
2259   if (aliases_everything_p (x))
2260     return 1;
2261 
2262   /* We cannot use aliases_everything_p to test MEM, since we must look
2263      at MEM_MODE, rather than GET_MODE (MEM).  */
2264   if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
2265     return 1;
2266 
2267   /* In true_dependence we also allow BLKmode to alias anything.  Why
2268      don't we do this in anti_dependence and output_dependence?  */
2269   if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
2270     return 1;
2271 
2272   return ! fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
2273 					      varies);
2274 }
2275 
2276 /* Returns nonzero if a write to X might alias a previous read from
2277    (or, if WRITEP is nonzero, a write to) MEM.  */
2278 
2279 static int
2280 write_dependence_p (rtx mem, rtx x, int writep)
2281 {
2282   rtx x_addr, mem_addr;
2283   rtx fixed_scalar;
2284   rtx base;
2285 
2286   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2287     return 1;
2288 
2289   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2290      This is used in epilogue deallocation functions.  */
2291   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2292     return 1;
2293   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2294     return 1;
2295   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2296       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2297     return 1;
2298 
2299   if (DIFFERENT_ALIAS_SETS_P (x, mem))
2300     return 0;
2301 
2302   /* A read from read-only memory can't conflict with read-write memory.  */
2303   if (!writep && MEM_READONLY_P (mem))
2304     return 0;
2305 
2306   if (nonoverlapping_memrefs_p (x, mem))
2307     return 0;
2308 
2309   x_addr = get_addr (XEXP (x, 0));
2310   mem_addr = get_addr (XEXP (mem, 0));
2311 
2312   if (! writep)
2313     {
2314       base = find_base_term (mem_addr);
2315       if (base && (GET_CODE (base) == LABEL_REF
2316 		   || (GET_CODE (base) == SYMBOL_REF
2317 		       && CONSTANT_POOL_ADDRESS_P (base))))
2318 	return 0;
2319     }
2320 
2321   if (! base_alias_check (x_addr, mem_addr, GET_MODE (x),
2322 			  GET_MODE (mem)))
2323     return 0;
2324 
2325   x_addr = canon_rtx (x_addr);
2326   mem_addr = canon_rtx (mem_addr);
2327 
2328   if (!memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
2329 			   SIZE_FOR_MODE (x), x_addr, 0))
2330     return 0;
2331 
2332   fixed_scalar
2333     = fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
2334 					 rtx_addr_varies_p);
2335 
2336   return (!(fixed_scalar == mem && !aliases_everything_p (x))
2337 	  && !(fixed_scalar == x && !aliases_everything_p (mem)));
2338 }
2339 
2340 /* Anti dependence: X is written after read in MEM takes place.  */
2341 
2342 int
2343 anti_dependence (rtx mem, rtx x)
2344 {
2345   return write_dependence_p (mem, x, /*writep=*/0);
2346 }
2347 
2348 /* Output dependence: X is written after store in MEM takes place.  */
2349 
2350 int
2351 output_dependence (rtx mem, rtx x)
2352 {
2353   return write_dependence_p (mem, x, /*writep=*/1);
2354 }
2355 
2356 
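/* One-time initialization: record which hard registers may hold a base
   address (incoming pointer arguments plus the stack, frame and
   argument pointers) in static_reg_base_value.  */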
2357 void
2358 init_alias_once (void)
2359 {
2360   int i;
2361 
2362   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2363     /* Check whether this register can hold an incoming pointer
2364        argument.  FUNCTION_ARG_REGNO_P tests outgoing register
2365        numbers, so translate if necessary due to register windows.  */
2366     if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
2367 	&& HARD_REGNO_MODE_OK (i, Pmode))
2368       static_reg_base_value[i]
2369 	= gen_rtx_ADDRESS (VOIDmode, gen_rtx_REG (Pmode, i));
2370 
2371   static_reg_base_value[STACK_POINTER_REGNUM]
2372     = gen_rtx_ADDRESS (Pmode, stack_pointer_rtx);
2373   static_reg_base_value[ARG_POINTER_REGNUM]
2374     = gen_rtx_ADDRESS (Pmode, arg_pointer_rtx);
2375   static_reg_base_value[FRAME_POINTER_REGNUM]
2376     = gen_rtx_ADDRESS (Pmode, frame_pointer_rtx);
2377 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2378   static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
2379     = gen_rtx_ADDRESS (Pmode, hard_frame_pointer_rtx);
2380 #endif
2381 }
2382 
2383 /* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
2384    to be a memory reference).  */
2385 static bool memory_modified;
2386 static void
2387 memory_modified_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
2388 {
2389   if (MEM_P (x))
2390     {
2391       if (anti_dependence (x, (rtx)data) || output_dependence (x, (rtx)data))
2392 	memory_modified = true;
2393     }
2394 }
2395 
2396 
2397 /* Return true when INSN possibly modifies the memory contents of MEM
2398    (i.e. the address can be modified).  */
2399 bool
2400 memory_modified_in_insn_p (rtx mem, rtx insn)
2401 {
2402   if (!INSN_P (insn))
2403     return false;
2404   memory_modified = false;
2405   note_stores (PATTERN (insn), memory_modified_1, mem);
2406   return memory_modified;
2407 }
2408 
2409 /* Initialize the aliasing machinery.  Initialize the REG_KNOWN_VALUE
2410    array.  */
2411 
2412 void
2413 init_alias_analysis (void)
2414 {
2415   unsigned int maxreg = max_reg_num ();
2416   int changed, pass;
2417   int i;
2418   unsigned int ui;
2419   rtx insn;
2420 
2421   timevar_push (TV_ALIAS_ANALYSIS);
2422 
2423   reg_known_value_size = maxreg - FIRST_PSEUDO_REGISTER;
2424   reg_known_value = ggc_calloc (reg_known_value_size, sizeof (rtx));
2425   reg_known_equiv_p = xcalloc (reg_known_value_size, sizeof (bool));
2426 
2427   /* If we have memory allocated from the previous run, use it.  */
2428   if (old_reg_base_value)
2429     reg_base_value = old_reg_base_value;
2430 
2431   if (reg_base_value)
2432     VEC_truncate (rtx, reg_base_value, 0);
2433 
2434   VEC_safe_grow (rtx, gc, reg_base_value, maxreg);
2435   memset (VEC_address (rtx, reg_base_value), 0,
2436 	  sizeof (rtx) * VEC_length (rtx, reg_base_value));
2437 
2438   new_reg_base_value = XNEWVEC (rtx, maxreg);
2439   reg_seen = XNEWVEC (char, maxreg);
2440 
2441   /* The basic idea is that each pass through this loop will use the
2442      "constant" information from the previous pass to propagate alias
2443      information through another level of assignments.
2444 
2445      This could get expensive if the assignment chains are long.  Maybe
2446      we should throttle the number of iterations, possibly based on
2447      the optimization level or flag_expensive_optimizations.
2448 
2449      We could propagate more information in the first pass by making use
2450      of REG_N_SETS to determine immediately that the alias information
2451      for a pseudo is "constant".
2452 
2453      A program with an uninitialized variable can cause an infinite loop
2454      here.  Instead of doing a full dataflow analysis to detect such problems
2455      we just cap the number of iterations for the loop.
2456 
2457      The state of the arrays for the set chain in question does not matter
2458      since the program has undefined behavior.  */
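  /* For instance (hypothetical pseudos), if "r10 = r11" appears before
     the insn "r11 = &x" (say, across a loop), the first pass records a
     base value only for r11; the second pass then propagates it to r10
     using the reg_base_value information from the first.  */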
2459 
2460   pass = 0;
2461   do
2462     {
2463       /* Assume nothing will change this iteration of the loop.  */
2464       changed = 0;
2465 
2466       /* We want to assign the same IDs each iteration of this loop, so
2467 	 start counting from zero each iteration of the loop.  */
2468       unique_id = 0;
2469 
2470       /* We're at the start of the function each iteration through the
2471 	 loop, so we're copying arguments.  */
2472       copying_arguments = true;
2473 
2474       /* Wipe the potential alias information clean for this pass.  */
2475       memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
2476 
2477       /* Wipe the reg_seen array clean.  */
2478       memset (reg_seen, 0, maxreg);
2479 
2480       /* Mark all hard registers which may contain an address.
2481 	 The stack, frame and argument pointers may contain an address.
2482 	 An argument register which can hold a Pmode value may contain
2483 	 an address even if it is not in BASE_REGS.
2484 
2485 	 The address expression is VOIDmode for an argument and
2486 	 Pmode for other registers.  */
2487 
2488       memcpy (new_reg_base_value, static_reg_base_value,
2489 	      FIRST_PSEUDO_REGISTER * sizeof (rtx));
2490 
2491       /* Walk the insns adding values to the new_reg_base_value array.  */
2492       for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2493 	{
2494 	  if (INSN_P (insn))
2495 	    {
2496 	      rtx note, set;
2497 
2498 #if defined (HAVE_prologue) || defined (HAVE_epilogue)
2499 	      /* The prologue/epilogue insns are not threaded onto the
2500 		 insn chain until after reload has completed.  Thus,
2501 		 there is no sense wasting time checking if INSN is in
2502 		 the prologue/epilogue until after reload has completed.  */
2503 	      if (reload_completed
2504 		  && prologue_epilogue_contains (insn))
2505 		continue;
2506 #endif
2507 
2508 	      /* If this insn has a noalias note, process it.  Otherwise,
2509 		 scan for sets.  A simple set will have no side effects
2510 		 which could change the base value of any other register.  */
2511 
2512 	      if (GET_CODE (PATTERN (insn)) == SET
2513 		  && REG_NOTES (insn) != 0
2514 		  && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
2515 		record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
2516 	      else
2517 		note_stores (PATTERN (insn), record_set, NULL);
2518 
2519 	      set = single_set (insn);
2520 
2521 	      if (set != 0
2522 		  && REG_P (SET_DEST (set))
2523 		  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
2524 		{
2525 		  unsigned int regno = REGNO (SET_DEST (set));
2526 		  rtx src = SET_SRC (set);
2527 		  rtx t;
2528 
2529 		  if (REG_NOTES (insn) != 0
2530 		      && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
2531 			   && REG_N_SETS (regno) == 1)
2532 			  || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0)
2533 		      && GET_CODE (XEXP (note, 0)) != EXPR_LIST
2534 		      && ! rtx_varies_p (XEXP (note, 0), 1)
2535 		      && ! reg_overlap_mentioned_p (SET_DEST (set),
2536 						    XEXP (note, 0)))
2537 		    {
2538 		      set_reg_known_value (regno, XEXP (note, 0));
2539 		      set_reg_known_equiv_p (regno,
2540 			REG_NOTE_KIND (note) == REG_EQUIV);
2541 		    }
2542 		  else if (REG_N_SETS (regno) == 1
2543 			   && GET_CODE (src) == PLUS
2544 			   && REG_P (XEXP (src, 0))
2545 			   && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
2546 			   && GET_CODE (XEXP (src, 1)) == CONST_INT)
2547 		    {
2548 		      t = plus_constant (t, INTVAL (XEXP (src, 1)));
2549 		      set_reg_known_value (regno, t);
2550 		      set_reg_known_equiv_p (regno, 0);
2551 		    }
2552 		  else if (REG_N_SETS (regno) == 1
2553 			   && ! rtx_varies_p (src, 1))
2554 		    {
2555 		      set_reg_known_value (regno, src);
2556 		      set_reg_known_equiv_p (regno, 0);
2557 		    }
2558 		}
2559 	    }
2560 	  else if (NOTE_P (insn)
2561 		   && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG)
2562 	    copying_arguments = false;
2563 	}
2564 
2565       /* Now propagate values from new_reg_base_value to reg_base_value.  */
2566       gcc_assert (maxreg == (unsigned int) max_reg_num());
2567 
2568       for (ui = 0; ui < maxreg; ui++)
2569 	{
2570 	  if (new_reg_base_value[ui]
2571 	      && new_reg_base_value[ui] != VEC_index (rtx, reg_base_value, ui)
2572 	      && ! rtx_equal_p (new_reg_base_value[ui],
2573 				VEC_index (rtx, reg_base_value, ui)))
2574 	    {
2575 	      VEC_replace (rtx, reg_base_value, ui, new_reg_base_value[ui]);
2576 	      changed = 1;
2577 	    }
2578 	}
2579     }
2580   while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
2581 
2582   /* Fill in the remaining entries.  */
2583   for (i = 0; i < (int)reg_known_value_size; i++)
2584     if (reg_known_value[i] == 0)
2585       reg_known_value[i] = regno_reg_rtx[i + FIRST_PSEUDO_REGISTER];
2586 
2587   /* Simplify the reg_base_value array so that no register refers to
2588      another register, except to special registers indirectly through
2589      ADDRESS expressions.
2590 
2591      In theory this loop can take as long as O(registers^2), but unless
2592      there are very long dependency chains it will run in close to linear
2593      time.
2594 
2595      This loop may not be needed any longer now that the main loop does
2596      a better job at propagating alias information.  */
2597   pass = 0;
2598   do
2599     {
2600       changed = 0;
2601       pass++;
2602       for (ui = 0; ui < maxreg; ui++)
2603 	{
2604 	  rtx base = VEC_index (rtx, reg_base_value, ui);
2605 	  if (base && REG_P (base))
2606 	    {
2607 	      unsigned int base_regno = REGNO (base);
2608 	      if (base_regno == ui)		/* register set from itself */
2609 		VEC_replace (rtx, reg_base_value, ui, 0);
2610 	      else
2611 		VEC_replace (rtx, reg_base_value, ui,
2612 			     VEC_index (rtx, reg_base_value, base_regno));
2613 	      changed = 1;
2614 	    }
2615 	}
2616     }
2617   while (changed && pass < MAX_ALIAS_LOOP_PASSES);
2618 
2619   /* Clean up.  */
2620   free (new_reg_base_value);
2621   new_reg_base_value = 0;
2622   free (reg_seen);
2623   reg_seen = 0;
2624   timevar_pop (TV_ALIAS_ANALYSIS);
2625 }
2626 
2627 void
2628 end_alias_analysis (void)
2629 {
2630   old_reg_base_value = reg_base_value;
2631   ggc_free (reg_known_value);
2632   reg_known_value = 0;
2633   reg_known_value_size = 0;
2634   free (reg_known_equiv_p);
2635   reg_known_equiv_p = 0;
2636 }
2637 
2638 #include "gt-alias.h"
2639