1 /* Alias analysis for GNU C 2 Copyright (C) 1997-2018 Free Software Foundation, Inc. 3 Contributed by John Carr (jfc@mit.edu). 4 5 This file is part of GCC. 6 7 GCC is free software; you can redistribute it and/or modify it under 8 the terms of the GNU General Public License as published by the Free 9 Software Foundation; either version 3, or (at your option) any later 10 version. 11 12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY 13 WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 15 for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with GCC; see the file COPYING3. If not see 19 <http://www.gnu.org/licenses/>. */ 20 21 #include "config.h" 22 #include "system.h" 23 #include "coretypes.h" 24 #include "backend.h" 25 #include "target.h" 26 #include "rtl.h" 27 #include "tree.h" 28 #include "gimple.h" 29 #include "df.h" 30 #include "memmodel.h" 31 #include "tm_p.h" 32 #include "gimple-ssa.h" 33 #include "emit-rtl.h" 34 #include "alias.h" 35 #include "fold-const.h" 36 #include "varasm.h" 37 #include "cselib.h" 38 #include "langhooks.h" 39 #include "cfganal.h" 40 #include "rtl-iter.h" 41 #include "cgraph.h" 42 43 /* The aliasing API provided here solves related but different problems: 44 45 Say there exists (in c) 46 47 struct X { 48 struct Y y1; 49 struct Z z2; 50 } x1, *px1, *px2; 51 52 struct Y y2, *py; 53 struct Z z2, *pz; 54 55 56 py = &x1.y1; 57 px2 = &x1; 58 59 Consider the four questions: 60 61 Can a store to x1 interfere with px2->y1? 62 Can a store to x1 interfere with px2->z2? 63 Can a store to x1 change the value pointed to by with py? 64 Can a store to x1 change the value pointed to by with pz? 65 66 The answer to these questions can be yes, yes, yes, and maybe. 67 68 The first two questions can be answered with a simple examination 69 of the type system. If structure X contains a field of type Y then 70 a store through a pointer to an X can overwrite any field that is 71 contained (recursively) in an X (unless we know that px1 != px2). 72 73 The last two questions can be solved in the same way as the first 74 two questions but this is too conservative. The observation is 75 that in some cases we can know which (if any) fields are addressed 76 and if those addresses are used in bad ways. This analysis may be 77 language specific. In C, arbitrary operations may be applied to 78 pointers. However, there is some indication that this may be too 79 conservative for some C++ types. 80 81 The pass ipa-type-escape does this analysis for the types whose 82 instances do not escape across the compilation boundary. 83 84 Historically in GCC, these two problems were combined and a single 85 data structure that was used to represent the solution to these 86 problems. We now have two similar but different data structures, 87 The data structure to solve the last two questions is similar to 88 the first, but does not contain the fields whose address are never 89 taken. For types that do escape the compilation unit, the data 90 structures will have identical information. 91 */ 92 93 /* The alias sets assigned to MEMs assist the back-end in determining 94 which MEMs can alias which other MEMs. In general, two MEMs in 95 different alias sets cannot alias each other, with one important 96 exception. Consider something like: 97 98 struct S { int i; double d; }; 99 100 a store to an `S' can alias something of either type `int' or type 101 `double'. (However, a store to an `int' cannot alias a `double' 102 and vice versa.) We indicate this via a tree structure that looks 103 like: 104 struct S 105 / \ 106 / \ 107 |/_ _\| 108 int double 109 110 (The arrows are directed and point downwards.) 111 In this situation we say the alias set for `struct S' is the 112 `superset' and that those for `int' and `double' are `subsets'. 113 114 To see whether two alias sets can point to the same memory, we must 115 see if either alias set is a subset of the other. We need not trace 116 past immediate descendants, however, since we propagate all 117 grandchildren up one level. 118 119 Alias set zero is implicitly a superset of all other alias sets. 120 However, this is no actual entry for alias set zero. It is an 121 error to attempt to explicitly construct a subset of zero. */ 122 123 struct alias_set_hash : int_hash <int, INT_MIN, INT_MIN + 1> {}; 124 125 struct GTY(()) alias_set_entry { 126 /* The alias set number, as stored in MEM_ALIAS_SET. */ 127 alias_set_type alias_set; 128 129 /* Nonzero if would have a child of zero: this effectively makes this 130 alias set the same as alias set zero. */ 131 bool has_zero_child; 132 /* Nonzero if alias set corresponds to pointer type itself (i.e. not to 133 aggregate contaiing pointer. 134 This is used for a special case where we need an universal pointer type 135 compatible with all other pointer types. */ 136 bool is_pointer; 137 /* Nonzero if is_pointer or if one of childs have has_pointer set. */ 138 bool has_pointer; 139 140 /* The children of the alias set. These are not just the immediate 141 children, but, in fact, all descendants. So, if we have: 142 143 struct T { struct S s; float f; } 144 145 continuing our example above, the children here will be all of 146 `int', `double', `float', and `struct S'. */ 147 hash_map<alias_set_hash, int> *children; 148 }; 149 150 static int rtx_equal_for_memref_p (const_rtx, const_rtx); 151 static void record_set (rtx, const_rtx, void *); 152 static int base_alias_check (rtx, rtx, rtx, rtx, machine_mode, 153 machine_mode); 154 static rtx find_base_value (rtx); 155 static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx); 156 static alias_set_entry *get_alias_set_entry (alias_set_type); 157 static tree decl_for_component_ref (tree); 158 static int write_dependence_p (const_rtx, 159 const_rtx, machine_mode, rtx, 160 bool, bool, bool); 161 static int compare_base_symbol_refs (const_rtx, const_rtx); 162 163 static void memory_modified_1 (rtx, const_rtx, void *); 164 165 /* Query statistics for the different low-level disambiguators. 166 A high-level query may trigger multiple of them. */ 167 168 static struct { 169 unsigned long long num_alias_zero; 170 unsigned long long num_same_alias_set; 171 unsigned long long num_same_objects; 172 unsigned long long num_volatile; 173 unsigned long long num_dag; 174 unsigned long long num_universal; 175 unsigned long long num_disambiguated; 176 } alias_stats; 177 178 179 /* Set up all info needed to perform alias analysis on memory references. */ 180 181 /* Returns the size in bytes of the mode of X. */ 182 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X))) 183 184 /* Cap the number of passes we make over the insns propagating alias 185 information through set chains. 186 ??? 10 is a completely arbitrary choice. This should be based on the 187 maximum loop depth in the CFG, but we do not have this information 188 available (even if current_loops _is_ available). */ 189 #define MAX_ALIAS_LOOP_PASSES 10 190 191 /* reg_base_value[N] gives an address to which register N is related. 192 If all sets after the first add or subtract to the current value 193 or otherwise modify it so it does not point to a different top level 194 object, reg_base_value[N] is equal to the address part of the source 195 of the first set. 196 197 A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF. ADDRESS 198 expressions represent three types of base: 199 200 1. incoming arguments. There is just one ADDRESS to represent all 201 arguments, since we do not know at this level whether accesses 202 based on different arguments can alias. The ADDRESS has id 0. 203 204 2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx 205 (if distinct from frame_pointer_rtx) and arg_pointer_rtx. 206 Each of these rtxes has a separate ADDRESS associated with it, 207 each with a negative id. 208 209 GCC is (and is required to be) precise in which register it 210 chooses to access a particular region of stack. We can therefore 211 assume that accesses based on one of these rtxes do not alias 212 accesses based on another of these rtxes. 213 214 3. bases that are derived from malloc()ed memory (REG_NOALIAS). 215 Each such piece of memory has a separate ADDRESS associated 216 with it, each with an id greater than 0. 217 218 Accesses based on one ADDRESS do not alias accesses based on other 219 ADDRESSes. Accesses based on ADDRESSes in groups (2) and (3) do not 220 alias globals either; the ADDRESSes have Pmode to indicate this. 221 The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to 222 indicate this. */ 223 224 static GTY(()) vec<rtx, va_gc> *reg_base_value; 225 static rtx *new_reg_base_value; 226 227 /* The single VOIDmode ADDRESS that represents all argument bases. 228 It has id 0. */ 229 static GTY(()) rtx arg_base_value; 230 231 /* Used to allocate unique ids to each REG_NOALIAS ADDRESS. */ 232 static int unique_id; 233 234 /* We preserve the copy of old array around to avoid amount of garbage 235 produced. About 8% of garbage produced were attributed to this 236 array. */ 237 static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value; 238 239 /* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special 240 registers. */ 241 #define UNIQUE_BASE_VALUE_SP -1 242 #define UNIQUE_BASE_VALUE_ARGP -2 243 #define UNIQUE_BASE_VALUE_FP -3 244 #define UNIQUE_BASE_VALUE_HFP -4 245 246 #define static_reg_base_value \ 247 (this_target_rtl->x_static_reg_base_value) 248 249 #define REG_BASE_VALUE(X) \ 250 (REGNO (X) < vec_safe_length (reg_base_value) \ 251 ? (*reg_base_value)[REGNO (X)] : 0) 252 253 /* Vector indexed by N giving the initial (unchanging) value known for 254 pseudo-register N. This vector is initialized in init_alias_analysis, 255 and does not change until end_alias_analysis is called. */ 256 static GTY(()) vec<rtx, va_gc> *reg_known_value; 257 258 /* Vector recording for each reg_known_value whether it is due to a 259 REG_EQUIV note. Future passes (viz., reload) may replace the 260 pseudo with the equivalent expression and so we account for the 261 dependences that would be introduced if that happens. 262 263 The REG_EQUIV notes created in assign_parms may mention the arg 264 pointer, and there are explicit insns in the RTL that modify the 265 arg pointer. Thus we must ensure that such insns don't get 266 scheduled across each other because that would invalidate the 267 REG_EQUIV notes. One could argue that the REG_EQUIV notes are 268 wrong, but solving the problem in the scheduler will likely give 269 better code, so we do it here. */ 270 static sbitmap reg_known_equiv_p; 271 272 /* True when scanning insns from the start of the rtl to the 273 NOTE_INSN_FUNCTION_BEG note. */ 274 static bool copying_arguments; 275 276 277 /* The splay-tree used to store the various alias set entries. */ 278 static GTY (()) vec<alias_set_entry *, va_gc> *alias_sets; 279 280 /* Build a decomposed reference object for querying the alias-oracle 281 from the MEM rtx and store it in *REF. 282 Returns false if MEM is not suitable for the alias-oracle. */ 283 284 static bool 285 ao_ref_from_mem (ao_ref *ref, const_rtx mem) 286 { 287 tree expr = MEM_EXPR (mem); 288 tree base; 289 290 if (!expr) 291 return false; 292 293 ao_ref_init (ref, expr); 294 295 /* Get the base of the reference and see if we have to reject or 296 adjust it. */ 297 base = ao_ref_base (ref); 298 if (base == NULL_TREE) 299 return false; 300 301 /* The tree oracle doesn't like bases that are neither decls 302 nor indirect references of SSA names. */ 303 if (!(DECL_P (base) 304 || (TREE_CODE (base) == MEM_REF 305 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME) 306 || (TREE_CODE (base) == TARGET_MEM_REF 307 && TREE_CODE (TMR_BASE (base)) == SSA_NAME))) 308 return false; 309 310 /* If this is a reference based on a partitioned decl replace the 311 base with a MEM_REF of the pointer representative we 312 created during stack slot partitioning. */ 313 if (VAR_P (base) 314 && ! is_global_var (base) 315 && cfun->gimple_df->decls_to_pointers != NULL) 316 { 317 tree *namep = cfun->gimple_df->decls_to_pointers->get (base); 318 if (namep) 319 ref->base = build_simple_mem_ref (*namep); 320 } 321 322 ref->ref_alias_set = MEM_ALIAS_SET (mem); 323 324 /* If MEM_OFFSET or MEM_SIZE are unknown what we got from MEM_EXPR 325 is conservative, so trust it. */ 326 if (!MEM_OFFSET_KNOWN_P (mem) 327 || !MEM_SIZE_KNOWN_P (mem)) 328 return true; 329 330 /* If MEM_OFFSET/MEM_SIZE get us outside of ref->offset/ref->max_size 331 drop ref->ref. */ 332 if (maybe_lt (MEM_OFFSET (mem), 0) 333 || (ref->max_size_known_p () 334 && maybe_gt ((MEM_OFFSET (mem) + MEM_SIZE (mem)) * BITS_PER_UNIT, 335 ref->max_size))) 336 ref->ref = NULL_TREE; 337 338 /* Refine size and offset we got from analyzing MEM_EXPR by using 339 MEM_SIZE and MEM_OFFSET. */ 340 341 ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT; 342 ref->size = MEM_SIZE (mem) * BITS_PER_UNIT; 343 344 /* The MEM may extend into adjacent fields, so adjust max_size if 345 necessary. */ 346 if (ref->max_size_known_p ()) 347 ref->max_size = upper_bound (ref->max_size, ref->size); 348 349 /* If MEM_OFFSET and MEM_SIZE might get us outside of the base object of 350 the MEM_EXPR punt. This happens for STRICT_ALIGNMENT targets a lot. */ 351 if (MEM_EXPR (mem) != get_spill_slot_decl (false) 352 && (maybe_lt (ref->offset, 0) 353 || (DECL_P (ref->base) 354 && (DECL_SIZE (ref->base) == NULL_TREE 355 || !poly_int_tree_p (DECL_SIZE (ref->base)) 356 || maybe_lt (wi::to_poly_offset (DECL_SIZE (ref->base)), 357 ref->offset + ref->size))))) 358 return false; 359 360 return true; 361 } 362 363 /* Query the alias-oracle on whether the two memory rtx X and MEM may 364 alias. If TBAA_P is set also apply TBAA. Returns true if the 365 two rtxen may alias, false otherwise. */ 366 367 static bool 368 rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p) 369 { 370 ao_ref ref1, ref2; 371 372 if (!ao_ref_from_mem (&ref1, x) 373 || !ao_ref_from_mem (&ref2, mem)) 374 return true; 375 376 return refs_may_alias_p_1 (&ref1, &ref2, 377 tbaa_p 378 && MEM_ALIAS_SET (x) != 0 379 && MEM_ALIAS_SET (mem) != 0); 380 } 381 382 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is 383 such an entry, or NULL otherwise. */ 384 385 static inline alias_set_entry * 386 get_alias_set_entry (alias_set_type alias_set) 387 { 388 return (*alias_sets)[alias_set]; 389 } 390 391 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that 392 the two MEMs cannot alias each other. */ 393 394 static inline int 395 mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2) 396 { 397 return (flag_strict_aliasing 398 && ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), 399 MEM_ALIAS_SET (mem2))); 400 } 401 402 /* Return true if the first alias set is a subset of the second. */ 403 404 bool 405 alias_set_subset_of (alias_set_type set1, alias_set_type set2) 406 { 407 alias_set_entry *ase2; 408 409 /* Disable TBAA oracle with !flag_strict_aliasing. */ 410 if (!flag_strict_aliasing) 411 return true; 412 413 /* Everything is a subset of the "aliases everything" set. */ 414 if (set2 == 0) 415 return true; 416 417 /* Check if set1 is a subset of set2. */ 418 ase2 = get_alias_set_entry (set2); 419 if (ase2 != 0 420 && (ase2->has_zero_child 421 || (ase2->children && ase2->children->get (set1)))) 422 return true; 423 424 /* As a special case we consider alias set of "void *" to be both subset 425 and superset of every alias set of a pointer. This extra symmetry does 426 not matter for alias_sets_conflict_p but it makes aliasing_component_refs_p 427 to return true on the following testcase: 428 429 void *ptr; 430 char **ptr2=(char **)&ptr; 431 *ptr2 = ... 432 433 Additionally if a set contains universal pointer, we consider every pointer 434 to be a subset of it, but we do not represent this explicitely - doing so 435 would require us to update transitive closure each time we introduce new 436 pointer type. This makes aliasing_component_refs_p to return true 437 on the following testcase: 438 439 struct a {void *ptr;} 440 char **ptr = (char **)&a.ptr; 441 ptr = ... 442 443 This makes void * truly universal pointer type. See pointer handling in 444 get_alias_set for more details. */ 445 if (ase2 && ase2->has_pointer) 446 { 447 alias_set_entry *ase1 = get_alias_set_entry (set1); 448 449 if (ase1 && ase1->is_pointer) 450 { 451 alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node); 452 /* If one is ptr_type_node and other is pointer, then we consider 453 them subset of each other. */ 454 if (set1 == voidptr_set || set2 == voidptr_set) 455 return true; 456 /* If SET2 contains universal pointer's alias set, then we consdier 457 every (non-universal) pointer. */ 458 if (ase2->children && set1 != voidptr_set 459 && ase2->children->get (voidptr_set)) 460 return true; 461 } 462 } 463 return false; 464 } 465 466 /* Return 1 if the two specified alias sets may conflict. */ 467 468 int 469 alias_sets_conflict_p (alias_set_type set1, alias_set_type set2) 470 { 471 alias_set_entry *ase1; 472 alias_set_entry *ase2; 473 474 /* The easy case. */ 475 if (alias_sets_must_conflict_p (set1, set2)) 476 return 1; 477 478 /* See if the first alias set is a subset of the second. */ 479 ase1 = get_alias_set_entry (set1); 480 if (ase1 != 0 481 && ase1->children && ase1->children->get (set2)) 482 { 483 ++alias_stats.num_dag; 484 return 1; 485 } 486 487 /* Now do the same, but with the alias sets reversed. */ 488 ase2 = get_alias_set_entry (set2); 489 if (ase2 != 0 490 && ase2->children && ase2->children->get (set1)) 491 { 492 ++alias_stats.num_dag; 493 return 1; 494 } 495 496 /* We want void * to be compatible with any other pointer without 497 really dropping it to alias set 0. Doing so would make it 498 compatible with all non-pointer types too. 499 500 This is not strictly necessary by the C/C++ language 501 standards, but avoids common type punning mistakes. In 502 addition to that, we need the existence of such universal 503 pointer to implement Fortran's C_PTR type (which is defined as 504 type compatible with all C pointers). */ 505 if (ase1 && ase2 && ase1->has_pointer && ase2->has_pointer) 506 { 507 alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node); 508 509 /* If one of the sets corresponds to universal pointer, 510 we consider it to conflict with anything that is 511 or contains pointer. */ 512 if (set1 == voidptr_set || set2 == voidptr_set) 513 { 514 ++alias_stats.num_universal; 515 return true; 516 } 517 /* If one of sets is (non-universal) pointer and the other 518 contains universal pointer, we also get conflict. */ 519 if (ase1->is_pointer && set2 != voidptr_set 520 && ase2->children && ase2->children->get (voidptr_set)) 521 { 522 ++alias_stats.num_universal; 523 return true; 524 } 525 if (ase2->is_pointer && set1 != voidptr_set 526 && ase1->children && ase1->children->get (voidptr_set)) 527 { 528 ++alias_stats.num_universal; 529 return true; 530 } 531 } 532 533 ++alias_stats.num_disambiguated; 534 535 /* The two alias sets are distinct and neither one is the 536 child of the other. Therefore, they cannot conflict. */ 537 return 0; 538 } 539 540 /* Return 1 if the two specified alias sets will always conflict. */ 541 542 int 543 alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2) 544 { 545 /* Disable TBAA oracle with !flag_strict_aliasing. */ 546 if (!flag_strict_aliasing) 547 return 1; 548 if (set1 == 0 || set2 == 0) 549 { 550 ++alias_stats.num_alias_zero; 551 return 1; 552 } 553 if (set1 == set2) 554 { 555 ++alias_stats.num_same_alias_set; 556 return 1; 557 } 558 559 return 0; 560 } 561 562 /* Return 1 if any MEM object of type T1 will always conflict (using the 563 dependency routines in this file) with any MEM object of type T2. 564 This is used when allocating temporary storage. If T1 and/or T2 are 565 NULL_TREE, it means we know nothing about the storage. */ 566 567 int 568 objects_must_conflict_p (tree t1, tree t2) 569 { 570 alias_set_type set1, set2; 571 572 /* If neither has a type specified, we don't know if they'll conflict 573 because we may be using them to store objects of various types, for 574 example the argument and local variables areas of inlined functions. */ 575 if (t1 == 0 && t2 == 0) 576 return 0; 577 578 /* If they are the same type, they must conflict. */ 579 if (t1 == t2) 580 { 581 ++alias_stats.num_same_objects; 582 return 1; 583 } 584 /* Likewise if both are volatile. */ 585 if (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)) 586 { 587 ++alias_stats.num_volatile; 588 return 1; 589 } 590 591 set1 = t1 ? get_alias_set (t1) : 0; 592 set2 = t2 ? get_alias_set (t2) : 0; 593 594 /* We can't use alias_sets_conflict_p because we must make sure 595 that every subtype of t1 will conflict with every subtype of 596 t2 for which a pair of subobjects of these respective subtypes 597 overlaps on the stack. */ 598 return alias_sets_must_conflict_p (set1, set2); 599 } 600 601 /* Return the outermost parent of component present in the chain of 602 component references handled by get_inner_reference in T with the 603 following property: 604 - the component is non-addressable, or 605 - the parent has alias set zero, 606 or NULL_TREE if no such parent exists. In the former cases, the alias 607 set of this parent is the alias set that must be used for T itself. */ 608 609 tree 610 component_uses_parent_alias_set_from (const_tree t) 611 { 612 const_tree found = NULL_TREE; 613 614 if (AGGREGATE_TYPE_P (TREE_TYPE (t)) 615 && TYPE_TYPELESS_STORAGE (TREE_TYPE (t))) 616 return const_cast <tree> (t); 617 618 while (handled_component_p (t)) 619 { 620 switch (TREE_CODE (t)) 621 { 622 case COMPONENT_REF: 623 if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1))) 624 found = t; 625 /* Permit type-punning when accessing a union, provided the access 626 is directly through the union. For example, this code does not 627 permit taking the address of a union member and then storing 628 through it. Even the type-punning allowed here is a GCC 629 extension, albeit a common and useful one; the C standard says 630 that such accesses have implementation-defined behavior. */ 631 else if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE) 632 found = t; 633 break; 634 635 case ARRAY_REF: 636 case ARRAY_RANGE_REF: 637 if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0)))) 638 found = t; 639 break; 640 641 case REALPART_EXPR: 642 case IMAGPART_EXPR: 643 break; 644 645 case BIT_FIELD_REF: 646 case VIEW_CONVERT_EXPR: 647 /* Bitfields and casts are never addressable. */ 648 found = t; 649 break; 650 651 default: 652 gcc_unreachable (); 653 } 654 655 if (get_alias_set (TREE_TYPE (TREE_OPERAND (t, 0))) == 0) 656 found = t; 657 658 t = TREE_OPERAND (t, 0); 659 } 660 661 if (found) 662 return TREE_OPERAND (found, 0); 663 664 return NULL_TREE; 665 } 666 667 668 /* Return whether the pointer-type T effective for aliasing may 669 access everything and thus the reference has to be assigned 670 alias-set zero. */ 671 672 static bool 673 ref_all_alias_ptr_type_p (const_tree t) 674 { 675 return (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE 676 || TYPE_REF_CAN_ALIAS_ALL (t)); 677 } 678 679 /* Return the alias set for the memory pointed to by T, which may be 680 either a type or an expression. Return -1 if there is nothing 681 special about dereferencing T. */ 682 683 static alias_set_type 684 get_deref_alias_set_1 (tree t) 685 { 686 /* All we care about is the type. */ 687 if (! TYPE_P (t)) 688 t = TREE_TYPE (t); 689 690 /* If we have an INDIRECT_REF via a void pointer, we don't 691 know anything about what that might alias. Likewise if the 692 pointer is marked that way. */ 693 if (ref_all_alias_ptr_type_p (t)) 694 return 0; 695 696 return -1; 697 } 698 699 /* Return the alias set for the memory pointed to by T, which may be 700 either a type or an expression. */ 701 702 alias_set_type 703 get_deref_alias_set (tree t) 704 { 705 /* If we're not doing any alias analysis, just assume everything 706 aliases everything else. */ 707 if (!flag_strict_aliasing) 708 return 0; 709 710 alias_set_type set = get_deref_alias_set_1 (t); 711 712 /* Fall back to the alias-set of the pointed-to type. */ 713 if (set == -1) 714 { 715 if (! TYPE_P (t)) 716 t = TREE_TYPE (t); 717 set = get_alias_set (TREE_TYPE (t)); 718 } 719 720 return set; 721 } 722 723 /* Return the pointer-type relevant for TBAA purposes from the 724 memory reference tree *T or NULL_TREE in which case *T is 725 adjusted to point to the outermost component reference that 726 can be used for assigning an alias set. */ 727 728 static tree 729 reference_alias_ptr_type_1 (tree *t) 730 { 731 tree inner; 732 733 /* Get the base object of the reference. */ 734 inner = *t; 735 while (handled_component_p (inner)) 736 { 737 /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use 738 the type of any component references that wrap it to 739 determine the alias-set. */ 740 if (TREE_CODE (inner) == VIEW_CONVERT_EXPR) 741 *t = TREE_OPERAND (inner, 0); 742 inner = TREE_OPERAND (inner, 0); 743 } 744 745 /* Handle pointer dereferences here, they can override the 746 alias-set. */ 747 if (INDIRECT_REF_P (inner) 748 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 0)))) 749 return TREE_TYPE (TREE_OPERAND (inner, 0)); 750 else if (TREE_CODE (inner) == TARGET_MEM_REF) 751 return TREE_TYPE (TMR_OFFSET (inner)); 752 else if (TREE_CODE (inner) == MEM_REF 753 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 1)))) 754 return TREE_TYPE (TREE_OPERAND (inner, 1)); 755 756 /* If the innermost reference is a MEM_REF that has a 757 conversion embedded treat it like a VIEW_CONVERT_EXPR above, 758 using the memory access type for determining the alias-set. */ 759 if (TREE_CODE (inner) == MEM_REF 760 && (TYPE_MAIN_VARIANT (TREE_TYPE (inner)) 761 != TYPE_MAIN_VARIANT 762 (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1)))))) 763 return TREE_TYPE (TREE_OPERAND (inner, 1)); 764 765 /* Otherwise, pick up the outermost object that we could have 766 a pointer to. */ 767 tree tem = component_uses_parent_alias_set_from (*t); 768 if (tem) 769 *t = tem; 770 771 return NULL_TREE; 772 } 773 774 /* Return the pointer-type relevant for TBAA purposes from the 775 gimple memory reference tree T. This is the type to be used for 776 the offset operand of MEM_REF or TARGET_MEM_REF replacements of T 777 and guarantees that get_alias_set will return the same alias 778 set for T and the replacement. */ 779 780 tree 781 reference_alias_ptr_type (tree t) 782 { 783 /* If the frontend assigns this alias-set zero, preserve that. */ 784 if (lang_hooks.get_alias_set (t) == 0) 785 return ptr_type_node; 786 787 tree ptype = reference_alias_ptr_type_1 (&t); 788 /* If there is a given pointer type for aliasing purposes, return it. */ 789 if (ptype != NULL_TREE) 790 return ptype; 791 792 /* Otherwise build one from the outermost component reference we 793 may use. */ 794 if (TREE_CODE (t) == MEM_REF 795 || TREE_CODE (t) == TARGET_MEM_REF) 796 return TREE_TYPE (TREE_OPERAND (t, 1)); 797 else 798 return build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (t))); 799 } 800 801 /* Return whether the pointer-types T1 and T2 used to determine 802 two alias sets of two references will yield the same answer 803 from get_deref_alias_set. */ 804 805 bool 806 alias_ptr_types_compatible_p (tree t1, tree t2) 807 { 808 if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2)) 809 return true; 810 811 if (ref_all_alias_ptr_type_p (t1) 812 || ref_all_alias_ptr_type_p (t2)) 813 return false; 814 815 return (TYPE_MAIN_VARIANT (TREE_TYPE (t1)) 816 == TYPE_MAIN_VARIANT (TREE_TYPE (t2))); 817 } 818 819 /* Create emptry alias set entry. */ 820 821 alias_set_entry * 822 init_alias_set_entry (alias_set_type set) 823 { 824 alias_set_entry *ase = ggc_alloc<alias_set_entry> (); 825 ase->alias_set = set; 826 ase->children = NULL; 827 ase->has_zero_child = false; 828 ase->is_pointer = false; 829 ase->has_pointer = false; 830 gcc_checking_assert (!get_alias_set_entry (set)); 831 (*alias_sets)[set] = ase; 832 return ase; 833 } 834 835 /* Return the alias set for T, which may be either a type or an 836 expression. Call language-specific routine for help, if needed. */ 837 838 alias_set_type 839 get_alias_set (tree t) 840 { 841 alias_set_type set; 842 843 /* We can not give up with -fno-strict-aliasing because we need to build 844 proper type representation for possible functions which are build with 845 -fstrict-aliasing. */ 846 847 /* return 0 if this or its type is an error. */ 848 if (t == error_mark_node 849 || (! TYPE_P (t) 850 && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node))) 851 return 0; 852 853 /* We can be passed either an expression or a type. This and the 854 language-specific routine may make mutually-recursive calls to each other 855 to figure out what to do. At each juncture, we see if this is a tree 856 that the language may need to handle specially. First handle things that 857 aren't types. */ 858 if (! TYPE_P (t)) 859 { 860 /* Give the language a chance to do something with this tree 861 before we look at it. */ 862 STRIP_NOPS (t); 863 set = lang_hooks.get_alias_set (t); 864 if (set != -1) 865 return set; 866 867 /* Get the alias pointer-type to use or the outermost object 868 that we could have a pointer to. */ 869 tree ptype = reference_alias_ptr_type_1 (&t); 870 if (ptype != NULL) 871 return get_deref_alias_set (ptype); 872 873 /* If we've already determined the alias set for a decl, just return 874 it. This is necessary for C++ anonymous unions, whose component 875 variables don't look like union members (boo!). */ 876 if (VAR_P (t) 877 && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t))) 878 return MEM_ALIAS_SET (DECL_RTL (t)); 879 880 /* Now all we care about is the type. */ 881 t = TREE_TYPE (t); 882 } 883 884 /* Variant qualifiers don't affect the alias set, so get the main 885 variant. */ 886 t = TYPE_MAIN_VARIANT (t); 887 888 if (AGGREGATE_TYPE_P (t) 889 && TYPE_TYPELESS_STORAGE (t)) 890 return 0; 891 892 /* Always use the canonical type as well. If this is a type that 893 requires structural comparisons to identify compatible types 894 use alias set zero. */ 895 if (TYPE_STRUCTURAL_EQUALITY_P (t)) 896 { 897 /* Allow the language to specify another alias set for this 898 type. */ 899 set = lang_hooks.get_alias_set (t); 900 if (set != -1) 901 return set; 902 /* Handle structure type equality for pointer types, arrays and vectors. 903 This is easy to do, because the code bellow ignore canonical types on 904 these anyway. This is important for LTO, where TYPE_CANONICAL for 905 pointers can not be meaningfuly computed by the frotnend. */ 906 if (canonical_type_used_p (t)) 907 { 908 /* In LTO we set canonical types for all types where it makes 909 sense to do so. Double check we did not miss some type. */ 910 gcc_checking_assert (!in_lto_p || !type_with_alias_set_p (t)); 911 return 0; 912 } 913 } 914 else 915 { 916 t = TYPE_CANONICAL (t); 917 gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t)); 918 } 919 920 /* If this is a type with a known alias set, return it. */ 921 gcc_checking_assert (t == TYPE_MAIN_VARIANT (t)); 922 if (TYPE_ALIAS_SET_KNOWN_P (t)) 923 return TYPE_ALIAS_SET (t); 924 925 /* We don't want to set TYPE_ALIAS_SET for incomplete types. */ 926 if (!COMPLETE_TYPE_P (t)) 927 { 928 /* For arrays with unknown size the conservative answer is the 929 alias set of the element type. */ 930 if (TREE_CODE (t) == ARRAY_TYPE) 931 return get_alias_set (TREE_TYPE (t)); 932 933 /* But return zero as a conservative answer for incomplete types. */ 934 return 0; 935 } 936 937 /* See if the language has special handling for this type. */ 938 set = lang_hooks.get_alias_set (t); 939 if (set != -1) 940 return set; 941 942 /* There are no objects of FUNCTION_TYPE, so there's no point in 943 using up an alias set for them. (There are, of course, pointers 944 and references to functions, but that's different.) */ 945 else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE) 946 set = 0; 947 948 /* Unless the language specifies otherwise, let vector types alias 949 their components. This avoids some nasty type punning issues in 950 normal usage. And indeed lets vectors be treated more like an 951 array slice. */ 952 else if (TREE_CODE (t) == VECTOR_TYPE) 953 set = get_alias_set (TREE_TYPE (t)); 954 955 /* Unless the language specifies otherwise, treat array types the 956 same as their components. This avoids the asymmetry we get 957 through recording the components. Consider accessing a 958 character(kind=1) through a reference to a character(kind=1)[1:1]. 959 Or consider if we want to assign integer(kind=4)[0:D.1387] and 960 integer(kind=4)[4] the same alias set or not. 961 Just be pragmatic here and make sure the array and its element 962 type get the same alias set assigned. */ 963 else if (TREE_CODE (t) == ARRAY_TYPE 964 && (!TYPE_NONALIASED_COMPONENT (t) 965 || TYPE_STRUCTURAL_EQUALITY_P (t))) 966 set = get_alias_set (TREE_TYPE (t)); 967 968 /* From the former common C and C++ langhook implementation: 969 970 Unfortunately, there is no canonical form of a pointer type. 971 In particular, if we have `typedef int I', then `int *', and 972 `I *' are different types. So, we have to pick a canonical 973 representative. We do this below. 974 975 Technically, this approach is actually more conservative that 976 it needs to be. In particular, `const int *' and `int *' 977 should be in different alias sets, according to the C and C++ 978 standard, since their types are not the same, and so, 979 technically, an `int **' and `const int **' cannot point at 980 the same thing. 981 982 But, the standard is wrong. In particular, this code is 983 legal C++: 984 985 int *ip; 986 int **ipp = &ip; 987 const int* const* cipp = ipp; 988 And, it doesn't make sense for that to be legal unless you 989 can dereference IPP and CIPP. So, we ignore cv-qualifiers on 990 the pointed-to types. This issue has been reported to the 991 C++ committee. 992 993 For this reason go to canonical type of the unqalified pointer type. 994 Until GCC 6 this code set all pointers sets to have alias set of 995 ptr_type_node but that is a bad idea, because it prevents disabiguations 996 in between pointers. For Firefox this accounts about 20% of all 997 disambiguations in the program. */ 998 else if (POINTER_TYPE_P (t) && t != ptr_type_node) 999 { 1000 tree p; 1001 auto_vec <bool, 8> reference; 1002 1003 /* Unnest all pointers and references. 1004 We also want to make pointer to array/vector equivalent to pointer to 1005 its element (see the reasoning above). Skip all those types, too. */ 1006 for (p = t; POINTER_TYPE_P (p) 1007 || (TREE_CODE (p) == ARRAY_TYPE 1008 && (!TYPE_NONALIASED_COMPONENT (p) 1009 || !COMPLETE_TYPE_P (p) 1010 || TYPE_STRUCTURAL_EQUALITY_P (p))) 1011 || TREE_CODE (p) == VECTOR_TYPE; 1012 p = TREE_TYPE (p)) 1013 { 1014 /* Ada supports recusive pointers. Instead of doing recrusion check 1015 just give up once the preallocated space of 8 elements is up. 1016 In this case just punt to void * alias set. */ 1017 if (reference.length () == 8) 1018 { 1019 p = ptr_type_node; 1020 break; 1021 } 1022 if (TREE_CODE (p) == REFERENCE_TYPE) 1023 /* In LTO we want languages that use references to be compatible 1024 with languages that use pointers. */ 1025 reference.safe_push (true && !in_lto_p); 1026 if (TREE_CODE (p) == POINTER_TYPE) 1027 reference.safe_push (false); 1028 } 1029 p = TYPE_MAIN_VARIANT (p); 1030 1031 /* Make void * compatible with char * and also void **. 1032 Programs are commonly violating TBAA by this. 1033 1034 We also make void * to conflict with every pointer 1035 (see record_component_aliases) and thus it is safe it to use it for 1036 pointers to types with TYPE_STRUCTURAL_EQUALITY_P. */ 1037 if (TREE_CODE (p) == VOID_TYPE || TYPE_STRUCTURAL_EQUALITY_P (p)) 1038 set = get_alias_set (ptr_type_node); 1039 else 1040 { 1041 /* Rebuild pointer type starting from canonical types using 1042 unqualified pointers and references only. This way all such 1043 pointers will have the same alias set and will conflict with 1044 each other. 1045 1046 Most of time we already have pointers or references of a given type. 1047 If not we build new one just to be sure that if someone later 1048 (probably only middle-end can, as we should assign all alias 1049 classes only after finishing translation unit) builds the pointer 1050 type, the canonical type will match. */ 1051 p = TYPE_CANONICAL (p); 1052 while (!reference.is_empty ()) 1053 { 1054 if (reference.pop ()) 1055 p = build_reference_type (p); 1056 else 1057 p = build_pointer_type (p); 1058 gcc_checking_assert (p == TYPE_MAIN_VARIANT (p)); 1059 /* build_pointer_type should always return the canonical type. 1060 For LTO TYPE_CANOINCAL may be NULL, because we do not compute 1061 them. Be sure that frontends do not glob canonical types of 1062 pointers in unexpected way and that p == TYPE_CANONICAL (p) 1063 in all other cases. */ 1064 gcc_checking_assert (!TYPE_CANONICAL (p) 1065 || p == TYPE_CANONICAL (p)); 1066 } 1067 1068 /* Assign the alias set to both p and t. 1069 We can not call get_alias_set (p) here as that would trigger 1070 infinite recursion when p == t. In other cases it would just 1071 trigger unnecesary legwork of rebuilding the pointer again. */ 1072 gcc_checking_assert (p == TYPE_MAIN_VARIANT (p)); 1073 if (TYPE_ALIAS_SET_KNOWN_P (p)) 1074 set = TYPE_ALIAS_SET (p); 1075 else 1076 { 1077 set = new_alias_set (); 1078 TYPE_ALIAS_SET (p) = set; 1079 } 1080 } 1081 } 1082 /* Alias set of ptr_type_node is special and serve as universal pointer which 1083 is TBAA compatible with every other pointer type. Be sure we have the 1084 alias set built even for LTO which otherwise keeps all TYPE_CANONICAL 1085 of pointer types NULL. */ 1086 else if (t == ptr_type_node) 1087 set = new_alias_set (); 1088 1089 /* Otherwise make a new alias set for this type. */ 1090 else 1091 { 1092 /* Each canonical type gets its own alias set, so canonical types 1093 shouldn't form a tree. It doesn't really matter for types 1094 we handle specially above, so only check it where it possibly 1095 would result in a bogus alias set. */ 1096 gcc_checking_assert (TYPE_CANONICAL (t) == t); 1097 1098 set = new_alias_set (); 1099 } 1100 1101 TYPE_ALIAS_SET (t) = set; 1102 1103 /* If this is an aggregate type or a complex type, we must record any 1104 component aliasing information. */ 1105 if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE) 1106 record_component_aliases (t); 1107 1108 /* We treat pointer types specially in alias_set_subset_of. */ 1109 if (POINTER_TYPE_P (t) && set) 1110 { 1111 alias_set_entry *ase = get_alias_set_entry (set); 1112 if (!ase) 1113 ase = init_alias_set_entry (set); 1114 ase->is_pointer = true; 1115 ase->has_pointer = true; 1116 } 1117 1118 return set; 1119 } 1120 1121 /* Return a brand-new alias set. */ 1122 1123 alias_set_type 1124 new_alias_set (void) 1125 { 1126 if (alias_sets == 0) 1127 vec_safe_push (alias_sets, (alias_set_entry *) NULL); 1128 vec_safe_push (alias_sets, (alias_set_entry *) NULL); 1129 return alias_sets->length () - 1; 1130 } 1131 1132 /* Indicate that things in SUBSET can alias things in SUPERSET, but that 1133 not everything that aliases SUPERSET also aliases SUBSET. For example, 1134 in C, a store to an `int' can alias a load of a structure containing an 1135 `int', and vice versa. But it can't alias a load of a 'double' member 1136 of the same structure. Here, the structure would be the SUPERSET and 1137 `int' the SUBSET. This relationship is also described in the comment at 1138 the beginning of this file. 1139 1140 This function should be called only once per SUPERSET/SUBSET pair. 1141 1142 It is illegal for SUPERSET to be zero; everything is implicitly a 1143 subset of alias set zero. */ 1144 1145 void 1146 record_alias_subset (alias_set_type superset, alias_set_type subset) 1147 { 1148 alias_set_entry *superset_entry; 1149 alias_set_entry *subset_entry; 1150 1151 /* It is possible in complex type situations for both sets to be the same, 1152 in which case we can ignore this operation. */ 1153 if (superset == subset) 1154 return; 1155 1156 gcc_assert (superset); 1157 1158 superset_entry = get_alias_set_entry (superset); 1159 if (superset_entry == 0) 1160 { 1161 /* Create an entry for the SUPERSET, so that we have a place to 1162 attach the SUBSET. */ 1163 superset_entry = init_alias_set_entry (superset); 1164 } 1165 1166 if (subset == 0) 1167 superset_entry->has_zero_child = 1; 1168 else 1169 { 1170 subset_entry = get_alias_set_entry (subset); 1171 if (!superset_entry->children) 1172 superset_entry->children 1173 = hash_map<alias_set_hash, int>::create_ggc (64); 1174 /* If there is an entry for the subset, enter all of its children 1175 (if they are not already present) as children of the SUPERSET. */ 1176 if (subset_entry) 1177 { 1178 if (subset_entry->has_zero_child) 1179 superset_entry->has_zero_child = true; 1180 if (subset_entry->has_pointer) 1181 superset_entry->has_pointer = true; 1182 1183 if (subset_entry->children) 1184 { 1185 hash_map<alias_set_hash, int>::iterator iter 1186 = subset_entry->children->begin (); 1187 for (; iter != subset_entry->children->end (); ++iter) 1188 superset_entry->children->put ((*iter).first, (*iter).second); 1189 } 1190 } 1191 1192 /* Enter the SUBSET itself as a child of the SUPERSET. */ 1193 superset_entry->children->put (subset, 0); 1194 } 1195 } 1196 1197 /* Record that component types of TYPE, if any, are part of that type for 1198 aliasing purposes. For record types, we only record component types 1199 for fields that are not marked non-addressable. For array types, we 1200 only record the component type if it is not marked non-aliased. */ 1201 1202 void 1203 record_component_aliases (tree type) 1204 { 1205 alias_set_type superset = get_alias_set (type); 1206 tree field; 1207 1208 if (superset == 0) 1209 return; 1210 1211 switch (TREE_CODE (type)) 1212 { 1213 case RECORD_TYPE: 1214 case UNION_TYPE: 1215 case QUAL_UNION_TYPE: 1216 for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field)) 1217 if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field)) 1218 { 1219 /* LTO type merging does not make any difference between 1220 component pointer types. We may have 1221 1222 struct foo {int *a;}; 1223 1224 as TYPE_CANONICAL of 1225 1226 struct bar {float *a;}; 1227 1228 Because accesses to int * and float * do not alias, we would get 1229 false negative when accessing the same memory location by 1230 float ** and bar *. We thus record the canonical type as: 1231 1232 struct {void *a;}; 1233 1234 void * is special cased and works as a universal pointer type. 1235 Accesses to it conflicts with accesses to any other pointer 1236 type. */ 1237 tree t = TREE_TYPE (field); 1238 if (in_lto_p) 1239 { 1240 /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their 1241 element type and that type has to be normalized to void *, 1242 too, in the case it is a pointer. */ 1243 while (!canonical_type_used_p (t) && !POINTER_TYPE_P (t)) 1244 { 1245 gcc_checking_assert (TYPE_STRUCTURAL_EQUALITY_P (t)); 1246 t = TREE_TYPE (t); 1247 } 1248 if (POINTER_TYPE_P (t)) 1249 t = ptr_type_node; 1250 else if (flag_checking) 1251 gcc_checking_assert (get_alias_set (t) 1252 == get_alias_set (TREE_TYPE (field))); 1253 } 1254 1255 record_alias_subset (superset, get_alias_set (t)); 1256 } 1257 break; 1258 1259 case COMPLEX_TYPE: 1260 record_alias_subset (superset, get_alias_set (TREE_TYPE (type))); 1261 break; 1262 1263 /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their 1264 element type. */ 1265 1266 default: 1267 break; 1268 } 1269 } 1270 1271 /* Allocate an alias set for use in storing and reading from the varargs 1272 spill area. */ 1273 1274 static GTY(()) alias_set_type varargs_set = -1; 1275 1276 alias_set_type 1277 get_varargs_alias_set (void) 1278 { 1279 #if 1 1280 /* We now lower VA_ARG_EXPR, and there's currently no way to attach the 1281 varargs alias set to an INDIRECT_REF (FIXME!), so we can't 1282 consistently use the varargs alias set for loads from the varargs 1283 area. So don't use it anywhere. */ 1284 return 0; 1285 #else 1286 if (varargs_set == -1) 1287 varargs_set = new_alias_set (); 1288 1289 return varargs_set; 1290 #endif 1291 } 1292 1293 /* Likewise, but used for the fixed portions of the frame, e.g., register 1294 save areas. */ 1295 1296 static GTY(()) alias_set_type frame_set = -1; 1297 1298 alias_set_type 1299 get_frame_alias_set (void) 1300 { 1301 if (frame_set == -1) 1302 frame_set = new_alias_set (); 1303 1304 return frame_set; 1305 } 1306 1307 /* Create a new, unique base with id ID. */ 1308 1309 static rtx 1310 unique_base_value (HOST_WIDE_INT id) 1311 { 1312 return gen_rtx_ADDRESS (Pmode, id); 1313 } 1314 1315 /* Return true if accesses based on any other base value cannot alias 1316 those based on X. */ 1317 1318 static bool 1319 unique_base_value_p (rtx x) 1320 { 1321 return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode; 1322 } 1323 1324 /* Return true if X is known to be a base value. */ 1325 1326 static bool 1327 known_base_value_p (rtx x) 1328 { 1329 switch (GET_CODE (x)) 1330 { 1331 case LABEL_REF: 1332 case SYMBOL_REF: 1333 return true; 1334 1335 case ADDRESS: 1336 /* Arguments may or may not be bases; we don't know for sure. */ 1337 return GET_MODE (x) != VOIDmode; 1338 1339 default: 1340 return false; 1341 } 1342 } 1343 1344 /* Inside SRC, the source of a SET, find a base address. */ 1345 1346 static rtx 1347 find_base_value (rtx src) 1348 { 1349 unsigned int regno; 1350 scalar_int_mode int_mode; 1351 1352 #if defined (FIND_BASE_TERM) 1353 /* Try machine-dependent ways to find the base term. */ 1354 src = FIND_BASE_TERM (src); 1355 #endif 1356 1357 switch (GET_CODE (src)) 1358 { 1359 case SYMBOL_REF: 1360 case LABEL_REF: 1361 return src; 1362 1363 case REG: 1364 regno = REGNO (src); 1365 /* At the start of a function, argument registers have known base 1366 values which may be lost later. Returning an ADDRESS 1367 expression here allows optimization based on argument values 1368 even when the argument registers are used for other purposes. */ 1369 if (regno < FIRST_PSEUDO_REGISTER && copying_arguments) 1370 return new_reg_base_value[regno]; 1371 1372 /* If a pseudo has a known base value, return it. Do not do this 1373 for non-fixed hard regs since it can result in a circular 1374 dependency chain for registers which have values at function entry. 1375 1376 The test above is not sufficient because the scheduler may move 1377 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN. */ 1378 if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno]) 1379 && regno < vec_safe_length (reg_base_value)) 1380 { 1381 /* If we're inside init_alias_analysis, use new_reg_base_value 1382 to reduce the number of relaxation iterations. */ 1383 if (new_reg_base_value && new_reg_base_value[regno] 1384 && DF_REG_DEF_COUNT (regno) == 1) 1385 return new_reg_base_value[regno]; 1386 1387 if ((*reg_base_value)[regno]) 1388 return (*reg_base_value)[regno]; 1389 } 1390 1391 return 0; 1392 1393 case MEM: 1394 /* Check for an argument passed in memory. Only record in the 1395 copying-arguments block; it is too hard to track changes 1396 otherwise. */ 1397 if (copying_arguments 1398 && (XEXP (src, 0) == arg_pointer_rtx 1399 || (GET_CODE (XEXP (src, 0)) == PLUS 1400 && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx))) 1401 return arg_base_value; 1402 return 0; 1403 1404 case CONST: 1405 src = XEXP (src, 0); 1406 if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS) 1407 break; 1408 1409 /* fall through */ 1410 1411 case PLUS: 1412 case MINUS: 1413 { 1414 rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1); 1415 1416 /* If either operand is a REG that is a known pointer, then it 1417 is the base. */ 1418 if (REG_P (src_0) && REG_POINTER (src_0)) 1419 return find_base_value (src_0); 1420 if (REG_P (src_1) && REG_POINTER (src_1)) 1421 return find_base_value (src_1); 1422 1423 /* If either operand is a REG, then see if we already have 1424 a known value for it. */ 1425 if (REG_P (src_0)) 1426 { 1427 temp = find_base_value (src_0); 1428 if (temp != 0) 1429 src_0 = temp; 1430 } 1431 1432 if (REG_P (src_1)) 1433 { 1434 temp = find_base_value (src_1); 1435 if (temp!= 0) 1436 src_1 = temp; 1437 } 1438 1439 /* If either base is named object or a special address 1440 (like an argument or stack reference), then use it for the 1441 base term. */ 1442 if (src_0 != 0 && known_base_value_p (src_0)) 1443 return src_0; 1444 1445 if (src_1 != 0 && known_base_value_p (src_1)) 1446 return src_1; 1447 1448 /* Guess which operand is the base address: 1449 If either operand is a symbol, then it is the base. If 1450 either operand is a CONST_INT, then the other is the base. */ 1451 if (CONST_INT_P (src_1) || CONSTANT_P (src_0)) 1452 return find_base_value (src_0); 1453 else if (CONST_INT_P (src_0) || CONSTANT_P (src_1)) 1454 return find_base_value (src_1); 1455 1456 return 0; 1457 } 1458 1459 case LO_SUM: 1460 /* The standard form is (lo_sum reg sym) so look only at the 1461 second operand. */ 1462 return find_base_value (XEXP (src, 1)); 1463 1464 case AND: 1465 /* If the second operand is constant set the base 1466 address to the first operand. */ 1467 if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0) 1468 return find_base_value (XEXP (src, 0)); 1469 return 0; 1470 1471 case TRUNCATE: 1472 /* As we do not know which address space the pointer is referring to, we can 1473 handle this only if the target does not support different pointer or 1474 address modes depending on the address space. */ 1475 if (!target_default_pointer_address_modes_p ()) 1476 break; 1477 if (!is_a <scalar_int_mode> (GET_MODE (src), &int_mode) 1478 || GET_MODE_PRECISION (int_mode) < GET_MODE_PRECISION (Pmode)) 1479 break; 1480 /* Fall through. */ 1481 case HIGH: 1482 case PRE_INC: 1483 case PRE_DEC: 1484 case POST_INC: 1485 case POST_DEC: 1486 case PRE_MODIFY: 1487 case POST_MODIFY: 1488 return find_base_value (XEXP (src, 0)); 1489 1490 case ZERO_EXTEND: 1491 case SIGN_EXTEND: /* used for NT/Alpha pointers */ 1492 /* As we do not know which address space the pointer is referring to, we can 1493 handle this only if the target does not support different pointer or 1494 address modes depending on the address space. */ 1495 if (!target_default_pointer_address_modes_p ()) 1496 break; 1497 1498 { 1499 rtx temp = find_base_value (XEXP (src, 0)); 1500 1501 if (temp != 0 && CONSTANT_P (temp)) 1502 temp = convert_memory_address (Pmode, temp); 1503 1504 return temp; 1505 } 1506 1507 default: 1508 break; 1509 } 1510 1511 return 0; 1512 } 1513 1514 /* Called from init_alias_analysis indirectly through note_stores, 1515 or directly if DEST is a register with a REG_NOALIAS note attached. 1516 SET is null in the latter case. */ 1517 1518 /* While scanning insns to find base values, reg_seen[N] is nonzero if 1519 register N has been set in this function. */ 1520 static sbitmap reg_seen; 1521 1522 static void 1523 record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED) 1524 { 1525 unsigned regno; 1526 rtx src; 1527 int n; 1528 1529 if (!REG_P (dest)) 1530 return; 1531 1532 regno = REGNO (dest); 1533 1534 gcc_checking_assert (regno < reg_base_value->length ()); 1535 1536 n = REG_NREGS (dest); 1537 if (n != 1) 1538 { 1539 while (--n >= 0) 1540 { 1541 bitmap_set_bit (reg_seen, regno + n); 1542 new_reg_base_value[regno + n] = 0; 1543 } 1544 return; 1545 } 1546 1547 if (set) 1548 { 1549 /* A CLOBBER wipes out any old value but does not prevent a previously 1550 unset register from acquiring a base address (i.e. reg_seen is not 1551 set). */ 1552 if (GET_CODE (set) == CLOBBER) 1553 { 1554 new_reg_base_value[regno] = 0; 1555 return; 1556 } 1557 src = SET_SRC (set); 1558 } 1559 else 1560 { 1561 /* There's a REG_NOALIAS note against DEST. */ 1562 if (bitmap_bit_p (reg_seen, regno)) 1563 { 1564 new_reg_base_value[regno] = 0; 1565 return; 1566 } 1567 bitmap_set_bit (reg_seen, regno); 1568 new_reg_base_value[regno] = unique_base_value (unique_id++); 1569 return; 1570 } 1571 1572 /* If this is not the first set of REGNO, see whether the new value 1573 is related to the old one. There are two cases of interest: 1574 1575 (1) The register might be assigned an entirely new value 1576 that has the same base term as the original set. 1577 1578 (2) The set might be a simple self-modification that 1579 cannot change REGNO's base value. 1580 1581 If neither case holds, reject the original base value as invalid. 1582 Note that the following situation is not detected: 1583 1584 extern int x, y; int *p = &x; p += (&y-&x); 1585 1586 ANSI C does not allow computing the difference of addresses 1587 of distinct top level objects. */ 1588 if (new_reg_base_value[regno] != 0 1589 && find_base_value (src) != new_reg_base_value[regno]) 1590 switch (GET_CODE (src)) 1591 { 1592 case LO_SUM: 1593 case MINUS: 1594 if (XEXP (src, 0) != dest && XEXP (src, 1) != dest) 1595 new_reg_base_value[regno] = 0; 1596 break; 1597 case PLUS: 1598 /* If the value we add in the PLUS is also a valid base value, 1599 this might be the actual base value, and the original value 1600 an index. */ 1601 { 1602 rtx other = NULL_RTX; 1603 1604 if (XEXP (src, 0) == dest) 1605 other = XEXP (src, 1); 1606 else if (XEXP (src, 1) == dest) 1607 other = XEXP (src, 0); 1608 1609 if (! other || find_base_value (other)) 1610 new_reg_base_value[regno] = 0; 1611 break; 1612 } 1613 case AND: 1614 if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1))) 1615 new_reg_base_value[regno] = 0; 1616 break; 1617 default: 1618 new_reg_base_value[regno] = 0; 1619 break; 1620 } 1621 /* If this is the first set of a register, record the value. */ 1622 else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno]) 1623 && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0) 1624 new_reg_base_value[regno] = find_base_value (src); 1625 1626 bitmap_set_bit (reg_seen, regno); 1627 } 1628 1629 /* Return REG_BASE_VALUE for REGNO. Selective scheduler uses this to avoid 1630 using hard registers with non-null REG_BASE_VALUE for renaming. */ 1631 rtx 1632 get_reg_base_value (unsigned int regno) 1633 { 1634 return (*reg_base_value)[regno]; 1635 } 1636 1637 /* If a value is known for REGNO, return it. */ 1638 1639 rtx 1640 get_reg_known_value (unsigned int regno) 1641 { 1642 if (regno >= FIRST_PSEUDO_REGISTER) 1643 { 1644 regno -= FIRST_PSEUDO_REGISTER; 1645 if (regno < vec_safe_length (reg_known_value)) 1646 return (*reg_known_value)[regno]; 1647 } 1648 return NULL; 1649 } 1650 1651 /* Set it. */ 1652 1653 static void 1654 set_reg_known_value (unsigned int regno, rtx val) 1655 { 1656 if (regno >= FIRST_PSEUDO_REGISTER) 1657 { 1658 regno -= FIRST_PSEUDO_REGISTER; 1659 if (regno < vec_safe_length (reg_known_value)) 1660 (*reg_known_value)[regno] = val; 1661 } 1662 } 1663 1664 /* Similarly for reg_known_equiv_p. */ 1665 1666 bool 1667 get_reg_known_equiv_p (unsigned int regno) 1668 { 1669 if (regno >= FIRST_PSEUDO_REGISTER) 1670 { 1671 regno -= FIRST_PSEUDO_REGISTER; 1672 if (regno < vec_safe_length (reg_known_value)) 1673 return bitmap_bit_p (reg_known_equiv_p, regno); 1674 } 1675 return false; 1676 } 1677 1678 static void 1679 set_reg_known_equiv_p (unsigned int regno, bool val) 1680 { 1681 if (regno >= FIRST_PSEUDO_REGISTER) 1682 { 1683 regno -= FIRST_PSEUDO_REGISTER; 1684 if (regno < vec_safe_length (reg_known_value)) 1685 { 1686 if (val) 1687 bitmap_set_bit (reg_known_equiv_p, regno); 1688 else 1689 bitmap_clear_bit (reg_known_equiv_p, regno); 1690 } 1691 } 1692 } 1693 1694 1695 /* Returns a canonical version of X, from the point of view alias 1696 analysis. (For example, if X is a MEM whose address is a register, 1697 and the register has a known value (say a SYMBOL_REF), then a MEM 1698 whose address is the SYMBOL_REF is returned.) */ 1699 1700 rtx 1701 canon_rtx (rtx x) 1702 { 1703 /* Recursively look for equivalences. */ 1704 if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER) 1705 { 1706 rtx t = get_reg_known_value (REGNO (x)); 1707 if (t == x) 1708 return x; 1709 if (t) 1710 return canon_rtx (t); 1711 } 1712 1713 if (GET_CODE (x) == PLUS) 1714 { 1715 rtx x0 = canon_rtx (XEXP (x, 0)); 1716 rtx x1 = canon_rtx (XEXP (x, 1)); 1717 1718 if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1)) 1719 return simplify_gen_binary (PLUS, GET_MODE (x), x0, x1); 1720 } 1721 1722 /* This gives us much better alias analysis when called from 1723 the loop optimizer. Note we want to leave the original 1724 MEM alone, but need to return the canonicalized MEM with 1725 all the flags with their original values. */ 1726 else if (MEM_P (x)) 1727 x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0))); 1728 1729 return x; 1730 } 1731 1732 /* Return 1 if X and Y are identical-looking rtx's. 1733 Expect that X and Y has been already canonicalized. 1734 1735 We use the data in reg_known_value above to see if two registers with 1736 different numbers are, in fact, equivalent. */ 1737 1738 static int 1739 rtx_equal_for_memref_p (const_rtx x, const_rtx y) 1740 { 1741 int i; 1742 int j; 1743 enum rtx_code code; 1744 const char *fmt; 1745 1746 if (x == 0 && y == 0) 1747 return 1; 1748 if (x == 0 || y == 0) 1749 return 0; 1750 1751 if (x == y) 1752 return 1; 1753 1754 code = GET_CODE (x); 1755 /* Rtx's of different codes cannot be equal. */ 1756 if (code != GET_CODE (y)) 1757 return 0; 1758 1759 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. 1760 (REG:SI x) and (REG:HI x) are NOT equivalent. */ 1761 1762 if (GET_MODE (x) != GET_MODE (y)) 1763 return 0; 1764 1765 /* Some RTL can be compared without a recursive examination. */ 1766 switch (code) 1767 { 1768 case REG: 1769 return REGNO (x) == REGNO (y); 1770 1771 case LABEL_REF: 1772 return label_ref_label (x) == label_ref_label (y); 1773 1774 case SYMBOL_REF: 1775 return compare_base_symbol_refs (x, y) == 1; 1776 1777 case ENTRY_VALUE: 1778 /* This is magic, don't go through canonicalization et al. */ 1779 return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y)); 1780 1781 case VALUE: 1782 CASE_CONST_UNIQUE: 1783 /* Pointer equality guarantees equality for these nodes. */ 1784 return 0; 1785 1786 default: 1787 break; 1788 } 1789 1790 /* canon_rtx knows how to handle plus. No need to canonicalize. */ 1791 if (code == PLUS) 1792 return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0)) 1793 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1))) 1794 || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1)) 1795 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0)))); 1796 /* For commutative operations, the RTX match if the operand match in any 1797 order. Also handle the simple binary and unary cases without a loop. */ 1798 if (COMMUTATIVE_P (x)) 1799 { 1800 rtx xop0 = canon_rtx (XEXP (x, 0)); 1801 rtx yop0 = canon_rtx (XEXP (y, 0)); 1802 rtx yop1 = canon_rtx (XEXP (y, 1)); 1803 1804 return ((rtx_equal_for_memref_p (xop0, yop0) 1805 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1)) 1806 || (rtx_equal_for_memref_p (xop0, yop1) 1807 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0))); 1808 } 1809 else if (NON_COMMUTATIVE_P (x)) 1810 { 1811 return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)), 1812 canon_rtx (XEXP (y, 0))) 1813 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), 1814 canon_rtx (XEXP (y, 1)))); 1815 } 1816 else if (UNARY_P (x)) 1817 return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)), 1818 canon_rtx (XEXP (y, 0))); 1819 1820 /* Compare the elements. If any pair of corresponding elements 1821 fail to match, return 0 for the whole things. 1822 1823 Limit cases to types which actually appear in addresses. */ 1824 1825 fmt = GET_RTX_FORMAT (code); 1826 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) 1827 { 1828 switch (fmt[i]) 1829 { 1830 case 'i': 1831 if (XINT (x, i) != XINT (y, i)) 1832 return 0; 1833 break; 1834 1835 case 'p': 1836 if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y))) 1837 return 0; 1838 break; 1839 1840 case 'E': 1841 /* Two vectors must have the same length. */ 1842 if (XVECLEN (x, i) != XVECLEN (y, i)) 1843 return 0; 1844 1845 /* And the corresponding elements must match. */ 1846 for (j = 0; j < XVECLEN (x, i); j++) 1847 if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)), 1848 canon_rtx (XVECEXP (y, i, j))) == 0) 1849 return 0; 1850 break; 1851 1852 case 'e': 1853 if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)), 1854 canon_rtx (XEXP (y, i))) == 0) 1855 return 0; 1856 break; 1857 1858 /* This can happen for asm operands. */ 1859 case 's': 1860 if (strcmp (XSTR (x, i), XSTR (y, i))) 1861 return 0; 1862 break; 1863 1864 /* This can happen for an asm which clobbers memory. */ 1865 case '0': 1866 break; 1867 1868 /* It is believed that rtx's at this level will never 1869 contain anything but integers and other rtx's, 1870 except for within LABEL_REFs and SYMBOL_REFs. */ 1871 default: 1872 gcc_unreachable (); 1873 } 1874 } 1875 return 1; 1876 } 1877 1878 static rtx 1879 find_base_term (rtx x, vec<std::pair<cselib_val *, 1880 struct elt_loc_list *> > &visited_vals) 1881 { 1882 cselib_val *val; 1883 struct elt_loc_list *l, *f; 1884 rtx ret; 1885 scalar_int_mode int_mode; 1886 1887 #if defined (FIND_BASE_TERM) 1888 /* Try machine-dependent ways to find the base term. */ 1889 x = FIND_BASE_TERM (x); 1890 #endif 1891 1892 switch (GET_CODE (x)) 1893 { 1894 case REG: 1895 return REG_BASE_VALUE (x); 1896 1897 case TRUNCATE: 1898 /* As we do not know which address space the pointer is referring to, we can 1899 handle this only if the target does not support different pointer or 1900 address modes depending on the address space. */ 1901 if (!target_default_pointer_address_modes_p ()) 1902 return 0; 1903 if (!is_a <scalar_int_mode> (GET_MODE (x), &int_mode) 1904 || GET_MODE_PRECISION (int_mode) < GET_MODE_PRECISION (Pmode)) 1905 return 0; 1906 /* Fall through. */ 1907 case HIGH: 1908 case PRE_INC: 1909 case PRE_DEC: 1910 case POST_INC: 1911 case POST_DEC: 1912 case PRE_MODIFY: 1913 case POST_MODIFY: 1914 return find_base_term (XEXP (x, 0), visited_vals); 1915 1916 case ZERO_EXTEND: 1917 case SIGN_EXTEND: /* Used for Alpha/NT pointers */ 1918 /* As we do not know which address space the pointer is referring to, we can 1919 handle this only if the target does not support different pointer or 1920 address modes depending on the address space. */ 1921 if (!target_default_pointer_address_modes_p ()) 1922 return 0; 1923 1924 { 1925 rtx temp = find_base_term (XEXP (x, 0), visited_vals); 1926 1927 if (temp != 0 && CONSTANT_P (temp)) 1928 temp = convert_memory_address (Pmode, temp); 1929 1930 return temp; 1931 } 1932 1933 case VALUE: 1934 val = CSELIB_VAL_PTR (x); 1935 ret = NULL_RTX; 1936 1937 if (!val) 1938 return ret; 1939 1940 if (cselib_sp_based_value_p (val)) 1941 return static_reg_base_value[STACK_POINTER_REGNUM]; 1942 1943 f = val->locs; 1944 /* Reset val->locs to avoid infinite recursion. */ 1945 if (f) 1946 visited_vals.safe_push (std::make_pair (val, f)); 1947 val->locs = NULL; 1948 1949 for (l = f; l; l = l->next) 1950 if (GET_CODE (l->loc) == VALUE 1951 && CSELIB_VAL_PTR (l->loc)->locs 1952 && !CSELIB_VAL_PTR (l->loc)->locs->next 1953 && CSELIB_VAL_PTR (l->loc)->locs->loc == x) 1954 continue; 1955 else if ((ret = find_base_term (l->loc, visited_vals)) != 0) 1956 break; 1957 1958 return ret; 1959 1960 case LO_SUM: 1961 /* The standard form is (lo_sum reg sym) so look only at the 1962 second operand. */ 1963 return find_base_term (XEXP (x, 1), visited_vals); 1964 1965 case CONST: 1966 x = XEXP (x, 0); 1967 if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS) 1968 return 0; 1969 /* Fall through. */ 1970 case PLUS: 1971 case MINUS: 1972 { 1973 rtx tmp1 = XEXP (x, 0); 1974 rtx tmp2 = XEXP (x, 1); 1975 1976 /* This is a little bit tricky since we have to determine which of 1977 the two operands represents the real base address. Otherwise this 1978 routine may return the index register instead of the base register. 1979 1980 That may cause us to believe no aliasing was possible, when in 1981 fact aliasing is possible. 1982 1983 We use a few simple tests to guess the base register. Additional 1984 tests can certainly be added. For example, if one of the operands 1985 is a shift or multiply, then it must be the index register and the 1986 other operand is the base register. */ 1987 1988 if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2)) 1989 return find_base_term (tmp2, visited_vals); 1990 1991 /* If either operand is known to be a pointer, then prefer it 1992 to determine the base term. */ 1993 if (REG_P (tmp1) && REG_POINTER (tmp1)) 1994 ; 1995 else if (REG_P (tmp2) && REG_POINTER (tmp2)) 1996 std::swap (tmp1, tmp2); 1997 /* If second argument is constant which has base term, prefer it 1998 over variable tmp1. See PR64025. */ 1999 else if (CONSTANT_P (tmp2) && !CONST_INT_P (tmp2)) 2000 std::swap (tmp1, tmp2); 2001 2002 /* Go ahead and find the base term for both operands. If either base 2003 term is from a pointer or is a named object or a special address 2004 (like an argument or stack reference), then use it for the 2005 base term. */ 2006 rtx base = find_base_term (tmp1, visited_vals); 2007 if (base != NULL_RTX 2008 && ((REG_P (tmp1) && REG_POINTER (tmp1)) 2009 || known_base_value_p (base))) 2010 return base; 2011 base = find_base_term (tmp2, visited_vals); 2012 if (base != NULL_RTX 2013 && ((REG_P (tmp2) && REG_POINTER (tmp2)) 2014 || known_base_value_p (base))) 2015 return base; 2016 2017 /* We could not determine which of the two operands was the 2018 base register and which was the index. So we can determine 2019 nothing from the base alias check. */ 2020 return 0; 2021 } 2022 2023 case AND: 2024 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0) 2025 return find_base_term (XEXP (x, 0), visited_vals); 2026 return 0; 2027 2028 case SYMBOL_REF: 2029 case LABEL_REF: 2030 return x; 2031 2032 default: 2033 return 0; 2034 } 2035 } 2036 2037 /* Wrapper around the worker above which removes locs from visited VALUEs 2038 to avoid visiting them multiple times. We unwind that changes here. */ 2039 2040 static rtx 2041 find_base_term (rtx x) 2042 { 2043 auto_vec<std::pair<cselib_val *, struct elt_loc_list *>, 32> visited_vals; 2044 rtx res = find_base_term (x, visited_vals); 2045 for (unsigned i = 0; i < visited_vals.length (); ++i) 2046 visited_vals[i].first->locs = visited_vals[i].second; 2047 return res; 2048 } 2049 2050 /* Return true if accesses to address X may alias accesses based 2051 on the stack pointer. */ 2052 2053 bool 2054 may_be_sp_based_p (rtx x) 2055 { 2056 rtx base = find_base_term (x); 2057 return !base || base == static_reg_base_value[STACK_POINTER_REGNUM]; 2058 } 2059 2060 /* BASE1 and BASE2 are decls. Return 1 if they refer to same object, 0 2061 if they refer to different objects and -1 if we can not decide. */ 2062 2063 int 2064 compare_base_decls (tree base1, tree base2) 2065 { 2066 int ret; 2067 gcc_checking_assert (DECL_P (base1) && DECL_P (base2)); 2068 if (base1 == base2) 2069 return 1; 2070 2071 /* If we have two register decls with register specification we 2072 cannot decide unless their assembler names are the same. */ 2073 if (DECL_REGISTER (base1) 2074 && DECL_REGISTER (base2) 2075 && HAS_DECL_ASSEMBLER_NAME_P (base1) 2076 && HAS_DECL_ASSEMBLER_NAME_P (base2) 2077 && DECL_ASSEMBLER_NAME_SET_P (base1) 2078 && DECL_ASSEMBLER_NAME_SET_P (base2)) 2079 { 2080 if (DECL_ASSEMBLER_NAME_RAW (base1) == DECL_ASSEMBLER_NAME_RAW (base2)) 2081 return 1; 2082 return -1; 2083 } 2084 2085 /* Declarations of non-automatic variables may have aliases. All other 2086 decls are unique. */ 2087 if (!decl_in_symtab_p (base1) 2088 || !decl_in_symtab_p (base2)) 2089 return 0; 2090 2091 /* Don't cause symbols to be inserted by the act of checking. */ 2092 symtab_node *node1 = symtab_node::get (base1); 2093 if (!node1) 2094 return 0; 2095 symtab_node *node2 = symtab_node::get (base2); 2096 if (!node2) 2097 return 0; 2098 2099 ret = node1->equal_address_to (node2, true); 2100 return ret; 2101 } 2102 2103 /* Same as compare_base_decls but for SYMBOL_REF. */ 2104 2105 static int 2106 compare_base_symbol_refs (const_rtx x_base, const_rtx y_base) 2107 { 2108 tree x_decl = SYMBOL_REF_DECL (x_base); 2109 tree y_decl = SYMBOL_REF_DECL (y_base); 2110 bool binds_def = true; 2111 2112 if (XSTR (x_base, 0) == XSTR (y_base, 0)) 2113 return 1; 2114 if (x_decl && y_decl) 2115 return compare_base_decls (x_decl, y_decl); 2116 if (x_decl || y_decl) 2117 { 2118 if (!x_decl) 2119 { 2120 std::swap (x_decl, y_decl); 2121 std::swap (x_base, y_base); 2122 } 2123 /* We handle specially only section anchors and assume that other 2124 labels may overlap with user variables in an arbitrary way. */ 2125 if (!SYMBOL_REF_HAS_BLOCK_INFO_P (y_base)) 2126 return -1; 2127 /* Anchors contains static VAR_DECLs and CONST_DECLs. We are safe 2128 to ignore CONST_DECLs because they are readonly. */ 2129 if (!VAR_P (x_decl) 2130 || (!TREE_STATIC (x_decl) && !TREE_PUBLIC (x_decl))) 2131 return 0; 2132 2133 symtab_node *x_node = symtab_node::get_create (x_decl) 2134 ->ultimate_alias_target (); 2135 /* External variable can not be in section anchor. */ 2136 if (!x_node->definition) 2137 return 0; 2138 x_base = XEXP (DECL_RTL (x_node->decl), 0); 2139 /* If not in anchor, we can disambiguate. */ 2140 if (!SYMBOL_REF_HAS_BLOCK_INFO_P (x_base)) 2141 return 0; 2142 2143 /* We have an alias of anchored variable. If it can be interposed; 2144 we must assume it may or may not alias its anchor. */ 2145 binds_def = decl_binds_to_current_def_p (x_decl); 2146 } 2147 /* If we have variable in section anchor, we can compare by offset. */ 2148 if (SYMBOL_REF_HAS_BLOCK_INFO_P (x_base) 2149 && SYMBOL_REF_HAS_BLOCK_INFO_P (y_base)) 2150 { 2151 if (SYMBOL_REF_BLOCK (x_base) != SYMBOL_REF_BLOCK (y_base)) 2152 return 0; 2153 if (SYMBOL_REF_BLOCK_OFFSET (x_base) == SYMBOL_REF_BLOCK_OFFSET (y_base)) 2154 return binds_def ? 1 : -1; 2155 if (SYMBOL_REF_ANCHOR_P (x_base) != SYMBOL_REF_ANCHOR_P (y_base)) 2156 return -1; 2157 return 0; 2158 } 2159 /* In general we assume that memory locations pointed to by different labels 2160 may overlap in undefined ways. */ 2161 return -1; 2162 } 2163 2164 /* Return 0 if the addresses X and Y are known to point to different 2165 objects, 1 if they might be pointers to the same object. */ 2166 2167 static int 2168 base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base, 2169 machine_mode x_mode, machine_mode y_mode) 2170 { 2171 /* If the address itself has no known base see if a known equivalent 2172 value has one. If either address still has no known base, nothing 2173 is known about aliasing. */ 2174 if (x_base == 0) 2175 { 2176 rtx x_c; 2177 2178 if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x) 2179 return 1; 2180 2181 x_base = find_base_term (x_c); 2182 if (x_base == 0) 2183 return 1; 2184 } 2185 2186 if (y_base == 0) 2187 { 2188 rtx y_c; 2189 if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y) 2190 return 1; 2191 2192 y_base = find_base_term (y_c); 2193 if (y_base == 0) 2194 return 1; 2195 } 2196 2197 /* If the base addresses are equal nothing is known about aliasing. */ 2198 if (rtx_equal_p (x_base, y_base)) 2199 return 1; 2200 2201 /* The base addresses are different expressions. If they are not accessed 2202 via AND, there is no conflict. We can bring knowledge of object 2203 alignment into play here. For example, on alpha, "char a, b;" can 2204 alias one another, though "char a; long b;" cannot. AND addresses may 2205 implicitly alias surrounding objects; i.e. unaligned access in DImode 2206 via AND address can alias all surrounding object types except those 2207 with aligment 8 or higher. */ 2208 if (GET_CODE (x) == AND && GET_CODE (y) == AND) 2209 return 1; 2210 if (GET_CODE (x) == AND 2211 && (!CONST_INT_P (XEXP (x, 1)) 2212 || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1)))) 2213 return 1; 2214 if (GET_CODE (y) == AND 2215 && (!CONST_INT_P (XEXP (y, 1)) 2216 || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1)))) 2217 return 1; 2218 2219 /* Differing symbols not accessed via AND never alias. */ 2220 if (GET_CODE (x_base) == SYMBOL_REF && GET_CODE (y_base) == SYMBOL_REF) 2221 return compare_base_symbol_refs (x_base, y_base) != 0; 2222 2223 if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS) 2224 return 0; 2225 2226 if (unique_base_value_p (x_base) || unique_base_value_p (y_base)) 2227 return 0; 2228 2229 return 1; 2230 } 2231 2232 /* Return TRUE if EXPR refers to a VALUE whose uid is greater than 2233 (or equal to) that of V. */ 2234 2235 static bool 2236 refs_newer_value_p (const_rtx expr, rtx v) 2237 { 2238 int minuid = CSELIB_VAL_PTR (v)->uid; 2239 subrtx_iterator::array_type array; 2240 FOR_EACH_SUBRTX (iter, array, expr, NONCONST) 2241 if (GET_CODE (*iter) == VALUE && CSELIB_VAL_PTR (*iter)->uid >= minuid) 2242 return true; 2243 return false; 2244 } 2245 2246 /* Convert the address X into something we can use. This is done by returning 2247 it unchanged unless it is a VALUE or VALUE +/- constant; for VALUE 2248 we call cselib to get a more useful rtx. */ 2249 2250 rtx 2251 get_addr (rtx x) 2252 { 2253 cselib_val *v; 2254 struct elt_loc_list *l; 2255 2256 if (GET_CODE (x) != VALUE) 2257 { 2258 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS) 2259 && GET_CODE (XEXP (x, 0)) == VALUE 2260 && CONST_SCALAR_INT_P (XEXP (x, 1))) 2261 { 2262 rtx op0 = get_addr (XEXP (x, 0)); 2263 if (op0 != XEXP (x, 0)) 2264 { 2265 if (GET_CODE (x) == PLUS 2266 && GET_CODE (XEXP (x, 1)) == CONST_INT) 2267 return plus_constant (GET_MODE (x), op0, INTVAL (XEXP (x, 1))); 2268 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), 2269 op0, XEXP (x, 1)); 2270 } 2271 } 2272 return x; 2273 } 2274 v = CSELIB_VAL_PTR (x); 2275 if (v) 2276 { 2277 bool have_equivs = cselib_have_permanent_equivalences (); 2278 if (have_equivs) 2279 v = canonical_cselib_val (v); 2280 for (l = v->locs; l; l = l->next) 2281 if (CONSTANT_P (l->loc)) 2282 return l->loc; 2283 for (l = v->locs; l; l = l->next) 2284 if (!REG_P (l->loc) && !MEM_P (l->loc) 2285 /* Avoid infinite recursion when potentially dealing with 2286 var-tracking artificial equivalences, by skipping the 2287 equivalences themselves, and not choosing expressions 2288 that refer to newer VALUEs. */ 2289 && (!have_equivs 2290 || (GET_CODE (l->loc) != VALUE 2291 && !refs_newer_value_p (l->loc, x)))) 2292 return l->loc; 2293 if (have_equivs) 2294 { 2295 for (l = v->locs; l; l = l->next) 2296 if (REG_P (l->loc) 2297 || (GET_CODE (l->loc) != VALUE 2298 && !refs_newer_value_p (l->loc, x))) 2299 return l->loc; 2300 /* Return the canonical value. */ 2301 return v->val_rtx; 2302 } 2303 if (v->locs) 2304 return v->locs->loc; 2305 } 2306 return x; 2307 } 2308 2309 /* Return the address of the (N_REFS + 1)th memory reference to ADDR 2310 where SIZE is the size in bytes of the memory reference. If ADDR 2311 is not modified by the memory reference then ADDR is returned. */ 2312 2313 static rtx 2314 addr_side_effect_eval (rtx addr, poly_int64 size, int n_refs) 2315 { 2316 poly_int64 offset = 0; 2317 2318 switch (GET_CODE (addr)) 2319 { 2320 case PRE_INC: 2321 offset = (n_refs + 1) * size; 2322 break; 2323 case PRE_DEC: 2324 offset = -(n_refs + 1) * size; 2325 break; 2326 case POST_INC: 2327 offset = n_refs * size; 2328 break; 2329 case POST_DEC: 2330 offset = -n_refs * size; 2331 break; 2332 2333 default: 2334 return addr; 2335 } 2336 2337 addr = plus_constant (GET_MODE (addr), XEXP (addr, 0), offset); 2338 addr = canon_rtx (addr); 2339 2340 return addr; 2341 } 2342 2343 /* Return TRUE if an object X sized at XSIZE bytes and another object 2344 Y sized at YSIZE bytes, starting C bytes after X, may overlap. If 2345 any of the sizes is zero, assume an overlap, otherwise use the 2346 absolute value of the sizes as the actual sizes. */ 2347 2348 static inline bool 2349 offset_overlap_p (poly_int64 c, poly_int64 xsize, poly_int64 ysize) 2350 { 2351 if (known_eq (xsize, 0) || known_eq (ysize, 0)) 2352 return true; 2353 2354 if (maybe_ge (c, 0)) 2355 return maybe_gt (maybe_lt (xsize, 0) ? -xsize : xsize, c); 2356 else 2357 return maybe_gt (maybe_lt (ysize, 0) ? -ysize : ysize, -c); 2358 } 2359 2360 /* Return one if X and Y (memory addresses) reference the 2361 same location in memory or if the references overlap. 2362 Return zero if they do not overlap, else return 2363 minus one in which case they still might reference the same location. 2364 2365 C is an offset accumulator. When 2366 C is nonzero, we are testing aliases between X and Y + C. 2367 XSIZE is the size in bytes of the X reference, 2368 similarly YSIZE is the size in bytes for Y. 2369 Expect that canon_rtx has been already called for X and Y. 2370 2371 If XSIZE or YSIZE is zero, we do not know the amount of memory being 2372 referenced (the reference was BLKmode), so make the most pessimistic 2373 assumptions. 2374 2375 If XSIZE or YSIZE is negative, we may access memory outside the object 2376 being referenced as a side effect. This can happen when using AND to 2377 align memory references, as is done on the Alpha. 2378 2379 Nice to notice that varying addresses cannot conflict with fp if no 2380 local variables had their addresses taken, but that's too hard now. 2381 2382 ??? Contrary to the tree alias oracle this does not return 2383 one for X + non-constant and Y + non-constant when X and Y are equal. 2384 If that is fixed the TBAA hack for union type-punning can be removed. */ 2385 2386 static int 2387 memrefs_conflict_p (poly_int64 xsize, rtx x, poly_int64 ysize, rtx y, 2388 poly_int64 c) 2389 { 2390 if (GET_CODE (x) == VALUE) 2391 { 2392 if (REG_P (y)) 2393 { 2394 struct elt_loc_list *l = NULL; 2395 if (CSELIB_VAL_PTR (x)) 2396 for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs; 2397 l; l = l->next) 2398 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y)) 2399 break; 2400 if (l) 2401 x = y; 2402 else 2403 x = get_addr (x); 2404 } 2405 /* Don't call get_addr if y is the same VALUE. */ 2406 else if (x != y) 2407 x = get_addr (x); 2408 } 2409 if (GET_CODE (y) == VALUE) 2410 { 2411 if (REG_P (x)) 2412 { 2413 struct elt_loc_list *l = NULL; 2414 if (CSELIB_VAL_PTR (y)) 2415 for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs; 2416 l; l = l->next) 2417 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x)) 2418 break; 2419 if (l) 2420 y = x; 2421 else 2422 y = get_addr (y); 2423 } 2424 /* Don't call get_addr if x is the same VALUE. */ 2425 else if (y != x) 2426 y = get_addr (y); 2427 } 2428 if (GET_CODE (x) == HIGH) 2429 x = XEXP (x, 0); 2430 else if (GET_CODE (x) == LO_SUM) 2431 x = XEXP (x, 1); 2432 else 2433 x = addr_side_effect_eval (x, maybe_lt (xsize, 0) ? -xsize : xsize, 0); 2434 if (GET_CODE (y) == HIGH) 2435 y = XEXP (y, 0); 2436 else if (GET_CODE (y) == LO_SUM) 2437 y = XEXP (y, 1); 2438 else 2439 y = addr_side_effect_eval (y, maybe_lt (ysize, 0) ? -ysize : ysize, 0); 2440 2441 if (GET_CODE (x) == SYMBOL_REF && GET_CODE (y) == SYMBOL_REF) 2442 { 2443 int cmp = compare_base_symbol_refs (x,y); 2444 2445 /* If both decls are the same, decide by offsets. */ 2446 if (cmp == 1) 2447 return offset_overlap_p (c, xsize, ysize); 2448 /* Assume a potential overlap for symbolic addresses that went 2449 through alignment adjustments (i.e., that have negative 2450 sizes), because we can't know how far they are from each 2451 other. */ 2452 if (maybe_lt (xsize, 0) || maybe_lt (ysize, 0)) 2453 return -1; 2454 /* If decls are different or we know by offsets that there is no overlap, 2455 we win. */ 2456 if (!cmp || !offset_overlap_p (c, xsize, ysize)) 2457 return 0; 2458 /* Decls may or may not be different and offsets overlap....*/ 2459 return -1; 2460 } 2461 else if (rtx_equal_for_memref_p (x, y)) 2462 { 2463 return offset_overlap_p (c, xsize, ysize); 2464 } 2465 2466 /* This code used to check for conflicts involving stack references and 2467 globals but the base address alias code now handles these cases. */ 2468 2469 if (GET_CODE (x) == PLUS) 2470 { 2471 /* The fact that X is canonicalized means that this 2472 PLUS rtx is canonicalized. */ 2473 rtx x0 = XEXP (x, 0); 2474 rtx x1 = XEXP (x, 1); 2475 2476 /* However, VALUEs might end up in different positions even in 2477 canonical PLUSes. Comparing their addresses is enough. */ 2478 if (x0 == y) 2479 return memrefs_conflict_p (xsize, x1, ysize, const0_rtx, c); 2480 else if (x1 == y) 2481 return memrefs_conflict_p (xsize, x0, ysize, const0_rtx, c); 2482 2483 poly_int64 cx1, cy1; 2484 if (GET_CODE (y) == PLUS) 2485 { 2486 /* The fact that Y is canonicalized means that this 2487 PLUS rtx is canonicalized. */ 2488 rtx y0 = XEXP (y, 0); 2489 rtx y1 = XEXP (y, 1); 2490 2491 if (x0 == y1) 2492 return memrefs_conflict_p (xsize, x1, ysize, y0, c); 2493 if (x1 == y0) 2494 return memrefs_conflict_p (xsize, x0, ysize, y1, c); 2495 2496 if (rtx_equal_for_memref_p (x1, y1)) 2497 return memrefs_conflict_p (xsize, x0, ysize, y0, c); 2498 if (rtx_equal_for_memref_p (x0, y0)) 2499 return memrefs_conflict_p (xsize, x1, ysize, y1, c); 2500 if (poly_int_rtx_p (x1, &cx1)) 2501 { 2502 if (poly_int_rtx_p (y1, &cy1)) 2503 return memrefs_conflict_p (xsize, x0, ysize, y0, 2504 c - cx1 + cy1); 2505 else 2506 return memrefs_conflict_p (xsize, x0, ysize, y, c - cx1); 2507 } 2508 else if (poly_int_rtx_p (y1, &cy1)) 2509 return memrefs_conflict_p (xsize, x, ysize, y0, c + cy1); 2510 2511 return -1; 2512 } 2513 else if (poly_int_rtx_p (x1, &cx1)) 2514 return memrefs_conflict_p (xsize, x0, ysize, y, c - cx1); 2515 } 2516 else if (GET_CODE (y) == PLUS) 2517 { 2518 /* The fact that Y is canonicalized means that this 2519 PLUS rtx is canonicalized. */ 2520 rtx y0 = XEXP (y, 0); 2521 rtx y1 = XEXP (y, 1); 2522 2523 if (x == y0) 2524 return memrefs_conflict_p (xsize, const0_rtx, ysize, y1, c); 2525 if (x == y1) 2526 return memrefs_conflict_p (xsize, const0_rtx, ysize, y0, c); 2527 2528 poly_int64 cy1; 2529 if (poly_int_rtx_p (y1, &cy1)) 2530 return memrefs_conflict_p (xsize, x, ysize, y0, c + cy1); 2531 else 2532 return -1; 2533 } 2534 2535 if (GET_CODE (x) == GET_CODE (y)) 2536 switch (GET_CODE (x)) 2537 { 2538 case MULT: 2539 { 2540 /* Handle cases where we expect the second operands to be the 2541 same, and check only whether the first operand would conflict 2542 or not. */ 2543 rtx x0, y0; 2544 rtx x1 = canon_rtx (XEXP (x, 1)); 2545 rtx y1 = canon_rtx (XEXP (y, 1)); 2546 if (! rtx_equal_for_memref_p (x1, y1)) 2547 return -1; 2548 x0 = canon_rtx (XEXP (x, 0)); 2549 y0 = canon_rtx (XEXP (y, 0)); 2550 if (rtx_equal_for_memref_p (x0, y0)) 2551 return offset_overlap_p (c, xsize, ysize); 2552 2553 /* Can't properly adjust our sizes. */ 2554 if (!CONST_INT_P (x1) 2555 || !can_div_trunc_p (xsize, INTVAL (x1), &xsize) 2556 || !can_div_trunc_p (ysize, INTVAL (x1), &ysize) 2557 || !can_div_trunc_p (c, INTVAL (x1), &c)) 2558 return -1; 2559 return memrefs_conflict_p (xsize, x0, ysize, y0, c); 2560 } 2561 2562 default: 2563 break; 2564 } 2565 2566 /* Deal with alignment ANDs by adjusting offset and size so as to 2567 cover the maximum range, without taking any previously known 2568 alignment into account. Make a size negative after such an 2569 adjustments, so that, if we end up with e.g. two SYMBOL_REFs, we 2570 assume a potential overlap, because they may end up in contiguous 2571 memory locations and the stricter-alignment access may span over 2572 part of both. */ 2573 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))) 2574 { 2575 HOST_WIDE_INT sc = INTVAL (XEXP (x, 1)); 2576 unsigned HOST_WIDE_INT uc = sc; 2577 if (sc < 0 && pow2_or_zerop (-uc)) 2578 { 2579 if (maybe_gt (xsize, 0)) 2580 xsize = -xsize; 2581 if (maybe_ne (xsize, 0)) 2582 xsize += sc + 1; 2583 c -= sc + 1; 2584 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), 2585 ysize, y, c); 2586 } 2587 } 2588 if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1))) 2589 { 2590 HOST_WIDE_INT sc = INTVAL (XEXP (y, 1)); 2591 unsigned HOST_WIDE_INT uc = sc; 2592 if (sc < 0 && pow2_or_zerop (-uc)) 2593 { 2594 if (maybe_gt (ysize, 0)) 2595 ysize = -ysize; 2596 if (maybe_ne (ysize, 0)) 2597 ysize += sc + 1; 2598 c += sc + 1; 2599 return memrefs_conflict_p (xsize, x, 2600 ysize, canon_rtx (XEXP (y, 0)), c); 2601 } 2602 } 2603 2604 if (CONSTANT_P (x)) 2605 { 2606 poly_int64 cx, cy; 2607 if (poly_int_rtx_p (x, &cx) && poly_int_rtx_p (y, &cy)) 2608 { 2609 c += cy - cx; 2610 return offset_overlap_p (c, xsize, ysize); 2611 } 2612 2613 if (GET_CODE (x) == CONST) 2614 { 2615 if (GET_CODE (y) == CONST) 2616 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), 2617 ysize, canon_rtx (XEXP (y, 0)), c); 2618 else 2619 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), 2620 ysize, y, c); 2621 } 2622 if (GET_CODE (y) == CONST) 2623 return memrefs_conflict_p (xsize, x, ysize, 2624 canon_rtx (XEXP (y, 0)), c); 2625 2626 /* Assume a potential overlap for symbolic addresses that went 2627 through alignment adjustments (i.e., that have negative 2628 sizes), because we can't know how far they are from each 2629 other. */ 2630 if (CONSTANT_P (y)) 2631 return (maybe_lt (xsize, 0) 2632 || maybe_lt (ysize, 0) 2633 || offset_overlap_p (c, xsize, ysize)); 2634 2635 return -1; 2636 } 2637 2638 return -1; 2639 } 2640 2641 /* Functions to compute memory dependencies. 2642 2643 Since we process the insns in execution order, we can build tables 2644 to keep track of what registers are fixed (and not aliased), what registers 2645 are varying in known ways, and what registers are varying in unknown 2646 ways. 2647 2648 If both memory references are volatile, then there must always be a 2649 dependence between the two references, since their order can not be 2650 changed. A volatile and non-volatile reference can be interchanged 2651 though. 2652 2653 We also must allow AND addresses, because they may generate accesses 2654 outside the object being referenced. This is used to generate aligned 2655 addresses from unaligned addresses, for instance, the alpha 2656 storeqi_unaligned pattern. */ 2657 2658 /* Read dependence: X is read after read in MEM takes place. There can 2659 only be a dependence here if both reads are volatile, or if either is 2660 an explicit barrier. */ 2661 2662 int 2663 read_dependence (const_rtx mem, const_rtx x) 2664 { 2665 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) 2666 return true; 2667 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER 2668 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER) 2669 return true; 2670 return false; 2671 } 2672 2673 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it. */ 2674 2675 static tree 2676 decl_for_component_ref (tree x) 2677 { 2678 do 2679 { 2680 x = TREE_OPERAND (x, 0); 2681 } 2682 while (x && TREE_CODE (x) == COMPONENT_REF); 2683 2684 return x && DECL_P (x) ? x : NULL_TREE; 2685 } 2686 2687 /* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate 2688 for the offset of the field reference. *KNOWN_P says whether the 2689 offset is known. */ 2690 2691 static void 2692 adjust_offset_for_component_ref (tree x, bool *known_p, 2693 poly_int64 *offset) 2694 { 2695 if (!*known_p) 2696 return; 2697 do 2698 { 2699 tree xoffset = component_ref_field_offset (x); 2700 tree field = TREE_OPERAND (x, 1); 2701 if (TREE_CODE (xoffset) != INTEGER_CST) 2702 { 2703 *known_p = false; 2704 return; 2705 } 2706 2707 offset_int woffset 2708 = (wi::to_offset (xoffset) 2709 + (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)) 2710 >> LOG2_BITS_PER_UNIT)); 2711 if (!wi::fits_uhwi_p (woffset)) 2712 { 2713 *known_p = false; 2714 return; 2715 } 2716 *offset += woffset.to_uhwi (); 2717 2718 x = TREE_OPERAND (x, 0); 2719 } 2720 while (x && TREE_CODE (x) == COMPONENT_REF); 2721 } 2722 2723 /* Return nonzero if we can determine the exprs corresponding to memrefs 2724 X and Y and they do not overlap. 2725 If LOOP_VARIANT is set, skip offset-based disambiguation */ 2726 2727 int 2728 nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant) 2729 { 2730 tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y); 2731 rtx rtlx, rtly; 2732 rtx basex, basey; 2733 bool moffsetx_known_p, moffsety_known_p; 2734 poly_int64 moffsetx = 0, moffsety = 0; 2735 poly_int64 offsetx = 0, offsety = 0, sizex, sizey; 2736 2737 /* Unless both have exprs, we can't tell anything. */ 2738 if (exprx == 0 || expry == 0) 2739 return 0; 2740 2741 /* For spill-slot accesses make sure we have valid offsets. */ 2742 if ((exprx == get_spill_slot_decl (false) 2743 && ! MEM_OFFSET_KNOWN_P (x)) 2744 || (expry == get_spill_slot_decl (false) 2745 && ! MEM_OFFSET_KNOWN_P (y))) 2746 return 0; 2747 2748 /* If the field reference test failed, look at the DECLs involved. */ 2749 moffsetx_known_p = MEM_OFFSET_KNOWN_P (x); 2750 if (moffsetx_known_p) 2751 moffsetx = MEM_OFFSET (x); 2752 if (TREE_CODE (exprx) == COMPONENT_REF) 2753 { 2754 tree t = decl_for_component_ref (exprx); 2755 if (! t) 2756 return 0; 2757 adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx); 2758 exprx = t; 2759 } 2760 2761 moffsety_known_p = MEM_OFFSET_KNOWN_P (y); 2762 if (moffsety_known_p) 2763 moffsety = MEM_OFFSET (y); 2764 if (TREE_CODE (expry) == COMPONENT_REF) 2765 { 2766 tree t = decl_for_component_ref (expry); 2767 if (! t) 2768 return 0; 2769 adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety); 2770 expry = t; 2771 } 2772 2773 if (! DECL_P (exprx) || ! DECL_P (expry)) 2774 return 0; 2775 2776 /* If we refer to different gimple registers, or one gimple register 2777 and one non-gimple-register, we know they can't overlap. First, 2778 gimple registers don't have their addresses taken. Now, there 2779 could be more than one stack slot for (different versions of) the 2780 same gimple register, but we can presumably tell they don't 2781 overlap based on offsets from stack base addresses elsewhere. 2782 It's important that we don't proceed to DECL_RTL, because gimple 2783 registers may not pass DECL_RTL_SET_P, and make_decl_rtl won't be 2784 able to do anything about them since no SSA information will have 2785 remained to guide it. */ 2786 if (is_gimple_reg (exprx) || is_gimple_reg (expry)) 2787 return exprx != expry 2788 || (moffsetx_known_p && moffsety_known_p 2789 && MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y) 2790 && !offset_overlap_p (moffsety - moffsetx, 2791 MEM_SIZE (x), MEM_SIZE (y))); 2792 2793 /* With invalid code we can end up storing into the constant pool. 2794 Bail out to avoid ICEing when creating RTL for this. 2795 See gfortran.dg/lto/20091028-2_0.f90. */ 2796 if (TREE_CODE (exprx) == CONST_DECL 2797 || TREE_CODE (expry) == CONST_DECL) 2798 return 1; 2799 2800 /* If one decl is known to be a function or label in a function and 2801 the other is some kind of data, they can't overlap. */ 2802 if ((TREE_CODE (exprx) == FUNCTION_DECL 2803 || TREE_CODE (exprx) == LABEL_DECL) 2804 != (TREE_CODE (expry) == FUNCTION_DECL 2805 || TREE_CODE (expry) == LABEL_DECL)) 2806 return 1; 2807 2808 /* If either of the decls doesn't have DECL_RTL set (e.g. marked as 2809 living in multiple places), we can't tell anything. Exception 2810 are FUNCTION_DECLs for which we can create DECL_RTL on demand. */ 2811 if ((!DECL_RTL_SET_P (exprx) && TREE_CODE (exprx) != FUNCTION_DECL) 2812 || (!DECL_RTL_SET_P (expry) && TREE_CODE (expry) != FUNCTION_DECL)) 2813 return 0; 2814 2815 rtlx = DECL_RTL (exprx); 2816 rtly = DECL_RTL (expry); 2817 2818 /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they 2819 can't overlap unless they are the same because we never reuse that part 2820 of the stack frame used for locals for spilled pseudos. */ 2821 if ((!MEM_P (rtlx) || !MEM_P (rtly)) 2822 && ! rtx_equal_p (rtlx, rtly)) 2823 return 1; 2824 2825 /* If we have MEMs referring to different address spaces (which can 2826 potentially overlap), we cannot easily tell from the addresses 2827 whether the references overlap. */ 2828 if (MEM_P (rtlx) && MEM_P (rtly) 2829 && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly)) 2830 return 0; 2831 2832 /* Get the base and offsets of both decls. If either is a register, we 2833 know both are and are the same, so use that as the base. The only 2834 we can avoid overlap is if we can deduce that they are nonoverlapping 2835 pieces of that decl, which is very rare. */ 2836 basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx; 2837 basex = strip_offset_and_add (basex, &offsetx); 2838 2839 basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly; 2840 basey = strip_offset_and_add (basey, &offsety); 2841 2842 /* If the bases are different, we know they do not overlap if both 2843 are constants or if one is a constant and the other a pointer into the 2844 stack frame. Otherwise a different base means we can't tell if they 2845 overlap or not. */ 2846 if (compare_base_decls (exprx, expry) == 0) 2847 return ((CONSTANT_P (basex) && CONSTANT_P (basey)) 2848 || (CONSTANT_P (basex) && REG_P (basey) 2849 && REGNO_PTR_FRAME_P (REGNO (basey))) 2850 || (CONSTANT_P (basey) && REG_P (basex) 2851 && REGNO_PTR_FRAME_P (REGNO (basex)))); 2852 2853 /* Offset based disambiguation not appropriate for loop invariant */ 2854 if (loop_invariant) 2855 return 0; 2856 2857 /* Offset based disambiguation is OK even if we do not know that the 2858 declarations are necessarily different 2859 (i.e. compare_base_decls (exprx, expry) == -1) */ 2860 2861 sizex = (!MEM_P (rtlx) ? poly_int64 (GET_MODE_SIZE (GET_MODE (rtlx))) 2862 : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx) 2863 : -1); 2864 sizey = (!MEM_P (rtly) ? poly_int64 (GET_MODE_SIZE (GET_MODE (rtly))) 2865 : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly) 2866 : -1); 2867 2868 /* If we have an offset for either memref, it can update the values computed 2869 above. */ 2870 if (moffsetx_known_p) 2871 offsetx += moffsetx, sizex -= moffsetx; 2872 if (moffsety_known_p) 2873 offsety += moffsety, sizey -= moffsety; 2874 2875 /* If a memref has both a size and an offset, we can use the smaller size. 2876 We can't do this if the offset isn't known because we must view this 2877 memref as being anywhere inside the DECL's MEM. */ 2878 if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p) 2879 sizex = MEM_SIZE (x); 2880 if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p) 2881 sizey = MEM_SIZE (y); 2882 2883 return !ranges_maybe_overlap_p (offsetx, sizex, offsety, sizey); 2884 } 2885 2886 /* Helper for true_dependence and canon_true_dependence. 2887 Checks for true dependence: X is read after store in MEM takes place. 2888 2889 If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be 2890 NULL_RTX, and the canonical addresses of MEM and X are both computed 2891 here. If MEM_CANONICALIZED, then MEM must be already canonicalized. 2892 2893 If X_ADDR is non-NULL, it is used in preference of XEXP (x, 0). 2894 2895 Returns 1 if there is a true dependence, 0 otherwise. */ 2896 2897 static int 2898 true_dependence_1 (const_rtx mem, machine_mode mem_mode, rtx mem_addr, 2899 const_rtx x, rtx x_addr, bool mem_canonicalized) 2900 { 2901 rtx true_mem_addr; 2902 rtx base; 2903 int ret; 2904 2905 gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX) 2906 : (mem_addr == NULL_RTX && x_addr == NULL_RTX)); 2907 2908 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) 2909 return 1; 2910 2911 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything. 2912 This is used in epilogue deallocation functions, and in cselib. */ 2913 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH) 2914 return 1; 2915 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH) 2916 return 1; 2917 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER 2918 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER) 2919 return 1; 2920 2921 if (! x_addr) 2922 x_addr = XEXP (x, 0); 2923 x_addr = get_addr (x_addr); 2924 2925 if (! mem_addr) 2926 { 2927 mem_addr = XEXP (mem, 0); 2928 if (mem_mode == VOIDmode) 2929 mem_mode = GET_MODE (mem); 2930 } 2931 true_mem_addr = get_addr (mem_addr); 2932 2933 /* Read-only memory is by definition never modified, and therefore can't 2934 conflict with anything. However, don't assume anything when AND 2935 addresses are involved and leave to the code below to determine 2936 dependence. We don't expect to find read-only set on MEM, but 2937 stupid user tricks can produce them, so don't die. */ 2938 if (MEM_READONLY_P (x) 2939 && GET_CODE (x_addr) != AND 2940 && GET_CODE (true_mem_addr) != AND) 2941 return 0; 2942 2943 /* If we have MEMs referring to different address spaces (which can 2944 potentially overlap), we cannot easily tell from the addresses 2945 whether the references overlap. */ 2946 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x)) 2947 return 1; 2948 2949 base = find_base_term (x_addr); 2950 if (base && (GET_CODE (base) == LABEL_REF 2951 || (GET_CODE (base) == SYMBOL_REF 2952 && CONSTANT_POOL_ADDRESS_P (base)))) 2953 return 0; 2954 2955 rtx mem_base = find_base_term (true_mem_addr); 2956 if (! base_alias_check (x_addr, base, true_mem_addr, mem_base, 2957 GET_MODE (x), mem_mode)) 2958 return 0; 2959 2960 x_addr = canon_rtx (x_addr); 2961 if (!mem_canonicalized) 2962 mem_addr = canon_rtx (true_mem_addr); 2963 2964 if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr, 2965 SIZE_FOR_MODE (x), x_addr, 0)) != -1) 2966 return ret; 2967 2968 if (mems_in_disjoint_alias_sets_p (x, mem)) 2969 return 0; 2970 2971 if (nonoverlapping_memrefs_p (mem, x, false)) 2972 return 0; 2973 2974 return rtx_refs_may_alias_p (x, mem, true); 2975 } 2976 2977 /* True dependence: X is read after store in MEM takes place. */ 2978 2979 int 2980 true_dependence (const_rtx mem, machine_mode mem_mode, const_rtx x) 2981 { 2982 return true_dependence_1 (mem, mem_mode, NULL_RTX, 2983 x, NULL_RTX, /*mem_canonicalized=*/false); 2984 } 2985 2986 /* Canonical true dependence: X is read after store in MEM takes place. 2987 Variant of true_dependence which assumes MEM has already been 2988 canonicalized (hence we no longer do that here). 2989 The mem_addr argument has been added, since true_dependence_1 computed 2990 this value prior to canonicalizing. */ 2991 2992 int 2993 canon_true_dependence (const_rtx mem, machine_mode mem_mode, rtx mem_addr, 2994 const_rtx x, rtx x_addr) 2995 { 2996 return true_dependence_1 (mem, mem_mode, mem_addr, 2997 x, x_addr, /*mem_canonicalized=*/true); 2998 } 2999 3000 /* Returns nonzero if a write to X might alias a previous read from 3001 (or, if WRITEP is true, a write to) MEM. 3002 If X_CANONCALIZED is true, then X_ADDR is the canonicalized address of X, 3003 and X_MODE the mode for that access. 3004 If MEM_CANONICALIZED is true, MEM is canonicalized. */ 3005 3006 static int 3007 write_dependence_p (const_rtx mem, 3008 const_rtx x, machine_mode x_mode, rtx x_addr, 3009 bool mem_canonicalized, bool x_canonicalized, bool writep) 3010 { 3011 rtx mem_addr; 3012 rtx true_mem_addr, true_x_addr; 3013 rtx base; 3014 int ret; 3015 3016 gcc_checking_assert (x_canonicalized 3017 ? (x_addr != NULL_RTX 3018 && (x_mode != VOIDmode || GET_MODE (x) == VOIDmode)) 3019 : (x_addr == NULL_RTX && x_mode == VOIDmode)); 3020 3021 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) 3022 return 1; 3023 3024 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything. 3025 This is used in epilogue deallocation functions. */ 3026 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH) 3027 return 1; 3028 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH) 3029 return 1; 3030 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER 3031 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER) 3032 return 1; 3033 3034 if (!x_addr) 3035 x_addr = XEXP (x, 0); 3036 true_x_addr = get_addr (x_addr); 3037 3038 mem_addr = XEXP (mem, 0); 3039 true_mem_addr = get_addr (mem_addr); 3040 3041 /* A read from read-only memory can't conflict with read-write memory. 3042 Don't assume anything when AND addresses are involved and leave to 3043 the code below to determine dependence. */ 3044 if (!writep 3045 && MEM_READONLY_P (mem) 3046 && GET_CODE (true_x_addr) != AND 3047 && GET_CODE (true_mem_addr) != AND) 3048 return 0; 3049 3050 /* If we have MEMs referring to different address spaces (which can 3051 potentially overlap), we cannot easily tell from the addresses 3052 whether the references overlap. */ 3053 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x)) 3054 return 1; 3055 3056 base = find_base_term (true_mem_addr); 3057 if (! writep 3058 && base 3059 && (GET_CODE (base) == LABEL_REF 3060 || (GET_CODE (base) == SYMBOL_REF 3061 && CONSTANT_POOL_ADDRESS_P (base)))) 3062 return 0; 3063 3064 rtx x_base = find_base_term (true_x_addr); 3065 if (! base_alias_check (true_x_addr, x_base, true_mem_addr, base, 3066 GET_MODE (x), GET_MODE (mem))) 3067 return 0; 3068 3069 if (!x_canonicalized) 3070 { 3071 x_addr = canon_rtx (true_x_addr); 3072 x_mode = GET_MODE (x); 3073 } 3074 if (!mem_canonicalized) 3075 mem_addr = canon_rtx (true_mem_addr); 3076 3077 if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr, 3078 GET_MODE_SIZE (x_mode), x_addr, 0)) != -1) 3079 return ret; 3080 3081 if (nonoverlapping_memrefs_p (x, mem, false)) 3082 return 0; 3083 3084 return rtx_refs_may_alias_p (x, mem, false); 3085 } 3086 3087 /* Anti dependence: X is written after read in MEM takes place. */ 3088 3089 int 3090 anti_dependence (const_rtx mem, const_rtx x) 3091 { 3092 return write_dependence_p (mem, x, VOIDmode, NULL_RTX, 3093 /*mem_canonicalized=*/false, 3094 /*x_canonicalized*/false, /*writep=*/false); 3095 } 3096 3097 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X. 3098 Also, consider X in X_MODE (which might be from an enclosing 3099 STRICT_LOW_PART / ZERO_EXTRACT). 3100 If MEM_CANONICALIZED is true, MEM is canonicalized. */ 3101 3102 int 3103 canon_anti_dependence (const_rtx mem, bool mem_canonicalized, 3104 const_rtx x, machine_mode x_mode, rtx x_addr) 3105 { 3106 return write_dependence_p (mem, x, x_mode, x_addr, 3107 mem_canonicalized, /*x_canonicalized=*/true, 3108 /*writep=*/false); 3109 } 3110 3111 /* Output dependence: X is written after store in MEM takes place. */ 3112 3113 int 3114 output_dependence (const_rtx mem, const_rtx x) 3115 { 3116 return write_dependence_p (mem, x, VOIDmode, NULL_RTX, 3117 /*mem_canonicalized=*/false, 3118 /*x_canonicalized*/false, /*writep=*/true); 3119 } 3120 3121 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X. 3122 Also, consider X in X_MODE (which might be from an enclosing 3123 STRICT_LOW_PART / ZERO_EXTRACT). 3124 If MEM_CANONICALIZED is true, MEM is canonicalized. */ 3125 3126 int 3127 canon_output_dependence (const_rtx mem, bool mem_canonicalized, 3128 const_rtx x, machine_mode x_mode, rtx x_addr) 3129 { 3130 return write_dependence_p (mem, x, x_mode, x_addr, 3131 mem_canonicalized, /*x_canonicalized=*/true, 3132 /*writep=*/true); 3133 } 3134 3135 3136 3137 /* Check whether X may be aliased with MEM. Don't do offset-based 3138 memory disambiguation & TBAA. */ 3139 int 3140 may_alias_p (const_rtx mem, const_rtx x) 3141 { 3142 rtx x_addr, mem_addr; 3143 3144 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) 3145 return 1; 3146 3147 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything. 3148 This is used in epilogue deallocation functions. */ 3149 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH) 3150 return 1; 3151 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH) 3152 return 1; 3153 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER 3154 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER) 3155 return 1; 3156 3157 x_addr = XEXP (x, 0); 3158 x_addr = get_addr (x_addr); 3159 3160 mem_addr = XEXP (mem, 0); 3161 mem_addr = get_addr (mem_addr); 3162 3163 /* Read-only memory is by definition never modified, and therefore can't 3164 conflict with anything. However, don't assume anything when AND 3165 addresses are involved and leave to the code below to determine 3166 dependence. We don't expect to find read-only set on MEM, but 3167 stupid user tricks can produce them, so don't die. */ 3168 if (MEM_READONLY_P (x) 3169 && GET_CODE (x_addr) != AND 3170 && GET_CODE (mem_addr) != AND) 3171 return 0; 3172 3173 /* If we have MEMs referring to different address spaces (which can 3174 potentially overlap), we cannot easily tell from the addresses 3175 whether the references overlap. */ 3176 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x)) 3177 return 1; 3178 3179 rtx x_base = find_base_term (x_addr); 3180 rtx mem_base = find_base_term (mem_addr); 3181 if (! base_alias_check (x_addr, x_base, mem_addr, mem_base, 3182 GET_MODE (x), GET_MODE (mem_addr))) 3183 return 0; 3184 3185 if (nonoverlapping_memrefs_p (mem, x, true)) 3186 return 0; 3187 3188 /* TBAA not valid for loop_invarint */ 3189 return rtx_refs_may_alias_p (x, mem, false); 3190 } 3191 3192 void 3193 init_alias_target (void) 3194 { 3195 int i; 3196 3197 if (!arg_base_value) 3198 arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0); 3199 3200 memset (static_reg_base_value, 0, sizeof static_reg_base_value); 3201 3202 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) 3203 /* Check whether this register can hold an incoming pointer 3204 argument. FUNCTION_ARG_REGNO_P tests outgoing register 3205 numbers, so translate if necessary due to register windows. */ 3206 if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i)) 3207 && targetm.hard_regno_mode_ok (i, Pmode)) 3208 static_reg_base_value[i] = arg_base_value; 3209 3210 /* RTL code is required to be consistent about whether it uses the 3211 stack pointer, the frame pointer or the argument pointer to 3212 access a given area of the frame. We can therefore use the 3213 base address to distinguish between the different areas. */ 3214 static_reg_base_value[STACK_POINTER_REGNUM] 3215 = unique_base_value (UNIQUE_BASE_VALUE_SP); 3216 static_reg_base_value[ARG_POINTER_REGNUM] 3217 = unique_base_value (UNIQUE_BASE_VALUE_ARGP); 3218 static_reg_base_value[FRAME_POINTER_REGNUM] 3219 = unique_base_value (UNIQUE_BASE_VALUE_FP); 3220 3221 /* The above rules extend post-reload, with eliminations applying 3222 consistently to each of the three pointers. Cope with cases in 3223 which the frame pointer is eliminated to the hard frame pointer 3224 rather than the stack pointer. */ 3225 if (!HARD_FRAME_POINTER_IS_FRAME_POINTER) 3226 static_reg_base_value[HARD_FRAME_POINTER_REGNUM] 3227 = unique_base_value (UNIQUE_BASE_VALUE_HFP); 3228 } 3229 3230 /* Set MEMORY_MODIFIED when X modifies DATA (that is assumed 3231 to be memory reference. */ 3232 static bool memory_modified; 3233 static void 3234 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data) 3235 { 3236 if (MEM_P (x)) 3237 { 3238 if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data)) 3239 memory_modified = true; 3240 } 3241 } 3242 3243 3244 /* Return true when INSN possibly modify memory contents of MEM 3245 (i.e. address can be modified). */ 3246 bool 3247 memory_modified_in_insn_p (const_rtx mem, const_rtx insn) 3248 { 3249 if (!INSN_P (insn)) 3250 return false; 3251 /* Conservatively assume all non-readonly MEMs might be modified in 3252 calls. */ 3253 if (CALL_P (insn)) 3254 return true; 3255 memory_modified = false; 3256 note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem)); 3257 return memory_modified; 3258 } 3259 3260 /* Return TRUE if the destination of a set is rtx identical to 3261 ITEM. */ 3262 static inline bool 3263 set_dest_equal_p (const_rtx set, const_rtx item) 3264 { 3265 rtx dest = SET_DEST (set); 3266 return rtx_equal_p (dest, item); 3267 } 3268 3269 /* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE 3270 array. */ 3271 3272 void 3273 init_alias_analysis (void) 3274 { 3275 unsigned int maxreg = max_reg_num (); 3276 int changed, pass; 3277 int i; 3278 unsigned int ui; 3279 rtx_insn *insn; 3280 rtx val; 3281 int rpo_cnt; 3282 int *rpo; 3283 3284 timevar_push (TV_ALIAS_ANALYSIS); 3285 3286 vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER); 3287 reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER); 3288 bitmap_clear (reg_known_equiv_p); 3289 3290 /* If we have memory allocated from the previous run, use it. */ 3291 if (old_reg_base_value) 3292 reg_base_value = old_reg_base_value; 3293 3294 if (reg_base_value) 3295 reg_base_value->truncate (0); 3296 3297 vec_safe_grow_cleared (reg_base_value, maxreg); 3298 3299 new_reg_base_value = XNEWVEC (rtx, maxreg); 3300 reg_seen = sbitmap_alloc (maxreg); 3301 3302 /* The basic idea is that each pass through this loop will use the 3303 "constant" information from the previous pass to propagate alias 3304 information through another level of assignments. 3305 3306 The propagation is done on the CFG in reverse post-order, to propagate 3307 things forward as far as possible in each iteration. 3308 3309 This could get expensive if the assignment chains are long. Maybe 3310 we should throttle the number of iterations, possibly based on 3311 the optimization level or flag_expensive_optimizations. 3312 3313 We could propagate more information in the first pass by making use 3314 of DF_REG_DEF_COUNT to determine immediately that the alias information 3315 for a pseudo is "constant". 3316 3317 A program with an uninitialized variable can cause an infinite loop 3318 here. Instead of doing a full dataflow analysis to detect such problems 3319 we just cap the number of iterations for the loop. 3320 3321 The state of the arrays for the set chain in question does not matter 3322 since the program has undefined behavior. */ 3323 3324 rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun)); 3325 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false); 3326 3327 /* The prologue/epilogue insns are not threaded onto the 3328 insn chain until after reload has completed. Thus, 3329 there is no sense wasting time checking if INSN is in 3330 the prologue/epilogue until after reload has completed. */ 3331 bool could_be_prologue_epilogue = ((targetm.have_prologue () 3332 || targetm.have_epilogue ()) 3333 && reload_completed); 3334 3335 pass = 0; 3336 do 3337 { 3338 /* Assume nothing will change this iteration of the loop. */ 3339 changed = 0; 3340 3341 /* We want to assign the same IDs each iteration of this loop, so 3342 start counting from one each iteration of the loop. */ 3343 unique_id = 1; 3344 3345 /* We're at the start of the function each iteration through the 3346 loop, so we're copying arguments. */ 3347 copying_arguments = true; 3348 3349 /* Wipe the potential alias information clean for this pass. */ 3350 memset (new_reg_base_value, 0, maxreg * sizeof (rtx)); 3351 3352 /* Wipe the reg_seen array clean. */ 3353 bitmap_clear (reg_seen); 3354 3355 /* Initialize the alias information for this pass. */ 3356 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) 3357 if (static_reg_base_value[i] 3358 /* Don't treat the hard frame pointer as special if we 3359 eliminated the frame pointer to the stack pointer instead. */ 3360 && !(i == HARD_FRAME_POINTER_REGNUM 3361 && reload_completed 3362 && !frame_pointer_needed 3363 && targetm.can_eliminate (FRAME_POINTER_REGNUM, 3364 STACK_POINTER_REGNUM))) 3365 { 3366 new_reg_base_value[i] = static_reg_base_value[i]; 3367 bitmap_set_bit (reg_seen, i); 3368 } 3369 3370 /* Walk the insns adding values to the new_reg_base_value array. */ 3371 for (i = 0; i < rpo_cnt; i++) 3372 { 3373 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]); 3374 FOR_BB_INSNS (bb, insn) 3375 { 3376 if (NONDEBUG_INSN_P (insn)) 3377 { 3378 rtx note, set; 3379 3380 if (could_be_prologue_epilogue 3381 && prologue_epilogue_contains (insn)) 3382 continue; 3383 3384 /* If this insn has a noalias note, process it, Otherwise, 3385 scan for sets. A simple set will have no side effects 3386 which could change the base value of any other register. */ 3387 3388 if (GET_CODE (PATTERN (insn)) == SET 3389 && REG_NOTES (insn) != 0 3390 && find_reg_note (insn, REG_NOALIAS, NULL_RTX)) 3391 record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL); 3392 else 3393 note_stores (PATTERN (insn), record_set, NULL); 3394 3395 set = single_set (insn); 3396 3397 if (set != 0 3398 && REG_P (SET_DEST (set)) 3399 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER) 3400 { 3401 unsigned int regno = REGNO (SET_DEST (set)); 3402 rtx src = SET_SRC (set); 3403 rtx t; 3404 3405 note = find_reg_equal_equiv_note (insn); 3406 if (note && REG_NOTE_KIND (note) == REG_EQUAL 3407 && DF_REG_DEF_COUNT (regno) != 1) 3408 note = NULL_RTX; 3409 3410 if (note != NULL_RTX 3411 && GET_CODE (XEXP (note, 0)) != EXPR_LIST 3412 && ! rtx_varies_p (XEXP (note, 0), 1) 3413 && ! reg_overlap_mentioned_p (SET_DEST (set), 3414 XEXP (note, 0))) 3415 { 3416 set_reg_known_value (regno, XEXP (note, 0)); 3417 set_reg_known_equiv_p (regno, 3418 REG_NOTE_KIND (note) == REG_EQUIV); 3419 } 3420 else if (DF_REG_DEF_COUNT (regno) == 1 3421 && GET_CODE (src) == PLUS 3422 && REG_P (XEXP (src, 0)) 3423 && (t = get_reg_known_value (REGNO (XEXP (src, 0)))) 3424 && CONST_INT_P (XEXP (src, 1))) 3425 { 3426 t = plus_constant (GET_MODE (src), t, 3427 INTVAL (XEXP (src, 1))); 3428 set_reg_known_value (regno, t); 3429 set_reg_known_equiv_p (regno, false); 3430 } 3431 else if (DF_REG_DEF_COUNT (regno) == 1 3432 && ! rtx_varies_p (src, 1)) 3433 { 3434 set_reg_known_value (regno, src); 3435 set_reg_known_equiv_p (regno, false); 3436 } 3437 } 3438 } 3439 else if (NOTE_P (insn) 3440 && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG) 3441 copying_arguments = false; 3442 } 3443 } 3444 3445 /* Now propagate values from new_reg_base_value to reg_base_value. */ 3446 gcc_assert (maxreg == (unsigned int) max_reg_num ()); 3447 3448 for (ui = 0; ui < maxreg; ui++) 3449 { 3450 if (new_reg_base_value[ui] 3451 && new_reg_base_value[ui] != (*reg_base_value)[ui] 3452 && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui])) 3453 { 3454 (*reg_base_value)[ui] = new_reg_base_value[ui]; 3455 changed = 1; 3456 } 3457 } 3458 } 3459 while (changed && ++pass < MAX_ALIAS_LOOP_PASSES); 3460 XDELETEVEC (rpo); 3461 3462 /* Fill in the remaining entries. */ 3463 FOR_EACH_VEC_ELT (*reg_known_value, i, val) 3464 { 3465 int regno = i + FIRST_PSEUDO_REGISTER; 3466 if (! val) 3467 set_reg_known_value (regno, regno_reg_rtx[regno]); 3468 } 3469 3470 /* Clean up. */ 3471 free (new_reg_base_value); 3472 new_reg_base_value = 0; 3473 sbitmap_free (reg_seen); 3474 reg_seen = 0; 3475 timevar_pop (TV_ALIAS_ANALYSIS); 3476 } 3477 3478 /* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2). 3479 Special API for var-tracking pass purposes. */ 3480 3481 void 3482 vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2) 3483 { 3484 (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2); 3485 } 3486 3487 void 3488 end_alias_analysis (void) 3489 { 3490 old_reg_base_value = reg_base_value; 3491 vec_free (reg_known_value); 3492 sbitmap_free (reg_known_equiv_p); 3493 } 3494 3495 void 3496 dump_alias_stats_in_alias_c (FILE *s) 3497 { 3498 fprintf (s, " TBAA oracle: %llu disambiguations %llu queries\n" 3499 " %llu are in alias set 0\n" 3500 " %llu queries asked about the same object\n" 3501 " %llu queries asked about the same alias set\n" 3502 " %llu access volatile\n" 3503 " %llu are dependent in the DAG\n" 3504 " %llu are aritificially in conflict with void *\n", 3505 alias_stats.num_disambiguated, 3506 alias_stats.num_alias_zero + alias_stats.num_same_alias_set 3507 + alias_stats.num_same_objects + alias_stats.num_volatile 3508 + alias_stats.num_dag + alias_stats.num_disambiguated 3509 + alias_stats.num_universal, 3510 alias_stats.num_alias_zero, alias_stats.num_same_alias_set, 3511 alias_stats.num_same_objects, alias_stats.num_volatile, 3512 alias_stats.num_dag, alias_stats.num_universal); 3513 } 3514 #include "gt-alias.h" 3515