//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// The MemorySSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning": every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Uses/Phis that are overlayed on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
/// Given this form, all the stores that could ever affect the load at %8 can
/// be found by taking the MemoryUse associated with it, and walking from use
/// to def until you hit the top of the function.
///
/// Each def also has a list of users associated with it, so you can walk from
/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
/// but not the RHS of MemoryDefs. You can see this above at %7, which would
/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
/// store, all the MemoryUses on its use lists are may-aliases of that store
/// (but the MemoryDefs on its use list may not be).
///
/// MemoryDefs are not disambiguated because it would require multiple reaching
/// definitions, which would require multiple phis, and multiple memory
/// accesses per instruction.
///
/// In addition to the def/use graph described above, MemoryDefs also contain
/// an "optimized" definition use. The "optimized" use points to some def
/// reachable through the memory def chain. The optimized def *may* (but is
/// not required to) alias the original MemoryDef, but no def *closer* to the
/// source def may alias it. As the name implies, the purpose of the optimized
/// use is to allow caching of clobber searches for memory defs. The optimized
/// def may be nullptr, in which case clients must walk the defining access
/// chain.
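///
/// As a minimal sketch (hypothetical client code, not part of this
/// interface), walking from a load's MemoryUse to the top of the function
/// along defining accesses might look like:
///
/// \code
///   MemoryUseOrDef *MU = MSSA.getMemoryAccess(TheLoad);
///   MemoryAccess *MA = MU->getDefiningAccess();
///   while (MA && !MSSA.isLiveOnEntryDef(MA)) {
///     MA->print(errs());
///     auto *MUD = dyn_cast<MemoryUseOrDef>(MA);
///     if (!MUD)
///       break; // A MemoryPhi has several incoming defs; walk per-edge.
///     MA = MUD->getDefiningAccess();
///   }
/// \endcode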
///
/// When iterating the uses of a MemoryDef, both defining uses and optimized
/// uses will be encountered. If only one type is needed, the client must
/// filter the use walk.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYSSA_H
#define LLVM_ANALYSIS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Pass.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

template <class GraphType> struct GraphTraits;
class BasicBlock;
class Function;
class Instruction;
class LLVMContext;
class MemoryAccess;
class MemorySSAWalker;
class Module;
class Use;
class Value;
class raw_ostream;

namespace MSSAHelpers {

struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers

enum : unsigned {
  // Used to signify what the default invalid ID is for MemoryAccess's
  // getID().
  INVALID_MEMORYACCESS_ID = -1U
};

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;

// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
    : public DerivedUser,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
  using AllAccessType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsOnlyType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  MemoryAccess(const MemoryAccess &) = delete;
  MemoryAccess &operator=(const MemoryAccess &) = delete;

  void *operator new(size_t) = delete;

  // Methods for support type inquiry through isa, cast, and
  // dyn_cast.
  static bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  BasicBlock *getBlock() const { return Block; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// The user iterators for a memory access.
  using iterator = user_iterator;
  using const_iterator = const_user_iterator;

  /// This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;

  /// Get the iterators for the all access list and the defs only list.
  /// We default to the all access list.
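  ///
  /// As an illustrative sketch (hypothetical client code), the all access
  /// list of a block can be reached through MemorySSA and iterated directly:
  ///
  /// \code
  ///   if (const auto *Accesses = MSSA.getBlockAccesses(BB))
  ///     for (const MemoryAccess &MA : *Accesses)
  ///       MA.print(errs());
  /// \endcode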
  AllAccessType::self_iterator getIterator() {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::const_self_iterator getIterator() const {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::reverse_self_iterator getReverseIterator() {
    return this->AllAccessType::getReverseIterator();
  }
  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    return this->AllAccessType::getReverseIterator();
  }
  DefsOnlyType::self_iterator getDefsIterator() {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::const_self_iterator getDefsIterator() const {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    return this->DefsOnlyType::getReverseIterator();
  }
  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    return this->DefsOnlyType::getReverseIterator();
  }

protected:
  friend class MemoryDef;
  friend class MemoryPhi;
  friend class MemorySSA;
  friend class MemoryUse;
  friend class MemoryUseOrDef;

  /// Used by MemorySSA to change the block of a MemoryAccess when it is
  /// moved.
  void setBlock(BasicBlock *BB) { Block = BB; }

  /// Used for debugging and tracking things about MemoryAccesses.
  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  inline unsigned getID() const;

  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
               BasicBlock *BB, unsigned NumOperands)
      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
        Block(BB) {}

  // Use deleteValue() to delete a generic MemoryAccess.
  ~MemoryAccess() = default;

private:
  BasicBlock *Block;
};

template <>
struct ilist_alloc_traits<MemoryAccess> {
  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}

/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
  void *operator new(size_t) = delete;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// Get the instruction that this MemoryUse represents.
  Instruction *getMemoryInst() const { return MemoryInstruction; }

  /// Get the access that produces the memory state used by this Use.
  MemoryAccess *getDefiningAccess() const { return getOperand(0); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
  }

  /// Do we have an optimized use?
  inline bool isOptimized() const;
  /// Return the MemoryAccess associated with the optimized use, or nullptr.
  inline MemoryAccess *getOptimized() const;
  /// Sets the optimized use for a MemoryDef.
  inline void setOptimized(MemoryAccess *);

  /// Reset the ID of what this MemoryUse was optimized to, causing it to
  /// be rewalked by the walker if necessary.
  /// This really should only be called by tests.
  inline void resetOptimized();

protected:
  friend class MemorySSA;
  friend class MemorySSAUpdater;

  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
                 unsigned NumOperands)
      : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
        MemoryInstruction(MI) {
    setDefiningAccess(DMA);
  }

  // Use deleteValue() to delete a generic MemoryUseOrDef.
  ~MemoryUseOrDef() = default;

  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
    if (!Optimized) {
      setOperand(0, DMA);
      return;
    }
    setOptimized(DMA);
  }

private:
  Instruction *MemoryInstruction;
};

/// Represents read-only accesses to memory.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
                       /*NumOperands=*/1) {}

  // Allocate space for exactly one operand.
  void *operator new(size_t S) { return User::operator new(S, 1); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  void print(raw_ostream &OS) const;

  void setOptimized(MemoryAccess *DMA) {
    OptimizedID = DMA->getID();
    setOperand(0, DMA);
  }

  /// Whether the MemoryUse is optimized. If ensureOptimizedUses() was called,
  /// uses will usually be optimized, but this is not guaranteed (e.g., due to
  /// invalidation and optimization limits).
  bool isOptimized() const {
    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return getDefiningAccess();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

protected:
  friend class MemorySSA;

private:
  static void deleteMe(DerivedUser *Self);

  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)

/// Represents a read-write access to memory, whether it is a must-alias,
/// or a may-alias.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryDef's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
/// Note that, in order to provide def-def chains, all defs also have a use
/// associated with them. This use points to the nearest reaching
/// MemoryDef/MemoryPhi.
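///
/// For illustration, in a block with two stores (hypothetical IR), the second
/// def's use operand points at the first, forming the def-def chain:
///   ; 1 = MemoryDef(liveOnEntry)
///   store i32 1, i32* %p
///   ; 2 = MemoryDef(1)
///   store i32 2, i32* %q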
class MemoryDef final : public MemoryUseOrDef {
public:
  friend class MemorySSA;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
            unsigned Ver)
      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
                       /*NumOperands=*/2),
        ID(Ver) {}

  // Allocate space for exactly two operands.
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryDefVal;
  }

  void setOptimized(MemoryAccess *MA) {
    setOperand(1, MA);
    OptimizedID = MA->getID();
  }

  MemoryAccess *getOptimized() const {
    return cast_or_null<MemoryAccess>(getOperand(1));
  }

  bool isOptimized() const {
    return getOptimized() && OptimizedID == getOptimized()->getID();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
    setOperand(1, nullptr);
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

private:
  static void deleteMe(DerivedUser *Self);

  const unsigned ID;
  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)

template <>
struct OperandTraits<MemoryUseOrDef> {
  static Use *op_begin(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_begin(MU);
    return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
  }

  static Use *op_end(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_end(MU);
    return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
  }

  static unsigned operands(const MemoryUseOrDef *MUD) {
    if (const auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::operands(MU);
    return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
  }
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)

/// Represents phi nodes for memory accesses.
///
/// These have the same semantics as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPhi along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
///   if (a) {
///     store %a
///     store %b
///   }
/// it *must* be transformed into
///   if (a) {
///     1 = MemoryDef(liveOnEntry)
///     store %a
///     2 = MemoryDef(1)
///     store %b
///   }
/// and *not*
///   if (a) {
///     1 = MemoryDef(liveOnEntry)
///     store %a
///     2 = MemoryDef(liveOnEntry)
///     store %b
///   }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
/// end of the branch, and if there are not two phi nodes, one will be
/// disconnected completely from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// issue.
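///
/// At the join point below such a branch, the reaching definitions are merged
/// by a MemoryPhi (illustrative block names and syntax):
///   ; 3 = MemoryPhi({entry,liveOnEntry},{if.then,2})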
class MemoryPhi final : public MemoryAccess {
  // Allocate space for exactly zero operands.
  void *operator new(size_t S) { return User::operator new(S); }

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
      : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
        ReservedSpace(NumPreds) {
    allocHungoffUses(ReservedSpace);
  }

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock *const *;

  block_iterator block_begin() {
    return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
  }

  const_block_iterator block_begin() const {
    return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
  }

  block_iterator block_end() { return block_begin() + getNumOperands(); }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges.
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number \p I.
  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  void setIncomingValue(unsigned I, MemoryAccess *V) {
    assert(V && "PHI node got a null value!");
    setOperand(I, V);
  }

  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }

  /// Return incoming basic block number \p I.
  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }

  /// Return the incoming basic block corresponding
  /// to an operand of the PHI.
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return the incoming basic block corresponding
  /// to a value use iterator.
  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned I, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[I] = BB;
  }

  /// Add an incoming value to the end of the PHI list.
  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI. Returns -1 if no instance.
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB)
        return I;
    return -1;
  }

  MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  // After deleting incoming position I, the order of incoming values may be
  // changed.
  void unorderedDeleteIncoming(unsigned I) {
    unsigned E = getNumOperands();
    assert(I < E && "Cannot remove out of bounds Phi entry.");
    // A MemoryPhi must have at least two incoming values, otherwise the
    // MemoryPhi itself should be deleted.
    assert(E >= 2 && "Can only remove incoming values from MemoryPhis with "
                     "at least 2 values.");
    setIncomingValue(I, getIncomingValue(E - 1));
    setIncomingBlock(I, block_begin()[E - 1]);
    setOperand(E - 1, nullptr);
    block_begin()[E - 1] = nullptr;
    setNumHungOffUseOperands(getNumOperands() - 1);
  }

  // After deleting entries that satisfy Pred, remaining entries may have
  // changed order.
  template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
        unorderedDeleteIncoming(I);
        E = getNumOperands();
        --I;
      }
    assert(getNumOperands() >= 1 &&
           "Cannot remove all incoming blocks in a MemoryPhi.");
  }

  // After deleting incoming block BB, the order of incoming blocks may be
  // changed.
  void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
  }

  // After deleting incoming memory access MA, the order of incoming accesses
  // may be changed.
  void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
  }

  static bool classof(const Value *V) {
    return V->getValueID() == MemoryPhiVal;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

protected:
  friend class MemorySSA;

  /// This is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

private:
  // For debugging only.
  const unsigned ID;
  unsigned ReservedSpace;

  /// This grows the operand list in response to a push_back style of
  /// operation. This grows the number of ops by 1.5 times.
  void growOperands() {
    unsigned E = getNumOperands();
    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    ReservedSpace = std::max(E + E / 2, 2u);
    growHungoffUses(ReservedSpace, /* IsPhi */ true);
  }

  static void deleteMe(DerivedUser *Self);
};

inline unsigned MemoryAccess::getID() const {
  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
         "only memory defs and phis have ids");
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getID();
  return cast<MemoryPhi>(this)->getID();
}

inline bool MemoryUseOrDef::isOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->isOptimized();
  return cast<MemoryUse>(this)->isOptimized();
}

inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getOptimized();
  return cast<MemoryUse>(this)->getOptimized();
}

inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->setOptimized(MA);
  else
    cast<MemoryUse>(this)->setOptimized(MA);
}

inline void MemoryUseOrDef::resetOptimized() {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->resetOptimized();
  else
    cast<MemoryUse>(this)->resetOptimized();
}

template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)

/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);

  // MemorySSA must remain where it's constructed; Walkers it creates store
  // pointers to it.
  MemorySSA(MemorySSA &&) = delete;

  ~MemorySSA();

  MemorySSAWalker *getWalker();
  MemorySSAWalker *getSkipSelfWalker();

  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it. If passed a basic block, this gets the memory
  /// phi node that exists for that block, if there is one. Otherwise, this
  /// will get a MemoryUseOrDef.
  MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
    return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
  }

  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
    return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
  }

  DominatorTree &getDomTree() const { return *DT; }

  void dump() const;
  void print(raw_ostream &) const;

  /// Return true if \p MA represents the live on entry value.
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function,
  /// so they may be live on entry to the function. MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to
  /// occur before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  // Sadly, iplists, by default, own and delete pointers added to the
  // list. It's not currently possible to have two iplists for the same type,
  // where one owns the pointers, and one does not. This is because the traits
  // are per-type, not per-tag. If this ever changes, we should make the
  // DefList an iplist.
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    return getWritableBlockAccesses(BB);
  }

  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    return getWritableBlockDefs(BB);
  }

  /// Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  bool dominates(const MemoryAccess *A, const Use &B) const;

  enum class VerificationLevel { Fast, Full };
  /// Verify that MemorySSA is self consistent (i.e., definitions dominate
  /// all uses, uses appear in the right places). This is used by unit tests.
  void verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End, BeforeTerminator };

  /// By default, uses are *not* optimized during MemorySSA construction.
  /// Calling this method will attempt to optimize all MemoryUses, if this has
  /// not happened yet for this MemorySSA instance. This should be done if you
  /// plan to query the clobbering access for most uses, or if you walk the
  /// def-use chain of uses.
  void ensureOptimizedUses();

  AliasAnalysis &getAA() { return *AA; }

protected:
  // Used by MemorySSA dumpers and the wrapper pass.
  friend class MemorySSAUpdater;

  void verifyOrderingDominationAndDefUses(
      Function &F, VerificationLevel = VerificationLevel::Fast) const;
  void verifyDominationNumbers(const Function &F) const;
  void verifyPrevDefInPhis(Function &F) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These are used by the updater to perform various internal MemorySSA
  // machinations. They do not always leave the IR in a correct state, and
  // rely on the updater to fix up what they break, so they are not public.

  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited) {
    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  }

  void removeFromLookups(MemoryAccess *);
  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                               InsertionPlace);
  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                             AccessList::iterator);
  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
                                      const MemoryUseOrDef *Template = nullptr,
                                      bool CreationMustSucceed = true);

private:
  class ClobberWalkerBase;
  class CachingWalker;
  class SkipSelfWalker;
  class OptimizeUses;

  CachingWalker *getWalkerImpl();
  void buildMemorySSA(BatchAAResults &BAA);

  void prepareForMoveTo(MemoryAccess *, BasicBlock *);
  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;

  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;

  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  MemoryPhi *createMemoryPhi(BasicBlock *BB);
  template <typename AliasAnalysisType>
  MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
                                  const MemoryUseOrDef *Template = nullptr);
  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited,
                  bool SkipVisited = false, bool RenameAllUses = false);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  DefsList *getOrCreateDefsList(const BasicBlock *);
  void renumberBlock(const BasicBlock *) const;
  AliasAnalysis *AA = nullptr;
  DominatorTree *DT;
  Function &F;

  // Memory SSA mappings.
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;

  // These two mappings contain the main block to access/def mappings for
  // MemorySSA. The list contained in PerBlockAccesses really owns all the
  // MemoryAccesses.
  // Both maps maintain the invariant that if a block is found in them, the
  // corresponding list is not empty, and if a block is not found in them, the
  // corresponding list is empty.
  AccessMap PerBlockAccesses;
  DefsMap PerBlockDefs;
  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;

  // Domination mappings.
  // Note that the numbering is local to a block, even though the map is
  // global.
  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;

  // Memory SSA building info.
  std::unique_ptr<ClobberWalkerBase> WalkerBase;
  std::unique_ptr<CachingWalker> Walker;
  std::unique_ptr<SkipSelfWalker> SkipWalker;
  unsigned NextID = 0;
  bool IsOptimized = false;
};

/// Enables verification of MemorySSA.
///
/// The checks this flag enables are expensive and disabled by default
/// unless `EXPENSIVE_CHECKS` is defined. The flag `-verify-memoryssa` can be
/// used to selectively enable the verification without re-compilation.
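///
/// For example (an illustrative invocation; the pipeline string is an
/// assumption), verification can be requested from the command line:
///
/// \code
///   opt -passes='print<memoryssa>' -verify-memoryssa -disable-output in.ll
/// \endcode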
extern bool VerifyMemorySSA;

// Internal MemorySSA utils, for use by MemorySSA classes and walkers.
class MemorySSAUtil {
protected:
  friend class GVNHoist;
  friend class MemorySSAWalker;

  // This function should not be used by new passes.
  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                  AliasAnalysis &AA);
};

/// An analysis that produces \c MemorySSA for a function.
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  friend AnalysisInfoMixin<MemorySSAAnalysis>;

  static AnalysisKey Key;

public:
  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
  // pointers after construction. Use a wrapper class instead of plain
  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
  struct Result {
    Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}

    MemorySSA &getMSSA() { return *MSSA.get(); }

    std::unique_ptr<MemorySSA> MSSA;

    bool invalidate(Function &F, const PreservedAnalyses &PA,
                    FunctionAnalysisManager::Invalidator &Inv);
  };

  Result run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  raw_ostream &OS;
  bool EnsureOptimizedUses;

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS, bool EnsureOptimizedUses)
      : OS(OS), EnsureOptimizedUses(EnsureOptimizedUses) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for \c MemorySSA via the walker.
class MemorySSAWalkerPrinterPass
    : public PassInfoMixin<MemorySSAWalkerPrinterPass> {
  raw_ostream &OS;

public:
  explicit MemorySSAWalkerPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID;

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};

/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to further disambiguate the def-use chains MemorySSA
/// gives you, or otherwise produce better info than MemorySSA gives you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API enables a
/// standardized interface to getting and using that info.
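///
/// A minimal usage sketch (hypothetical client code):
///
/// \code
///   MemorySSAWalker *Walker = MSSA.getWalker();
///   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(TheLoad);
///   if (MSSA.isLiveOnEntryDef(Clobber)) {
///     // Nothing in this function clobbers the location TheLoad reads.
///   }
/// \endcode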
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  ///   if (a) {
  ///     1 = MemoryDef(liveOnEntry)
  ///     store %a
  ///   } else {
  ///     2 = MemoryDef(liveOnEntry)
  ///     store %b
  ///   }
  ///   3 = MemoryPhi(2, 1)
  ///   MemoryUse(3)
  ///   load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I,
                                          BatchAAResults &AA) {
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA, AA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  BatchAAResults &AA) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &,
                                                  BatchAAResults &AA) = 0;

  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    BatchAAResults BAA(MSSA->getAA());
    return getClobberingMemoryAccess(I, BAA);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) {
    BatchAAResults BAA(MSSA->getAA());
    return getClobberingMemoryAccess(MA, BAA);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) {
    BatchAAResults BAA(MSSA->getAA());
    return getClobberingMemoryAccess(MA, Loc, BAA);
  }

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation. This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};

/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          BatchAAResults &) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &,
                                          BatchAAResults &) override;
};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  memoryaccess_def_iterator_base() = default;

  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPhis, unlike PHINodes, you can't get the
  // block from the operand in constant time (in a PHINode, the uselist has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to linear walk to get the block.
  // If the operation becomes constant time on MemoryPhis, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }

  typename std::iterator_traits<BaseT>::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for
    // everything else.
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }

  using BaseT::operator++;
  memoryaccess_def_iterator_base &operator++() {
    assert(Access && "Hit end of iterator");
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access = nullptr;
  unsigned ArgNo = 0;
};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}

/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};

template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};

/// Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
/// This iterator, while somewhat specialized, is what most clients actually
/// want when walking upwards through MemorySSA def chains. It takes a pair of
/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
/// memory location through phi nodes for the user.
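///
/// A minimal usage sketch (hypothetical client code; the starting access and
/// location are assumptions):
///
/// \code
///   MemoryLocation Loc = MemoryLocation::get(TheLoad);
///   for (const MemoryAccessPair &Pair : upward_defs({StartAccess, Loc}, DT)) {
///     // Pair.first is the reached def; Pair.second is the (possibly
///     // phi-translated) memory location along that path.
///   }
/// \endcode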
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first), DT(DT) {
    CurrentPair.first = nullptr;

    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  upward_defs_iterator() { CurrentPair.first = nullptr; }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  typename std::iterator_traits<BaseT>::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  /// Returns true if \p Ptr is guaranteed to be loop invariant for any
  /// possible loop. In particular, this guarantees that it only references a
  /// single MemoryLocation during execution of the containing function.
  bool IsGuaranteedLoopInvariant(const Value *Ptr) const;

  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    CurrentPair.second = Location;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);

      if (Value *Addr =
              Translator.translateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), DT, true))
        if (Addr != CurrentPair.second.Ptr)
          CurrentPair.second = CurrentPair.second.getWithNewPtr(Addr);

      // Mark size as unknown, if the location is not guaranteed to be
      // loop-invariant for any possible loop in the function. Setting the size
      // to unknown guarantees that any memory accesses that access locations
      // after the pointer are considered as clobbers, which is important to
      // catch loop carried dependences.
      if (!IsGuaranteedLoopInvariant(CurrentPair.second.Ptr))
        CurrentPair.second = CurrentPair.second.getWithNewSize(
            LocationSize::beforeOrAfterPointer());
    }
  }

  MemoryAccessPair CurrentPair;
  memoryaccess_def_iterator DefIterator;
  MemoryLocation Location;
  MemoryAccess *OriginalAccess = nullptr;
  DominatorTree *DT = nullptr;
  bool WalkingPhi = false;
};

inline upward_defs_iterator
upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT) {
  return upward_defs_iterator(Pair, &DT);
}

inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }

inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
  return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
}

/// Walks the defining accesses of MemoryDefs. Stops after we hit something
/// that has no defining use (e.g., a MemoryPhi or liveOnEntry). Note that,
/// when comparing against a null def_chain_iterator, this will compare equal
/// only after walking said Phi/liveOnEntry.
///
/// The UseOptimizedChain flag specifies whether to walk the clobbering
/// access chain, or all the accesses.
///
/// Normally, MemoryDefs are all just def/use linked together, so a def_chain
/// on a MemoryDef will walk all MemoryDefs above it in the program until it
/// hits a phi node. The optimized chain walks the clobbering access of a
/// store. So if you are just trying to find, given a store, what the next
/// thing that would clobber the same memory is, you want the optimized chain.
template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
                                  std::forward_iterator_tag, MemoryAccess *> {
  def_chain_iterator() : MA(nullptr) {}
  def_chain_iterator(T MA) : MA(MA) {}

  T operator*() const { return MA; }

  def_chain_iterator &operator++() {
    // N.B. liveOnEntry has a null defining access.
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
      if (UseOptimizedChain && MUD->isOptimized())
        MA = MUD->getOptimized();
      else
        MA = MUD->getDefiningAccess();
    } else {
      MA = nullptr;
    }

    return *this;
  }

  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }

private:
  T MA;
};

template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}

template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  return make_range(def_chain_iterator<T, true>(MA),
                    def_chain_iterator<T, true>(nullptr));
}

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSA_H