//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// The MemorySSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning": every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// Will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
/// Given this form, all the stores that could ever affect the load at %8 can
/// be found by using the MemoryUse associated with it, and walking from use to
/// def until you hit the top of the function.
///
/// Each def also has a list of users associated with it, so you can walk from
/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
/// but not the RHS of MemoryDefs. You can see this above at %7, which would
/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
/// store, all the MemoryUses on its use lists are may-aliases of that store
/// (but the MemoryDefs on its use list may not be).
///
/// MemoryDefs are not disambiguated because it would require multiple reaching
/// definitions, which would require multiple phis, and multiple memoryaccesses
/// per instruction.
///
/// In addition to the def/use graph described above, MemoryDefs also contain
/// an "optimized" definition use. The "optimized" use points to some def
/// reachable through the memory def chain. The optimized def *may* (but is
/// not required to) alias the original MemoryDef, but no def *closer* to the
/// source def may alias it. As the name implies, the purpose of the optimized
/// use is to allow caching of clobber searches for memory defs. The optimized
/// def may be nullptr, in which case clients must walk the defining access
/// chain.
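///
/// For example (a hedged sketch, not a prescribed pattern), a client that
/// wants a MemoryDef `MD`'s cached clobber but must tolerate its absence can
/// fall back to the plain defining-access chain:
///
///   MemoryAccess *Next =
///       MD->isOptimized() ? MD->getOptimized() : MD->getDefiningAccess();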
///
/// When iterating the uses of a MemoryDef, both defining uses and optimized
/// uses will be encountered. If only one type is needed, the client must
/// filter the use walk.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYSSA_H
#define LLVM_ANALYSIS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Pass.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

template <class GraphType> struct GraphTraits;
class BasicBlock;
class Function;
class Instruction;
class LLVMContext;
class MemoryAccess;
class MemorySSAWalker;
class Module;
class Use;
class Value;
class raw_ostream;

namespace MSSAHelpers {

struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers

enum : unsigned {
  // Used to signify what the default invalid ID is for MemoryAccess's
  // getID().
  INVALID_MEMORYACCESS_ID = -1U
};

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;

// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
    : public DerivedUser,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
  using AllAccessType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsOnlyType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  MemoryAccess(const MemoryAccess &) = delete;
  MemoryAccess &operator=(const MemoryAccess &) = delete;

  void *operator new(size_t) = delete;

  // Methods for supporting type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  BasicBlock *getBlock() const { return Block; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// The user iterators for a memory access.
  using iterator = user_iterator;
  using const_iterator = const_user_iterator;

  /// This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;

  /// Get the iterators for the all-access list and the defs-only list.
  /// We default to the all-access list.
  AllAccessType::self_iterator getIterator() {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::const_self_iterator getIterator() const {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::reverse_self_iterator getReverseIterator() {
    return this->AllAccessType::getReverseIterator();
  }
  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    return this->AllAccessType::getReverseIterator();
  }
  DefsOnlyType::self_iterator getDefsIterator() {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::const_self_iterator getDefsIterator() const {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    return this->DefsOnlyType::getReverseIterator();
  }
  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    return this->DefsOnlyType::getReverseIterator();
  }

protected:
  friend class MemoryDef;
  friend class MemoryPhi;
  friend class MemorySSA;
  friend class MemoryUse;
  friend class MemoryUseOrDef;

  /// Used by MemorySSA to change the block of a MemoryAccess when it is
  /// moved.
  void setBlock(BasicBlock *BB) { Block = BB; }

  /// Used for debugging and tracking things about MemoryAccesses.
  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  inline unsigned getID() const;

  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
               BasicBlock *BB, unsigned NumOperands)
      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
        Block(BB) {}

  // Use deleteValue() to delete a generic MemoryAccess.
  ~MemoryAccess() = default;

private:
  BasicBlock *Block;
};

template <>
struct ilist_alloc_traits<MemoryAccess> {
  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}

/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
  void *operator new(size_t) = delete;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// Get the instruction that this MemoryUse represents.
  Instruction *getMemoryInst() const { return MemoryInstruction; }

  /// Get the access that produces the memory state used by this Use.
  MemoryAccess *getDefiningAccess() const { return getOperand(0); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
  }

  /// Do we have an optimized use?
  inline bool isOptimized() const;
  /// Return the MemoryAccess associated with the optimized use, or nullptr.
  inline MemoryAccess *getOptimized() const;
  /// Sets the optimized use for a MemoryDef.
  inline void setOptimized(MemoryAccess *);

  // Retrieve AliasResult type of the optimized access. Ideally this would be
  // returned by the caching walker and may go away in the future.
  Optional<AliasResult> getOptimizedAccessType() const {
    return isOptimized() ? OptimizedAccessAlias : None;
  }

  /// Reset the ID of what this MemoryUse was optimized to, causing it to
  /// be rewalked by the walker if necessary.
  /// This really should only be called by tests.
  inline void resetOptimized();

protected:
  friend class MemorySSA;
  friend class MemorySSAUpdater;

  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
                 unsigned NumOperands)
      : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
        MemoryInstruction(MI), OptimizedAccessAlias(AliasResult::MayAlias) {
    setDefiningAccess(DMA);
  }

  // Use deleteValue() to delete a generic MemoryUseOrDef.
  ~MemoryUseOrDef() = default;

  void setOptimizedAccessType(Optional<AliasResult> AR) {
    OptimizedAccessAlias = AR;
  }

  void setDefiningAccess(
      MemoryAccess *DMA, bool Optimized = false,
      Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias)) {
    if (!Optimized) {
      setOperand(0, DMA);
      return;
    }
    setOptimized(DMA);
    setOptimizedAccessType(AR);
  }

private:
  Instruction *MemoryInstruction;
  Optional<AliasResult> OptimizedAccessAlias;
};

/// Represents read-only accesses to memory.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
                       /*NumOperands=*/1) {}

  // Allocate space for exactly one operand.
  void *operator new(size_t S) { return User::operator new(S, 1); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  void print(raw_ostream &OS) const;

  void setOptimized(MemoryAccess *DMA) {
    OptimizedID = DMA->getID();
    setOperand(0, DMA);
  }

  /// Whether the MemoryUse is optimized. If ensureOptimizedUses() was called,
  /// uses will usually be optimized, but this is not guaranteed (e.g. due to
  /// invalidation and optimization limits).
  bool isOptimized() const {
    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return getDefiningAccess();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

protected:
  friend class MemorySSA;

private:
  static void deleteMe(DerivedUser *Self);

  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
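
// Example (a hedged sketch; `MSSA` and a `LoadInst *LI` are assumed to
// exist): given a load, find the memory state it reads from.
//
//   if (auto *MU = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(LI))) {
//     MemoryAccess *State = MU->getDefiningAccess();
//     if (MSSA.isLiveOnEntryDef(State))
//       ; // No store in this function is known to feed this load.
//   }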

/// Represents a read-write access to memory, whether it is a must-alias,
/// or a may-alias.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryDef's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
/// Note that, in order to provide def-def chains, all defs also have a use
/// associated with them. This use points to the nearest reaching
/// MemoryDef/MemoryPhi.
class MemoryDef final : public MemoryUseOrDef {
public:
  friend class MemorySSA;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
            unsigned Ver)
      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
                       /*NumOperands=*/2),
        ID(Ver) {}

  // Allocate space for exactly two operands.
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryDefVal;
  }

  void setOptimized(MemoryAccess *MA) {
    setOperand(1, MA);
    OptimizedID = MA->getID();
  }

  MemoryAccess *getOptimized() const {
    return cast_or_null<MemoryAccess>(getOperand(1));
  }

  bool isOptimized() const {
    return getOptimized() && OptimizedID == getOptimized()->getID();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
    setOperand(1, nullptr);
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

private:
  static void deleteMe(DerivedUser *Self);

  const unsigned ID;
  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)

template <>
struct OperandTraits<MemoryUseOrDef> {
  static Use *op_begin(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_begin(MU);
    return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
  }

  static Use *op_end(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_end(MU);
    return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
  }

  static unsigned operands(const MemoryUseOrDef *MUD) {
    if (const auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::operands(MU);
    return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
  }
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
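
// Example (a hedged sketch; `MSSA` and a `StoreInst *SI` are assumed): hop
// up the def-def chain from a store until the state leaves plain defs.
//
//   MemoryAccess *MA = MSSA.getMemoryAccess(SI);
//   while (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
//     if (MSSA.isLiveOnEntryDef(MUD))
//       break;                       // Reached the function entry state.
//     MA = MUD->getDefiningAccess(); // Next reaching MemoryDef/MemoryPhi.
//   }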

/// Represents phi nodes for memory accesses.
///
/// These have the same semantics as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPhi along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
/// if (a) {
///   store %a
///   store %b
/// }
/// it *must* be transformed into
/// if (a) {
///   1 = MemoryDef(liveOnEntry)
///   store %a
///   2 = MemoryDef(1)
///   store %b
/// }
/// and *not*
/// if (a) {
///   1 = MemoryDef(liveOnEntry)
///   store %a
///   2 = MemoryDef(liveOnEntry)
///   store %b
/// }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
/// end of the branch, and if there are not two phi nodes, one will be
/// disconnected completely from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// issue.
class MemoryPhi final : public MemoryAccess {
  // Allocate space for exactly zero operands.
  void *operator new(size_t S) { return User::operator new(S); }

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
      : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
        ReservedSpace(NumPreds) {
    allocHungoffUses(ReservedSpace);
  }

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock *const *;

  block_iterator block_begin() {
    return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
  }

  const_block_iterator block_begin() const {
    return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
  }

  block_iterator block_end() { return block_begin() + getNumOperands(); }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges.
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number x.
  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  void setIncomingValue(unsigned I, MemoryAccess *V) {
    assert(V && "PHI node got a null value!");
    setOperand(I, V);
  }

  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }

  /// Return incoming basic block number @p i.
  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned I, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[I] = BB;
  }

  /// Add an incoming value to the end of the PHI list.
  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI. Returns -1 if no instance.
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB)
        return I;
    return -1;
  }

  MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  // After deleting incoming position I, the order of incoming entries may
  // change.
  void unorderedDeleteIncoming(unsigned I) {
    unsigned E = getNumOperands();
    assert(I < E && "Cannot remove out of bounds Phi entry.");
    // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
    // itself should be deleted.
    assert(E >= 2 && "Can only remove incoming values in MemoryPhis with "
                     "at least 2 values.");
    setIncomingValue(I, getIncomingValue(E - 1));
    setIncomingBlock(I, block_begin()[E - 1]);
    setOperand(E - 1, nullptr);
    block_begin()[E - 1] = nullptr;
    setNumHungOffUseOperands(getNumOperands() - 1);
  }

  // After deleting entries that satisfy Pred, remaining entries may have
  // changed order.
  template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
        unorderedDeleteIncoming(I);
        E = getNumOperands();
        --I;
      }
    assert(getNumOperands() >= 1 &&
           "Cannot remove all incoming blocks in a MemoryPhi.");
  }

  // After deleting incoming block BB, the incoming blocks order may be changed.
  void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
  }

  // After deleting incoming memory access MA, the incoming accesses order may
  // be changed.
  void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
  }

  static bool classof(const Value *V) {
    return V->getValueID() == MemoryPhiVal;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

protected:
  friend class MemorySSA;

  /// This is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

private:
  // For debugging only.
  const unsigned ID;
  unsigned ReservedSpace;

  /// This grows the operand list in response to a push_back style of
  /// operation. This grows the number of ops by 1.5 times.
  void growOperands() {
    unsigned E = getNumOperands();
    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    ReservedSpace = std::max(E + E / 2, 2u);
    growHungoffUses(ReservedSpace, /* IsPhi */ true);
  }

  static void deleteMe(DerivedUser *Self);
};
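
// Example (a hedged sketch; `MSSA` and a `BasicBlock *BB` are assumed):
// inspect the block's MemoryPhi, if one exists, pairing each incoming
// memory state with the predecessor it flows in from.
//
//   if (MemoryPhi *MP = MSSA.getMemoryAccess(BB))
//     for (unsigned I = 0, E = MP->getNumIncomingValues(); I != E; ++I) {
//       BasicBlock *Pred = MP->getIncomingBlock(I);
//       MemoryAccess *Incoming = MP->getIncomingValue(I);
//     }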

inline unsigned MemoryAccess::getID() const {
  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
         "only memory defs and phis have ids");
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getID();
  return cast<MemoryPhi>(this)->getID();
}

inline bool MemoryUseOrDef::isOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->isOptimized();
  return cast<MemoryUse>(this)->isOptimized();
}

inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getOptimized();
  return cast<MemoryUse>(this)->getOptimized();
}

inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->setOptimized(MA);
  else
    cast<MemoryUse>(this)->setOptimized(MA);
}

inline void MemoryUseOrDef::resetOptimized() {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->resetOptimized();
  else
    cast<MemoryUse>(this)->resetOptimized();
}

template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)

/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);

  // MemorySSA must remain where it's constructed; Walkers it creates store
  // pointers to it.
  MemorySSA(MemorySSA &&) = delete;

  ~MemorySSA();

  MemorySSAWalker *getWalker();
  MemorySSAWalker *getSkipSelfWalker();

  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it. If passed a basic block gets the memory phi
  /// node that exists for that block, if there is one. Otherwise, this will get
  /// a MemoryUseOrDef.
  MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
    return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
  }

  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
    return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
  }

  DominatorTree &getDomTree() const { return *DT; }

  void dump() const;
  void print(raw_ostream &) const;

  /// Return true if \p MA represents the live on entry value.
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function, so
  /// they may be live on entry to the function. MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to occur
  /// before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  // Sadly, iplists, by default, own and delete pointers added to the
  // list. It's not currently possible to have two iplists for the same type,
  // where one owns the pointers, and one does not. This is because the traits
  // are per-type, not per-tag. If this ever changes, we should make the
  // DefList an iplist.
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    return getWritableBlockAccesses(BB);
  }

  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    return getWritableBlockDefs(BB);
  }

  /// Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  bool dominates(const MemoryAccess *A, const Use &B) const;

  enum class VerificationLevel { Fast, Full };
  /// Verify that MemorySSA is self consistent (IE definitions dominate
  /// all uses, uses appear in the right places). This is used by unit tests.
  void verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End, BeforeTerminator };

  /// By default, uses are *not* optimized during MemorySSA construction.
  /// Calling this method will attempt to optimize all MemoryUses, if this has
  /// not happened yet for this MemorySSA instance. This should be done if you
  /// plan to query the clobbering access for most uses, or if you walk the
  /// def-use chain of uses.
  void ensureOptimizedUses();

protected:
  // Used by Memory SSA dumpers and wrapper pass.
  friend class MemorySSAPrinterLegacyPass;
  friend class MemorySSAUpdater;

  void verifyOrderingDominationAndDefUses(
      Function &F, VerificationLevel = VerificationLevel::Fast) const;
  void verifyDominationNumbers(const Function &F) const;
  void verifyPrevDefInPhis(Function &F) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These are used by the updater to perform various internal MemorySSA
  // machinations. They do not always leave the IR in a correct state, and
  // rely on the updater to fix up what they break, so they are not public.

  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited) {
    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  }

  void removeFromLookups(MemoryAccess *);
  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                               InsertionPlace);
  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                             AccessList::iterator);
  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
                                      const MemoryUseOrDef *Template = nullptr,
                                      bool CreationMustSucceed = true);

private:
  template <class AliasAnalysisType> class ClobberWalkerBase;
  template <class AliasAnalysisType> class CachingWalker;
  template <class AliasAnalysisType> class SkipSelfWalker;
  class OptimizeUses;

  CachingWalker<AliasAnalysis> *getWalkerImpl();
  void buildMemorySSA(BatchAAResults &BAA);

  void prepareForMoveTo(MemoryAccess *, BasicBlock *);
  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;

  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;

  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  MemoryPhi *createMemoryPhi(BasicBlock *BB);
  template <typename AliasAnalysisType>
  MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
                                  const MemoryUseOrDef *Template = nullptr);
  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited,
                  bool SkipVisited = false, bool RenameAllUses = false);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  DefsList *getOrCreateDefsList(const BasicBlock *);
  void renumberBlock(const BasicBlock *) const;
  AliasAnalysis *AA = nullptr;
  DominatorTree *DT;
  Function &F;

  // Memory SSA mappings
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;

  // These two mappings contain the main block to access/def mappings for
  // MemorySSA. The list contained in PerBlockAccesses really owns all the
  // MemoryAccesses.
  // Both maps maintain the invariant that if a block is found in them, the
  // corresponding list is not empty, and if a block is not found in them, the
  // corresponding list is empty.
  AccessMap PerBlockAccesses;
  DefsMap PerBlockDefs;
  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;

  // Domination mappings
  // Note that the numbering is local to a block, even though the map is
  // global.
  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;

  // Memory SSA building info
  std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase;
  std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
  std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
  unsigned NextID = 0;
  bool IsOptimized = false;
};
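
// Example (a hedged sketch; `MSSA`, a `BasicBlock *BB`, and `errs()` are
// assumed available): walk every memory access MemorySSA recorded for a block.
//
//   if (const MemorySSA::AccessList *Accesses = MSSA.getBlockAccesses(BB))
//     for (const MemoryAccess &MA : *Accesses)
//       MA.print(errs());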

/// Enables verification of MemorySSA.
///
/// The checks this flag enables are expensive and disabled by default
/// unless `EXPENSIVE_CHECKS` is defined. The flag `-verify-memoryssa` can be
/// used to selectively enable the verification without re-compilation.
extern bool VerifyMemorySSA;

// Internal MemorySSA utils, for use by MemorySSA classes and walkers.
class MemorySSAUtil {
protected:
  friend class GVNHoist;
  friend class MemorySSAWalker;

  // This function should not be used by new passes.
  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                  AliasAnalysis &AA);
};

// This pass does eager building and then printing of MemorySSA. It is used by
// the tests to be able to build, dump, and verify Memory SSA.
class MemorySSAPrinterLegacyPass : public FunctionPass {
public:
  MemorySSAPrinterLegacyPass();

  bool runOnFunction(Function &) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  static char ID;
};

/// An analysis that produces \c MemorySSA for a function.
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  friend AnalysisInfoMixin<MemorySSAAnalysis>;

  static AnalysisKey Key;

public:
  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
  // pointers after construction. Use a wrapper class instead of plain
  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
  struct Result {
    Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}

    MemorySSA &getMSSA() { return *MSSA.get(); }

    std::unique_ptr<MemorySSA> MSSA;

    bool invalidate(Function &F, const PreservedAnalyses &PA,
                    FunctionAnalysisManager::Invalidator &Inv);
  };

  Result run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  raw_ostream &OS;

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for \c MemorySSA via the walker.
class MemorySSAWalkerPrinterPass
    : public PassInfoMixin<MemorySSAWalkerPrinterPass> {
  raw_ostream &OS;

public:
  explicit MemorySSAWalkerPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID;

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};
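
// Example (a hedged sketch; `MyPass` is a hypothetical new-PM pass): obtain
// MemorySSA from the analysis manager inside its run method.
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//     // ... use MSSA ...
//     return PreservedAnalyses::all();
//   }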

/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to further disambiguate the def-use chains MemorySSA
/// gives you, or otherwise produce better info than MemorySSA gives you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API enables a
/// standardized interface to getting and using that info.
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &) = 0;

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation. This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};
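
// Example (a hedged sketch; `MSSA` and a `LoadInst *LI` are assumed): find
// the nearest dominating access that clobbers the location a load reads.
//
//   MemorySSAWalker *Walker = MSSA.getWalker();
//   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(LI);
//   if (MSSA.isLiveOnEntryDef(Clobber))
//     ; // Nothing in this function clobbers the loaded location.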

/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  memoryaccess_def_iterator_base() = default;

  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPhis, unlike PHINodes, you can't get the
  // block from the operand in constant time (in a PHINode, the uselist has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to linear walk to get the block.
  // If the operation becomes constant time on MemoryPhis, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }

  typename std::iterator_traits<BaseT>::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for
    // everything else.
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }

  using BaseT::operator++;
  memoryaccess_def_iterator_base &operator++() {
    assert(Access && "Hit end of iterator");
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access = nullptr;
  unsigned ArgNo = 0;
};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}

/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};

template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};
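
// Example (a hedged sketch; `MA` is an assumed MemoryAccess*): visit each
// immediate "def" of an access. For a MemoryPhi this iterates the incoming
// values; for a MemoryUse/MemoryDef it yields the single defining access.
//
//   for (auto DI = MA->defs_begin(), DE = MA->defs_end(); DI != DE; ++DI)
//     (*DI)->dump();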

/// Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
/// This iterator, while somewhat specialized, is what most clients actually
/// want when walking upwards through MemorySSA def chains. It takes a pair of
/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
/// memory location through phi nodes for the user.
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT,
                       bool *PerformedPhiTranslation = nullptr)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first), DT(DT),
        PerformedPhiTranslation(PerformedPhiTranslation) {
    CurrentPair.first = nullptr;

    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  upward_defs_iterator() { CurrentPair.first = nullptr; }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  typename std::iterator_traits<BaseT>::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
  /// loop. In particular, this guarantees that it only references a single
  /// MemoryLocation during execution of the containing function.
  bool IsGuaranteedLoopInvariant(Value *Ptr) const;

  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    CurrentPair.second = Location;
    if (WalkingPhi && Location.Ptr) {
      // Mark size as unknown, if the location is not guaranteed to be
      // loop-invariant for any possible loop in the function. Setting the size
      // to unknown guarantees that any memory accesses that access locations
      // after the pointer are considered as clobbers, which is important to
      // catch loop carried dependences.
      if (Location.Ptr &&
          !IsGuaranteedLoopInvariant(const_cast<Value *>(Location.Ptr)))
        CurrentPair.second =
            Location.getWithNewSize(LocationSize::beforeOrAfterPointer());
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);

      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), DT,
                                        true)) {
        Value *TransAddr = Translator.getAddr();
        if (TransAddr != Location.Ptr) {
          CurrentPair.second = CurrentPair.second.getWithNewPtr(TransAddr);

          if (TransAddr &&
              !IsGuaranteedLoopInvariant(const_cast<Value *>(TransAddr)))
            CurrentPair.second = CurrentPair.second.getWithNewSize(
                LocationSize::beforeOrAfterPointer());

          if (PerformedPhiTranslation)
            *PerformedPhiTranslation = true;
        }
      }
    }
  }

  MemoryAccessPair CurrentPair;
  memoryaccess_def_iterator DefIterator;
  MemoryLocation Location;
  MemoryAccess *OriginalAccess = nullptr;
  DominatorTree *DT = nullptr;
  bool WalkingPhi = false;
  bool *PerformedPhiTranslation = nullptr;
};

inline upward_defs_iterator
upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT,
                  bool *PerformedPhiTranslation = nullptr) {
  return upward_defs_iterator(Pair, &DT, PerformedPhiTranslation);
}

inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }

inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
  return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
}
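
// Example (a hedged sketch; `MSSA`, a `DominatorTree &DT`, and a
// `LoadInst *LI` are assumed): walk the defs above a load while tracking
// the (possibly phi-translated) location being followed.
//
//   MemoryUseOrDef *MUD = MSSA.getMemoryAccess(LI);
//   MemoryAccessPair Start(MUD->getDefiningAccess(), MemoryLocation::get(LI));
//   for (const MemoryAccessPair &MAP : upward_defs(Start, DT)) {
//     MemoryAccess *Def = MAP.first;           // Current def being visited.
//     const MemoryLocation &Loc = MAP.second;  // Location after translation.
//   }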

/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
/// comparing against a null def_chain_iterator, this will compare equal only
/// after walking said Phi/liveOnEntry.
///
/// The UseOptimizedChain flag specifies whether to walk the clobbering
/// access chain, or all the accesses.
///
/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
/// a phi node. The optimized chain walks the clobbering access of a store.
/// So if you are just trying to find, given a store, what the next
/// thing that would clobber the same memory is, you want the optimized chain.
template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
                                  std::forward_iterator_tag, MemoryAccess *> {
  def_chain_iterator() : MA(nullptr) {}
  def_chain_iterator(T MA) : MA(MA) {}

  T operator*() const { return MA; }

  def_chain_iterator &operator++() {
    // N.B. liveOnEntry has a null defining access.
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
      if (UseOptimizedChain && MUD->isOptimized())
        MA = MUD->getOptimized();
      else
        MA = MUD->getDefiningAccess();
    } else {
      MA = nullptr;
    }

    return *this;
  }

  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }

private:
  T MA;
};

template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}

template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  return make_range(def_chain_iterator<T, true>(MA),
                    def_chain_iterator<T, true>(nullptr));
}
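
// Example (a hedged sketch; `MA` is an assumed MemoryAccess*): enumerate MA
// and every def above it, ending after the first access with no defining
// use (a MemoryPhi or liveOnEntry).
//
//   for (MemoryAccess *A : def_chain(MA))
//     A->dump();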

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSA_H