//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// Memory SSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning". Every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// Will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
///
///
Given this form, all the stores that could ever effect the load at %8 can be 56 /// gotten by using the MemoryUse associated with it, and walking from use to 57 /// def until you hit the top of the function. 58 /// 59 /// Each def also has a list of users associated with it, so you can walk from 60 /// both def to users, and users to defs. Note that we disambiguate MemoryUses, 61 /// but not the RHS of MemoryDefs. You can see this above at %7, which would 62 /// otherwise be a MemoryUse(4). Being disambiguated means that for a given 63 /// store, all the MemoryUses on its use lists are may-aliases of that store 64 /// (but the MemoryDefs on its use list may not be). 65 /// 66 /// MemoryDefs are not disambiguated because it would require multiple reaching 67 /// definitions, which would require multiple phis, and multiple memoryaccesses 68 /// per instruction. 69 // 70 //===----------------------------------------------------------------------===// 71 72 #ifndef LLVM_ANALYSIS_MEMORYSSA_H 73 #define LLVM_ANALYSIS_MEMORYSSA_H 74 75 #include "llvm/ADT/DenseMap.h" 76 #include "llvm/ADT/GraphTraits.h" 77 #include "llvm/ADT/SmallPtrSet.h" 78 #include "llvm/ADT/SmallVector.h" 79 #include "llvm/ADT/ilist.h" 80 #include "llvm/ADT/ilist_node.h" 81 #include "llvm/ADT/iterator.h" 82 #include "llvm/ADT/iterator_range.h" 83 #include "llvm/ADT/simple_ilist.h" 84 #include "llvm/Analysis/AliasAnalysis.h" 85 #include "llvm/Analysis/MemoryLocation.h" 86 #include "llvm/Analysis/PHITransAddr.h" 87 #include "llvm/IR/BasicBlock.h" 88 #include "llvm/IR/DerivedUser.h" 89 #include "llvm/IR/Dominators.h" 90 #include "llvm/IR/Module.h" 91 #include "llvm/IR/Type.h" 92 #include "llvm/IR/Use.h" 93 #include "llvm/IR/User.h" 94 #include "llvm/IR/Value.h" 95 #include "llvm/IR/ValueHandle.h" 96 #include "llvm/Pass.h" 97 #include "llvm/Support/Casting.h" 98 #include "llvm/Support/CommandLine.h" 99 #include <algorithm> 100 #include <cassert> 101 #include <cstddef> 102 #include <iterator> 103 #include 
<memory> 104 #include <utility> 105 106 namespace llvm { 107 108 /// Enables memory ssa as a dependency for loop passes. 109 extern cl::opt<bool> EnableMSSALoopDependency; 110 111 class Function; 112 class Instruction; 113 class MemoryAccess; 114 class MemorySSAWalker; 115 class LLVMContext; 116 class raw_ostream; 117 118 namespace MSSAHelpers { 119 120 struct AllAccessTag {}; 121 struct DefsOnlyTag {}; 122 123 } // end namespace MSSAHelpers 124 125 enum : unsigned { 126 // Used to signify what the default invalid ID is for MemoryAccess's 127 // getID() 128 INVALID_MEMORYACCESS_ID = -1U 129 }; 130 131 template <class T> class memoryaccess_def_iterator_base; 132 using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>; 133 using const_memoryaccess_def_iterator = 134 memoryaccess_def_iterator_base<const MemoryAccess>; 135 136 // The base for all memory accesses. All memory accesses in a block are 137 // linked together using an intrusive list. 138 class MemoryAccess 139 : public DerivedUser, 140 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>, 141 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> { 142 public: 143 using AllAccessType = 144 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; 145 using DefsOnlyType = 146 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; 147 148 MemoryAccess(const MemoryAccess &) = delete; 149 MemoryAccess &operator=(const MemoryAccess &) = delete; 150 151 void *operator new(size_t) = delete; 152 153 // Methods for support type inquiry through isa, cast, and 154 // dyn_cast 155 static bool classof(const Value *V) { 156 unsigned ID = V->getValueID(); 157 return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal; 158 } 159 160 BasicBlock *getBlock() const { return Block; } 161 162 void print(raw_ostream &OS) const; 163 void dump() const; 164 165 /// The user iterators for a memory access 166 using iterator = user_iterator; 167 using 
const_iterator = const_user_iterator; 168 169 /// This iterator walks over all of the defs in a given 170 /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For 171 /// MemoryUse/MemoryDef, this walks the defining access. 172 memoryaccess_def_iterator defs_begin(); 173 const_memoryaccess_def_iterator defs_begin() const; 174 memoryaccess_def_iterator defs_end(); 175 const_memoryaccess_def_iterator defs_end() const; 176 177 /// Get the iterators for the all access list and the defs only list 178 /// We default to the all access list. 179 AllAccessType::self_iterator getIterator() { 180 return this->AllAccessType::getIterator(); 181 } 182 AllAccessType::const_self_iterator getIterator() const { 183 return this->AllAccessType::getIterator(); 184 } 185 AllAccessType::reverse_self_iterator getReverseIterator() { 186 return this->AllAccessType::getReverseIterator(); 187 } 188 AllAccessType::const_reverse_self_iterator getReverseIterator() const { 189 return this->AllAccessType::getReverseIterator(); 190 } 191 DefsOnlyType::self_iterator getDefsIterator() { 192 return this->DefsOnlyType::getIterator(); 193 } 194 DefsOnlyType::const_self_iterator getDefsIterator() const { 195 return this->DefsOnlyType::getIterator(); 196 } 197 DefsOnlyType::reverse_self_iterator getReverseDefsIterator() { 198 return this->DefsOnlyType::getReverseIterator(); 199 } 200 DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const { 201 return this->DefsOnlyType::getReverseIterator(); 202 } 203 204 protected: 205 friend class MemoryDef; 206 friend class MemoryPhi; 207 friend class MemorySSA; 208 friend class MemoryUse; 209 friend class MemoryUseOrDef; 210 211 /// Used by MemorySSA to change the block of a MemoryAccess when it is 212 /// moved. 213 void setBlock(BasicBlock *BB) { Block = BB; } 214 215 /// Used for debugging and tracking things about MemoryAccesses. 216 /// Guaranteed unique among MemoryAccesses, no guarantees otherwise. 
217 inline unsigned getID() const; 218 219 MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue, 220 BasicBlock *BB, unsigned NumOperands) 221 : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue), 222 Block(BB) {} 223 224 // Use deleteValue() to delete a generic MemoryAccess. 225 ~MemoryAccess() = default; 226 227 private: 228 BasicBlock *Block; 229 }; 230 231 template <> 232 struct ilist_alloc_traits<MemoryAccess> { 233 static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); } 234 }; 235 236 inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) { 237 MA.print(OS); 238 return OS; 239 } 240 241 /// Class that has the common methods + fields of memory uses/defs. It's 242 /// a little awkward to have, but there are many cases where we want either a 243 /// use or def, and there are many cases where uses are needed (defs aren't 244 /// acceptable), and vice-versa. 245 /// 246 /// This class should never be instantiated directly; make a MemoryUse or 247 /// MemoryDef instead. 248 class MemoryUseOrDef : public MemoryAccess { 249 public: 250 void *operator new(size_t) = delete; 251 252 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess); 253 254 /// Get the instruction that this MemoryUse represents. 255 Instruction *getMemoryInst() const { return MemoryInstruction; } 256 257 /// Get the access that produces the memory state used by this Use. 258 MemoryAccess *getDefiningAccess() const { return getOperand(0); } 259 260 static bool classof(const Value *MA) { 261 return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal; 262 } 263 264 // Sadly, these have to be public because they are needed in some of the 265 // iterators. 266 inline bool isOptimized() const; 267 inline MemoryAccess *getOptimized() const; 268 inline void setOptimized(MemoryAccess *); 269 270 // Retrieve AliasResult type of the optimized access. 
Ideally this would be 271 // returned by the caching walker and may go away in the future. 272 Optional<AliasResult> getOptimizedAccessType() const { 273 return OptimizedAccessAlias; 274 } 275 276 /// Reset the ID of what this MemoryUse was optimized to, causing it to 277 /// be rewalked by the walker if necessary. 278 /// This really should only be called by tests. 279 inline void resetOptimized(); 280 281 protected: 282 friend class MemorySSA; 283 friend class MemorySSAUpdater; 284 285 MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty, 286 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB, 287 unsigned NumOperands) 288 : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands), 289 MemoryInstruction(MI), OptimizedAccessAlias(MayAlias) { 290 setDefiningAccess(DMA); 291 } 292 293 // Use deleteValue() to delete a generic MemoryUseOrDef. 294 ~MemoryUseOrDef() = default; 295 296 void setOptimizedAccessType(Optional<AliasResult> AR) { 297 OptimizedAccessAlias = AR; 298 } 299 300 void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false, 301 Optional<AliasResult> AR = MayAlias) { 302 if (!Optimized) { 303 setOperand(0, DMA); 304 return; 305 } 306 setOptimized(DMA); 307 setOptimizedAccessType(AR); 308 } 309 310 private: 311 Instruction *MemoryInstruction; 312 Optional<AliasResult> OptimizedAccessAlias; 313 }; 314 315 /// Represents read-only accesses to memory 316 /// 317 /// In particular, the set of Instructions that will be represented by 318 /// MemoryUse's is exactly the set of Instructions for which 319 /// AliasAnalysis::getModRefInfo returns "Ref". 
320 class MemoryUse final : public MemoryUseOrDef { 321 public: 322 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess); 323 324 MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB) 325 : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB, 326 /*NumOperands=*/1) {} 327 328 // allocate space for exactly one operand 329 void *operator new(size_t s) { return User::operator new(s, 1); } 330 331 static bool classof(const Value *MA) { 332 return MA->getValueID() == MemoryUseVal; 333 } 334 335 void print(raw_ostream &OS) const; 336 337 void setOptimized(MemoryAccess *DMA) { 338 OptimizedID = DMA->getID(); 339 setOperand(0, DMA); 340 } 341 342 bool isOptimized() const { 343 return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID(); 344 } 345 346 MemoryAccess *getOptimized() const { 347 return getDefiningAccess(); 348 } 349 350 void resetOptimized() { 351 OptimizedID = INVALID_MEMORYACCESS_ID; 352 } 353 354 protected: 355 friend class MemorySSA; 356 357 private: 358 static void deleteMe(DerivedUser *Self); 359 360 unsigned OptimizedID = INVALID_MEMORYACCESS_ID; 361 }; 362 363 template <> 364 struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {}; 365 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess) 366 367 /// Represents a read-write access to memory, whether it is a must-alias, 368 /// or a may-alias. 369 /// 370 /// In particular, the set of Instructions that will be represented by 371 /// MemoryDef's is exactly the set of Instructions for which 372 /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef". 373 /// Note that, in order to provide def-def chains, all defs also have a use 374 /// associated with them. This use points to the nearest reaching 375 /// MemoryDef/MemoryPhi. 
376 class MemoryDef final : public MemoryUseOrDef { 377 public: 378 friend class MemorySSA; 379 380 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess); 381 382 MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB, 383 unsigned Ver) 384 : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB, 385 /*NumOperands=*/2), 386 ID(Ver) {} 387 388 // allocate space for exactly two operands 389 void *operator new(size_t s) { return User::operator new(s, 2); } 390 391 static bool classof(const Value *MA) { 392 return MA->getValueID() == MemoryDefVal; 393 } 394 395 void setOptimized(MemoryAccess *MA) { 396 setOperand(1, MA); 397 OptimizedID = MA->getID(); 398 } 399 400 MemoryAccess *getOptimized() const { 401 return cast_or_null<MemoryAccess>(getOperand(1)); 402 } 403 404 bool isOptimized() const { 405 return getOptimized() && OptimizedID == getOptimized()->getID(); 406 } 407 408 void resetOptimized() { 409 OptimizedID = INVALID_MEMORYACCESS_ID; 410 setOperand(1, nullptr); 411 } 412 413 void print(raw_ostream &OS) const; 414 415 unsigned getID() const { return ID; } 416 417 private: 418 static void deleteMe(DerivedUser *Self); 419 420 const unsigned ID; 421 unsigned OptimizedID = INVALID_MEMORYACCESS_ID; 422 }; 423 424 template <> 425 struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {}; 426 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess) 427 428 template <> 429 struct OperandTraits<MemoryUseOrDef> { 430 static Use *op_begin(MemoryUseOrDef *MUD) { 431 if (auto *MU = dyn_cast<MemoryUse>(MUD)) 432 return OperandTraits<MemoryUse>::op_begin(MU); 433 return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD)); 434 } 435 436 static Use *op_end(MemoryUseOrDef *MUD) { 437 if (auto *MU = dyn_cast<MemoryUse>(MUD)) 438 return OperandTraits<MemoryUse>::op_end(MU); 439 return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD)); 440 } 441 442 static unsigned operands(const MemoryUseOrDef *MUD) { 443 if (const auto *MU = 
dyn_cast<MemoryUse>(MUD)) 444 return OperandTraits<MemoryUse>::operands(MU); 445 return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD)); 446 } 447 }; 448 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess) 449 450 /// Represents phi nodes for memory accesses. 451 /// 452 /// These have the same semantic as regular phi nodes, with the exception that 453 /// only one phi will ever exist in a given basic block. 454 /// Guaranteeing one phi per block means guaranteeing there is only ever one 455 /// valid reaching MemoryDef/MemoryPHI along each path to the phi node. 456 /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or 457 /// a MemoryPhi's operands. 458 /// That is, given 459 /// if (a) { 460 /// store %a 461 /// store %b 462 /// } 463 /// it *must* be transformed into 464 /// if (a) { 465 /// 1 = MemoryDef(liveOnEntry) 466 /// store %a 467 /// 2 = MemoryDef(1) 468 /// store %b 469 /// } 470 /// and *not* 471 /// if (a) { 472 /// 1 = MemoryDef(liveOnEntry) 473 /// store %a 474 /// 2 = MemoryDef(liveOnEntry) 475 /// store %b 476 /// } 477 /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the 478 /// end of the branch, and if there are not two phi nodes, one will be 479 /// disconnected completely from the SSA graph below that point. 480 /// Because MemoryUse's do not generate new definitions, they do not have this 481 /// issue. 482 class MemoryPhi final : public MemoryAccess { 483 // allocate space for exactly zero operands 484 void *operator new(size_t s) { return User::operator new(s); } 485 486 public: 487 /// Provide fast operand accessors 488 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess); 489 490 MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0) 491 : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver), 492 ReservedSpace(NumPreds) { 493 allocHungoffUses(ReservedSpace); 494 } 495 496 // Block iterator interface. 
This provides access to the list of incoming 497 // basic blocks, which parallels the list of incoming values. 498 using block_iterator = BasicBlock **; 499 using const_block_iterator = BasicBlock *const *; 500 501 block_iterator block_begin() { 502 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace); 503 } 504 505 const_block_iterator block_begin() const { 506 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); 507 } 508 509 block_iterator block_end() { return block_begin() + getNumOperands(); } 510 511 const_block_iterator block_end() const { 512 return block_begin() + getNumOperands(); 513 } 514 515 iterator_range<block_iterator> blocks() { 516 return make_range(block_begin(), block_end()); 517 } 518 519 iterator_range<const_block_iterator> blocks() const { 520 return make_range(block_begin(), block_end()); 521 } 522 523 op_range incoming_values() { return operands(); } 524 525 const_op_range incoming_values() const { return operands(); } 526 527 /// Return the number of incoming edges 528 unsigned getNumIncomingValues() const { return getNumOperands(); } 529 530 /// Return incoming value number x 531 MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); } 532 void setIncomingValue(unsigned I, MemoryAccess *V) { 533 assert(V && "PHI node got a null value!"); 534 setOperand(I, V); 535 } 536 537 static unsigned getOperandNumForIncomingValue(unsigned I) { return I; } 538 static unsigned getIncomingValueNumForOperand(unsigned I) { return I; } 539 540 /// Return incoming basic block number @p i. 541 BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; } 542 543 /// Return incoming basic block corresponding 544 /// to an operand of the PHI. 
545 BasicBlock *getIncomingBlock(const Use &U) const { 546 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); 547 return getIncomingBlock(unsigned(&U - op_begin())); 548 } 549 550 /// Return incoming basic block corresponding 551 /// to value use iterator. 552 BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const { 553 return getIncomingBlock(I.getUse()); 554 } 555 556 void setIncomingBlock(unsigned I, BasicBlock *BB) { 557 assert(BB && "PHI node got a null basic block!"); 558 block_begin()[I] = BB; 559 } 560 561 /// Add an incoming value to the end of the PHI list 562 void addIncoming(MemoryAccess *V, BasicBlock *BB) { 563 if (getNumOperands() == ReservedSpace) 564 growOperands(); // Get more space! 565 // Initialize some new operands. 566 setNumHungOffUseOperands(getNumOperands() + 1); 567 setIncomingValue(getNumOperands() - 1, V); 568 setIncomingBlock(getNumOperands() - 1, BB); 569 } 570 571 /// Return the first index of the specified basic 572 /// block in the value list for this PHI. Returns -1 if no instance. 573 int getBasicBlockIndex(const BasicBlock *BB) const { 574 for (unsigned I = 0, E = getNumOperands(); I != E; ++I) 575 if (block_begin()[I] == BB) 576 return I; 577 return -1; 578 } 579 580 MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const { 581 int Idx = getBasicBlockIndex(BB); 582 assert(Idx >= 0 && "Invalid basic block argument!"); 583 return getIncomingValue(Idx); 584 } 585 586 // After deleting incoming position I, the order of incoming may be changed. 587 void unorderedDeleteIncoming(unsigned I) { 588 unsigned E = getNumOperands(); 589 assert(I < E && "Cannot remove out of bounds Phi entry."); 590 // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi 591 // itself should be deleted. 
592 assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with " 593 "at least 2 values."); 594 setIncomingValue(I, getIncomingValue(E - 1)); 595 setIncomingBlock(I, block_begin()[E - 1]); 596 setOperand(E - 1, nullptr); 597 block_begin()[E - 1] = nullptr; 598 setNumHungOffUseOperands(getNumOperands() - 1); 599 } 600 601 // After deleting entries that satisfy Pred, remaining entries may have 602 // changed order. 603 template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) { 604 for (unsigned I = 0, E = getNumOperands(); I != E; ++I) 605 if (Pred(getIncomingValue(I), getIncomingBlock(I))) { 606 unorderedDeleteIncoming(I); 607 E = getNumOperands(); 608 --I; 609 } 610 assert(getNumOperands() >= 1 && 611 "Cannot remove all incoming blocks in a MemoryPhi."); 612 } 613 614 // After deleting incoming block BB, the incoming blocks order may be changed. 615 void unorderedDeleteIncomingBlock(const BasicBlock *BB) { 616 unorderedDeleteIncomingIf( 617 [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; }); 618 } 619 620 // After deleting incoming memory access MA, the incoming accesses order may 621 // be changed. 622 void unorderedDeleteIncomingValue(const MemoryAccess *MA) { 623 unorderedDeleteIncomingIf( 624 [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; }); 625 } 626 627 static bool classof(const Value *V) { 628 return V->getValueID() == MemoryPhiVal; 629 } 630 631 void print(raw_ostream &OS) const; 632 633 unsigned getID() const { return ID; } 634 635 protected: 636 friend class MemorySSA; 637 638 /// this is more complicated than the generic 639 /// User::allocHungoffUses, because we have to allocate Uses for the incoming 640 /// values and pointers to the incoming blocks, all in one allocation. 
641 void allocHungoffUses(unsigned N) { 642 User::allocHungoffUses(N, /* IsPhi */ true); 643 } 644 645 private: 646 // For debugging only 647 const unsigned ID; 648 unsigned ReservedSpace; 649 650 /// This grows the operand list in response to a push_back style of 651 /// operation. This grows the number of ops by 1.5 times. 652 void growOperands() { 653 unsigned E = getNumOperands(); 654 // 2 op PHI nodes are VERY common, so reserve at least enough for that. 655 ReservedSpace = std::max(E + E / 2, 2u); 656 growHungoffUses(ReservedSpace, /* IsPhi */ true); 657 } 658 659 static void deleteMe(DerivedUser *Self); 660 }; 661 662 inline unsigned MemoryAccess::getID() const { 663 assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) && 664 "only memory defs and phis have ids"); 665 if (const auto *MD = dyn_cast<MemoryDef>(this)) 666 return MD->getID(); 667 return cast<MemoryPhi>(this)->getID(); 668 } 669 670 inline bool MemoryUseOrDef::isOptimized() const { 671 if (const auto *MD = dyn_cast<MemoryDef>(this)) 672 return MD->isOptimized(); 673 return cast<MemoryUse>(this)->isOptimized(); 674 } 675 676 inline MemoryAccess *MemoryUseOrDef::getOptimized() const { 677 if (const auto *MD = dyn_cast<MemoryDef>(this)) 678 return MD->getOptimized(); 679 return cast<MemoryUse>(this)->getOptimized(); 680 } 681 682 inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) { 683 if (auto *MD = dyn_cast<MemoryDef>(this)) 684 MD->setOptimized(MA); 685 else 686 cast<MemoryUse>(this)->setOptimized(MA); 687 } 688 689 inline void MemoryUseOrDef::resetOptimized() { 690 if (auto *MD = dyn_cast<MemoryDef>(this)) 691 MD->resetOptimized(); 692 else 693 cast<MemoryUse>(this)->resetOptimized(); 694 } 695 696 template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {}; 697 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess) 698 699 /// Encapsulates MemorySSA, including all data associated with memory 700 /// accesses. 
701 class MemorySSA { 702 public: 703 MemorySSA(Function &, AliasAnalysis *, DominatorTree *); 704 705 // MemorySSA must remain where it's constructed; Walkers it creates store 706 // pointers to it. 707 MemorySSA(MemorySSA &&) = delete; 708 709 ~MemorySSA(); 710 711 MemorySSAWalker *getWalker(); 712 MemorySSAWalker *getSkipSelfWalker(); 713 714 /// Given a memory Mod/Ref'ing instruction, get the MemorySSA 715 /// access associated with it. If passed a basic block gets the memory phi 716 /// node that exists for that block, if there is one. Otherwise, this will get 717 /// a MemoryUseOrDef. 718 MemoryUseOrDef *getMemoryAccess(const Instruction *I) const { 719 return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I)); 720 } 721 722 MemoryPhi *getMemoryAccess(const BasicBlock *BB) const { 723 return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB))); 724 } 725 726 DominatorTree &getDomTree() const { return *DT; } 727 728 void dump() const; 729 void print(raw_ostream &) const; 730 731 /// Return true if \p MA represents the live on entry value 732 /// 733 /// Loads and stores from pointer arguments and other global values may be 734 /// defined by memory operations that do not occur in the current function, so 735 /// they may be live on entry to the function. MemorySSA represents such 736 /// memory state by the live on entry definition, which is guaranteed to occur 737 /// before any other memory access in the function. 738 inline bool isLiveOnEntryDef(const MemoryAccess *MA) const { 739 return MA == LiveOnEntryDef.get(); 740 } 741 742 inline MemoryAccess *getLiveOnEntryDef() const { 743 return LiveOnEntryDef.get(); 744 } 745 746 // Sadly, iplists, by default, owns and deletes pointers added to the 747 // list. It's not currently possible to have two iplists for the same type, 748 // where one owns the pointers, and one does not. This is because the traits 749 // are per-type, not per-tag. 
If this ever changes, we should make the 750 // DefList an iplist. 751 using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>; 752 using DefsList = 753 simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>; 754 755 /// Return the list of MemoryAccess's for a given basic block. 756 /// 757 /// This list is not modifiable by the user. 758 const AccessList *getBlockAccesses(const BasicBlock *BB) const { 759 return getWritableBlockAccesses(BB); 760 } 761 762 /// Return the list of MemoryDef's and MemoryPhi's for a given basic 763 /// block. 764 /// 765 /// This list is not modifiable by the user. 766 const DefsList *getBlockDefs(const BasicBlock *BB) const { 767 return getWritableBlockDefs(BB); 768 } 769 770 /// Given two memory accesses in the same basic block, determine 771 /// whether MemoryAccess \p A dominates MemoryAccess \p B. 772 bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const; 773 774 /// Given two memory accesses in potentially different blocks, 775 /// determine whether MemoryAccess \p A dominates MemoryAccess \p B. 776 bool dominates(const MemoryAccess *A, const MemoryAccess *B) const; 777 778 /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A 779 /// dominates Use \p B. 780 bool dominates(const MemoryAccess *A, const Use &B) const; 781 782 /// Verify that MemorySSA is self consistent (IE definitions dominate 783 /// all uses, uses appear in the right places). This is used by unit tests. 784 void verifyMemorySSA() const; 785 786 /// Used in various insertion functions to specify whether we are talking 787 /// about the beginning or end of a block. 
788 enum InsertionPlace { Beginning, End, BeforeTerminator }; 789 790 protected: 791 // Used by Memory SSA annotater, dumpers, and wrapper pass 792 friend class MemorySSAAnnotatedWriter; 793 friend class MemorySSAPrinterLegacyPass; 794 friend class MemorySSAUpdater; 795 796 void verifyOrderingDominationAndDefUses(Function &F) const; 797 void verifyDominationNumbers(const Function &F) const; 798 void verifyPrevDefInPhis(Function &F) const; 799 800 // This is used by the use optimizer and updater. 801 AccessList *getWritableBlockAccesses(const BasicBlock *BB) const { 802 auto It = PerBlockAccesses.find(BB); 803 return It == PerBlockAccesses.end() ? nullptr : It->second.get(); 804 } 805 806 // This is used by the use optimizer and updater. 807 DefsList *getWritableBlockDefs(const BasicBlock *BB) const { 808 auto It = PerBlockDefs.find(BB); 809 return It == PerBlockDefs.end() ? nullptr : It->second.get(); 810 } 811 812 // These is used by the updater to perform various internal MemorySSA 813 // machinsations. They do not always leave the IR in a correct state, and 814 // relies on the updater to fixup what it breaks, so it is not public. 815 816 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where); 817 void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point); 818 819 // Rename the dominator tree branch rooted at BB. 
820 void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, 821 SmallPtrSetImpl<BasicBlock *> &Visited) { 822 renamePass(DT->getNode(BB), IncomingVal, Visited, true, true); 823 } 824 825 void removeFromLookups(MemoryAccess *); 826 void removeFromLists(MemoryAccess *, bool ShouldDelete = true); 827 void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, 828 InsertionPlace); 829 void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, 830 AccessList::iterator); 831 MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *, 832 const MemoryUseOrDef *Template = nullptr, 833 bool CreationMustSucceed = true); 834 835 private: 836 template <class AliasAnalysisType> class ClobberWalkerBase; 837 template <class AliasAnalysisType> class CachingWalker; 838 template <class AliasAnalysisType> class SkipSelfWalker; 839 class OptimizeUses; 840 841 CachingWalker<AliasAnalysis> *getWalkerImpl(); 842 void buildMemorySSA(BatchAAResults &BAA); 843 void optimizeUses(); 844 845 void prepareForMoveTo(MemoryAccess *, BasicBlock *); 846 void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const; 847 848 using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>; 849 using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>; 850 851 void 852 determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks); 853 void markUnreachableAsLiveOnEntry(BasicBlock *BB); 854 bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const; 855 MemoryPhi *createMemoryPhi(BasicBlock *BB); 856 template <typename AliasAnalysisType> 857 MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *, 858 const MemoryUseOrDef *Template = nullptr); 859 MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace); 860 void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &); 861 MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool); 862 void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool); 863 void 
renamePass(DomTreeNode *, MemoryAccess *IncomingVal, 864 SmallPtrSetImpl<BasicBlock *> &Visited, 865 bool SkipVisited = false, bool RenameAllUses = false); 866 AccessList *getOrCreateAccessList(const BasicBlock *); 867 DefsList *getOrCreateDefsList(const BasicBlock *); 868 void renumberBlock(const BasicBlock *) const; 869 AliasAnalysis *AA; 870 DominatorTree *DT; 871 Function &F; 872 873 // Memory SSA mappings 874 DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess; 875 876 // These two mappings contain the main block to access/def mappings for 877 // MemorySSA. The list contained in PerBlockAccesses really owns all the 878 // MemoryAccesses. 879 // Both maps maintain the invariant that if a block is found in them, the 880 // corresponding list is not empty, and if a block is not found in them, the 881 // corresponding list is empty. 882 AccessMap PerBlockAccesses; 883 DefsMap PerBlockDefs; 884 std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef; 885 886 // Domination mappings 887 // Note that the numbering is local to a block, even though the map is 888 // global. 889 mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid; 890 mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering; 891 892 // Memory SSA building info 893 std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase; 894 std::unique_ptr<CachingWalker<AliasAnalysis>> Walker; 895 std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker; 896 unsigned NextID; 897 }; 898 899 // Internal MemorySSA utils, for use by MemorySSA classes and walkers 900 class MemorySSAUtil { 901 protected: 902 friend class GVNHoist; 903 friend class MemorySSAWalker; 904 905 // This function should not be used by new passes. 906 static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, 907 AliasAnalysis &AA); 908 }; 909 910 // This pass does eager building and then printing of MemorySSA. It is used by 911 // the tests to be able to build, dump, and verify Memory SSA. 
912 class MemorySSAPrinterLegacyPass : public FunctionPass { 913 public: 914 MemorySSAPrinterLegacyPass(); 915 916 bool runOnFunction(Function &) override; 917 void getAnalysisUsage(AnalysisUsage &AU) const override; 918 919 static char ID; 920 }; 921 922 /// An analysis that produces \c MemorySSA for a function. 923 /// 924 class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> { 925 friend AnalysisInfoMixin<MemorySSAAnalysis>; 926 927 static AnalysisKey Key; 928 929 public: 930 // Wrap MemorySSA result to ensure address stability of internal MemorySSA 931 // pointers after construction. Use a wrapper class instead of plain 932 // unique_ptr<MemorySSA> to avoid build breakage on MSVC. 933 struct Result { 934 Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {} 935 936 MemorySSA &getMSSA() { return *MSSA.get(); } 937 938 std::unique_ptr<MemorySSA> MSSA; 939 940 bool invalidate(Function &F, const PreservedAnalyses &PA, 941 FunctionAnalysisManager::Invalidator &Inv); 942 }; 943 944 Result run(Function &F, FunctionAnalysisManager &AM); 945 }; 946 947 /// Printer pass for \c MemorySSA. 948 class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> { 949 raw_ostream &OS; 950 951 public: 952 explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {} 953 954 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); 955 }; 956 957 /// Verifier pass for \c MemorySSA. 958 struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> { 959 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); 960 }; 961 962 /// Legacy analysis pass which computes \c MemorySSA. 
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID; // Pass identification, replacement for typeid.

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};

/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to be able to further disambiguate the def-use chains
/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
/// you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API enables a
/// standardized interface to getting and using that info.
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    // Look up the access MemorySSA created for I, then defer to the
    // MemoryAccess-based overload implemented by the concrete walker.
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &) = 0;

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation. This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};

/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  // Default-constructed iterator (Access == nullptr) is the end sentinel.
  memoryaccess_def_iterator_base() = default;

  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    // ArgNo only matters while iterating a MemoryPhi's incoming values;
    // two end iterators (null Access) always compare equal.
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
  // block from the operand in constant time (In a PHINode, the uselist has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to linear walk to get the block.
  // If the operation becomes constant time on MemoryPHI's, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }

  typename BaseT::iterator::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for everything
    // else.
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }

  using BaseT::operator++;
  memoryaccess_def_iterator_base &operator++() {
    assert(Access && "Hit end of iterator");
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      // A phi yields one def per incoming value; advance through them and
      // become the end iterator after the last one.
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      // A MemoryUseOrDef has exactly one defining access, so one step ends.
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access = nullptr; // Access being iterated; nullptr == end.
  unsigned ArgNo = 0;  // Current incoming-value index when Access is a phi.
};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}

/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
1151 template <> struct GraphTraits<MemoryAccess *> { 1152 using NodeRef = MemoryAccess *; 1153 using ChildIteratorType = memoryaccess_def_iterator; 1154 1155 static NodeRef getEntryNode(NodeRef N) { return N; } 1156 static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); } 1157 static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); } 1158 }; 1159 1160 template <> struct GraphTraits<Inverse<MemoryAccess *>> { 1161 using NodeRef = MemoryAccess *; 1162 using ChildIteratorType = MemoryAccess::iterator; 1163 1164 static NodeRef getEntryNode(NodeRef N) { return N; } 1165 static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); } 1166 static ChildIteratorType child_end(NodeRef N) { return N->user_end(); } 1167 }; 1168 1169 /// Provide an iterator that walks defs, giving both the memory access, 1170 /// and the current pointer location, updating the pointer location as it 1171 /// changes due to phi node translation. 1172 /// 1173 /// This iterator, while somewhat specialized, is what most clients actually 1174 /// want when walking upwards through MemorySSA def chains. It takes a pair of 1175 /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the 1176 /// memory location through phi nodes for the user. 
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  /// Begin walking the defs of \p Info.first, with \p Info.second as the
  /// starting memory location.
  upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first), DT(DT) {
    CurrentPair.first = nullptr;

    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  // End iterator.
  upward_defs_iterator() { CurrentPair.first = nullptr; }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  BaseT::iterator::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  /// Recompute CurrentPair for the access DefIterator currently points at.
  /// When walking a MemoryPhi, the pointer is phi-translated into the
  /// incoming block so the location stays meaningful for that edge.
  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
      // PHITranslateValue returns false on success (see PHITransAddr).
      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), DT,
                                        false)) {
        if (Translator.getAddr() != Location.Ptr) {
          // The pointer changed across the phi; rewrite the location.
          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
          return;
        }
      } else {
        // Translation failed: be conservative and drop size information.
        CurrentPair.second = Location.getWithNewSize(LocationSize::unknown());
        return;
      }
    }
    // No translation needed (or pointer unchanged): keep the location as-is.
    CurrentPair.second = Location;
  }

  MemoryAccessPair CurrentPair;
  memoryaccess_def_iterator DefIterator;
  MemoryLocation Location;
  MemoryAccess *OriginalAccess = nullptr;
  bool WalkingPhi = false;
  DominatorTree *DT = nullptr;
};

inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair,
                                              DominatorTree &DT) {
  return upward_defs_iterator(Pair, &DT);
}

inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }

/// Range over the upward def walk starting at \p Pair.
inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
  return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
}

/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
/// comparing against a null def_chain_iterator, this will compare equal only
/// after walking said Phi/liveOnEntry.
///
/// The UseOptimizedChain flag specifies whether to walk the clobbering
/// access chain, or all the accesses.
///
/// Normally, MemoryDef are all just def/use linked together, so a def_chain on
/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
/// a phi node. The optimized chain walks the clobbering access of a store.
/// So if you are just trying to find, given a store, what the next
/// thing that would clobber the same memory is, you want the optimized chain.
template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
                                  std::forward_iterator_tag, MemoryAccess *> {
  def_chain_iterator() : MA(nullptr) {}
  def_chain_iterator(T MA) : MA(MA) {}

  T operator*() const { return MA; }

  def_chain_iterator &operator++() {
    // N.B. liveOnEntry has a null defining access.
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
      if (UseOptimizedChain && MUD->isOptimized())
        MA = MUD->getOptimized();
      else
        MA = MUD->getDefiningAccess();
    } else {
      // MemoryPhi (or a null defining access): the chain ends here.
      MA = nullptr;
    }

    return *this;
  }

  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }

private:
  T MA;
};

/// Range over the def chain of \p MA, optionally stopping at \p UpTo
/// (exclusive). \p UpTo must lie on the chain when provided.
template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}

/// Range over the def chain of \p MA that follows optimized (clobbering)
/// links where available.
template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  return make_range(def_chain_iterator<T, true>(MA),
                    def_chain_iterator<T, true>(nullptr));
}

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSA_H