//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on. It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 1000)"));

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;
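
// Usage note (illustrative, added for exposition): because the two limits
// above are ordinary cl::opt flags, they can be tuned from the command line
// when experimenting with the compile-time/precision trade-off, e.g.:
//   opt -memdep-block-scan-limit=200 -memdep-block-number-limit=2000 ...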
/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// If the given instruction references a specific memory location, fill in Loc
/// with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::Ref;
    }
    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::Mod;
    }
    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return ModRefInfo::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
    // calls to free() deallocate the entire structure
    Loc = MemoryLocation::getAfter(CI->getArgOperand(0));
    return ModRefInfo::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::invariant_end:
      Loc = MemoryLocation::getForArgument(II, 2, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::masked_load:
      Loc = MemoryLocation::getForArgument(II, 0, TLI);
      return ModRefInfo::Ref;
    case Intrinsic::masked_store:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      return ModRefInfo::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return ModRefInfo::ModRef;
  if (Inst->mayReadFromMemory())
    return ModRefInfo::Ref;
  return ModRefInfo::NoModRef;
}
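
// Illustrative examples of GetLocation's behavior (for exposition only): a
// simple `%v = load i32, i32* %p` yields Loc = (%p, 4 bytes) and
// ModRefInfo::Ref; a monotonic atomic load keeps the same location but reports
// ModRef so callers stay conservative; a load with stronger ordering clears
// Loc.Ptr entirely.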
/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallDependencyFrom(
    CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = getDefaultBlockScanLimit();

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;
    // Debug intrinsics don't cause dependences and should not affect Limit.
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    // If this inst is a memory op, get the pointer it accessed.
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (isModOrRefSet(AA.getModRefInfo(Call, Loc)))
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto *CallB = dyn_cast<CallBase>(Inst)) {
      // If these two calls do not interfere, look past this one.
      if (isNoModRef(AA.getModRefInfo(Call, CallB))) {
        // If the two calls are the same, return Inst as a Def, so that
        // Call can be found redundant and eliminated.
        if (isReadOnlyCall && !isModSet(MR) &&
            Call->isIdenticalToWhenDefined(CallB))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. CallB is readnone)
        // keep scanning.
        continue;
      } else
        return MemDepResult::getClobber(Inst);
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (isModOrRefSet(MR))
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
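
// Illustrative case for the Def result above (exposition only): with a
// read-only callee such as strlen,
//   %a = call i64 @strlen(i8* %s)
//   %b = call i64 @strlen(i8* %s)
// and no intervening writes, the first call is returned as a Def for the
// second, letting clients such as GVN treat the second call as redundant.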
MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
    BatchAAResults &BatchAA) {
  MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);

      if (InvariantGroupDependency.isDef())
        return InvariantGroupDependency;
    }
  }
  MemDepResult SimpleDep = getSimplePointerDependencyFrom(
      MemLoc, isLoad, ScanIt, BB, QueryInst, Limit, BatchAA);
  if (SimpleDep.isDef())
    return SimpleDep;
  // A non-local invariant group dependency indicates that there is a non-local
  // Def (it only returns non-local if it finds a non-local Def), which is
  // better than a local clobber and everything else.
  if (InvariantGroupDependency.isNonLocal())
    return InvariantGroupDependency;

  assert(InvariantGroupDependency.isUnknown() &&
         "InvariantGroupDependency should be only unknown at this point");
  return SimpleDep;
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  BatchAAResults BatchAA(AA);
  return getPointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst, Limit,
                                  BatchAA);
}

MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                             BasicBlock *BB) {

  if (!LI->hasMetadata(LLVMContext::MD_invariant_group))
    return MemDepResult::getUnknown();

  // Take the ptr operand after all casts and all-zero GEPs. This way we can
  // search only down the cast graph.
  Value *LoadOperand = LI->getPointerOperand()->stripPointerCasts();

  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside
  // of the current function.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  // Queue to process all pointers that are equivalent to the load operand.
  SmallVector<const Value *, 8> LoadOperandsQueue;
  LoadOperandsQueue.push_back(LoadOperand);

  Instruction *ClosestDependency = nullptr;
  // The order of instructions in a use list is unpredictable. In order to
  // always get the same result, we look for the closest dominating dependency.
  auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
    assert(Other && "Must call it with a non-null instruction");
    if (Best == nullptr || DT.dominates(Best, Other))
      return Other;
    return Best;
  };

  // FIXME: This loop is O(N^2) because dominates can be O(n) and in the worst
  // case we will see all the instructions. This should be fixed in MSSA.
  while (!LoadOperandsQueue.empty()) {
    const Value *Ptr = LoadOperandsQueue.pop_back_val();
    assert(Ptr && !isa<GlobalValue>(Ptr) &&
           "Null or GlobalValue should not be inserted");

    for (const Use &Us : Ptr->uses()) {
      auto *U = dyn_cast<Instruction>(Us.getUser());
      if (!U || U == LI || !DT.dominates(U, LI))
        continue;

      // A bitcast or an all-zero GEP is using Ptr. Add it to the queue to
      // check its users, e.g. U = bitcast Ptr.
      if (isa<BitCastInst>(U)) {
        LoadOperandsQueue.push_back(U);
        continue;
      }
      // A GEP with all-zero indices is equivalent to a bitcast.
      // FIXME: we are not sure if some bitcast should be canonicalized to gep 0
      // or gep 0 to bitcast because of SROA, so there are 2 forms. When
      // typeless pointers are ready then both cases will be gone
      // (and this BFS also won't be needed).
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
        if (GEP->hasAllZeroIndices()) {
          LoadOperandsQueue.push_back(U);
          continue;
        }

      // If we hit a load or store with the same invariant.group metadata (and
      // the same pointer operand), we can assume that the value pointed to by
      // the pointer operand didn't change.
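      // Illustrative IR for the case handled below (exposition only):
      //   store i32 42, i32* %ptr, !invariant.group !0
      //   ...
      //   %v = load i32, i32* %ptr, !invariant.group !0
      // The store dominates the load and uses the same pointer, so it can be
      // reported as the load's Def.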
      if ((isa<LoadInst>(U) ||
           (isa<StoreInst>(U) &&
            cast<StoreInst>(U)->getPointerOperand() == Ptr)) &&
          U->hasMetadata(LLVMContext::MD_invariant_group))
        ClosestDependency = GetClosestDependency(ClosestDependency, U);
    }
  }

  if (!ClosestDependency)
    return MemDepResult::getUnknown();
  if (ClosestDependency->getParent() == BB)
    return MemDepResult::getDef(ClosestDependency);
  // Def(U) can't be returned here because it is non-local. If no local
  // dependency is found, return non-local, counting on the user to call
  // getNonLocalPointerDependency, which will return the cached result.
  NonLocalDefsCache.try_emplace(
      LI, NonLocalDepResult(ClosestDependency->getParent(),
                            MemDepResult::getDef(ClosestDependency), nullptr));
  ReverseNonLocalDefsCache[ClosestDependency].insert(LI);
  return MemDepResult::getNonLocal();
}

MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
    BatchAAResults &BatchAA) {
  bool isInvariantLoad = false;

  unsigned DefaultLimit = getDefaultBlockScanLimit();
  if (!Limit)
    Limit = &DefaultLimit;

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program however is that if either
  // 1 or 4 were missing, there would be a race between the store of 42
  // and either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimization of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write. We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->hasMetadata(LLVMContext::MD_invariant_load))
      isInvariantLoad = true;
  }
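
  // For illustration: a load tagged
  //   %v = load i32, i32* %p, !invariant.load !0
  // promises that the location is not modified while it is dereferenceable,
  // so may-alias stores encountered below are skipped for it and only
  // must-alias defs are reported.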
  // True for volatile instructions.
  // For loads/stores, return true if the atomic ordering is stronger than AO;
  // for other instructions, return true if they can read or write memory.
  auto isComplexForReordering = [](Instruction *I, AtomicOrdering AO) -> bool {
    if (I->isVolatile())
      return true;
    if (auto *LI = dyn_cast<LoadInst>(I))
      return isStrongerThan(LI->getOrdering(), AO);
    if (auto *SI = dyn_cast<StoreInst>(I))
      return isStrongerThan(SI->getOrdering(), AO);
    return I->mayReadOrWriteMemory();
  };

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II))
        continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --*Limit;
    if (!*Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      Intrinsic::ID ID = II->getIntrinsicID();
      switch (ID) {
      case Intrinsic::lifetime_start: {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them. It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        MemoryLocation ArgLoc = MemoryLocation::getAfter(II->getArgOperand(1));
        if (BatchAA.isMustAlias(ArgLoc, MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
      case Intrinsic::masked_load:
      case Intrinsic::masked_store: {
        MemoryLocation Loc;
        /*ModRefInfo MR =*/ GetLocation(II, Loc, TLI);
        AliasResult R = BatchAA.alias(Loc, MemLoc);
        if (R == AliasResult::NoAlias)
          continue;
        if (R == AliasResult::MustAlias)
          return MemDepResult::getDef(II);
        if (ID == Intrinsic::masked_load)
          continue;
        return MemDepResult::getClobber(II);
      }
      }
    }

    // Values depend on loads if the pointers are must aliased. This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load that
    // it does not alias with when this atomic load indicates that another
    // thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses, for example, can
      // be safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // The original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (QueryInst->isVolatile())
          // Ordering is required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
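      // Sketch of the rule applied below (for exposition): if the query is a
      // simple load or store, a monotonic atomic load here falls through to
      // the alias check and can be skipped when it does not alias; anything
      // stronger than monotonic (or a non-simple query) is treated as a
      // clobber.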
      if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
        if (!QueryInst ||
            isComplexForReordering(QueryInst, AtomicOrdering::NotAtomic))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(LoadLoc, MemLoc);

      if (R == AliasResult::NoAlias)
        continue;

      if (isLoad) {
        // Must aliased loads are defs of each other.
        if (R == AliasResult::MustAlias)
          return MemDepResult::getDef(Inst);

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasResult::PartialAlias && R.hasOffset()) {
          ClobberOffsets[LI] = R.getOffset();
          return MemDepResult::getClobber(Inst);
        }

        // Random may-alias loads don't depend on each other.
        continue;
      }

      // Stores don't alias loads from read-only memory.
      if (BatchAA.pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst ||
            isComplexForReordering(QueryInst, AtomicOrdering::Unordered))
          return MemDepResult::getClobber(SI);
        // Ok, if we are here the guard above guarantees that
        // QueryInst is a non-atomic or unordered load/store.
        // SI is atomic with monotonic or release semantics (seq_cst for a
        // store is actually release semantics plus a total order over other
        // seq_cst instructions; since QueryInst is not seq_cst we can treat
        // SI as having simple release semantics).
        // Monotonic and release semantics allow re-ordering before the store,
        // so we are safe to go further and check the aliasing. Aliasing (may
        // or must) will prohibit the re-ordering.
      }

      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can, for example,
      // be reordered with volatile accesses.
      if (SI->isVolatile())
        if (!QueryInst || QueryInst->isVolatile())
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it. Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (!isModOrRefSet(BatchAA.getModRefInfo(SI, MemLoc)))
        continue;

      // Ok, this store might clobber the query pointer. Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      // FIXME: Use ModRefInfo::Must bit from getModRefInfo call above.
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(StoreLoc, MemLoc);

      if (R == AliasResult::NoAlias)
        continue;
      if (R == AliasResult::MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def. This means that there is no dependence and
    // the access can be optimized based on that. For example, a load could
    // turn into undef. Note that we can bypass the allocation itself when
    // looking for a clobber in many cases; that's an alias property and is
    // handled by BasicAA.
    if (isa<AllocaInst>(Inst) || isNoAliasCall(Inst)) {
      const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr);
      if (AccessPtr == Inst || BatchAA.isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
    }

    if (isInvariantLoad)
      continue;

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads or stores 'before' the
    // fence. As a result, we look past it when finding a dependency for
    // loads. DSE uses this to find preceding stores to delete and thus we
    // can't bypass the fence if the query instruction is a store.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
        continue;

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    ModRefInfo MR = BatchAA.getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (isModAndRefSet(MR))
      MR = BatchAA.callCapturesBefore(Inst, MemLoc, &DT);
    switch (clearMust(MR)) {
    case ModRefInfo::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case ModRefInfo::Mod:
      return MemDepResult::getClobber(Inst);
    case ModRefInfo::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
      LLVM_FALLTHROUGH;
    default:
      // Otherwise, there is a potential dependence. Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
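
// Minimal usage sketch for the public entry point defined below (variable
// names are illustrative, not from this file):
//
//   MemoryDependenceResults &MD = ...; // e.g. from MemoryDependenceAnalysis
//   MemDepResult Dep = MD.getDependency(SomeLoad);
//   if (Dep.isDef() || Dep.isClobber())
//     Instruction *DepInst = Dep.getInst();
//   else if (Dep.isNonLocal())
//     ... follow up with getNonLocalPointerDependency() ...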
MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  ClobberOffsets.clear();
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it. Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found. If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !isModSet(MR);
      if (auto *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache =
          getPointerDependencyFrom(MemLoc, isLoad, ScanPos->getIterator(),
                                   QueryParent, QueryInst, nullptr);
    } else if (auto *QueryCall = dyn_cast<CallBase>(QueryInst)) {
      bool isReadOnly = AA.onlyReadsMemory(QueryCall);
      LocalCache = getCallDependencyFrom(QueryCall, isReadOnly,
                                         ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}

#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

const MemoryDependenceResults::NonLocalDepInfo &
MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) {
  assert(getDependency(QueryCall).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDepsMap[QueryCall];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed. In the cached case,
  // this can happen due to instructions being deleted etc. In the uncached
  // case, this starts out as the set of predecessors we care about.
  SmallVector<BasicBlock *, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry. If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (auto &Entry : Cache)
      if (Entry.getResult().isDirty())
        DirtyBlocks.push_back(Entry.getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    llvm::sort(Cache);

    ++NumCacheDirtyNonLocal;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCall->getParent();
    append_range(DirtyBlocks, PredCache.get(QueryBB));
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCall);

  SmallPtrSet<BasicBlock *, 32> Visited;

  unsigned NumSortedEntries = Cache.size();
  LLVM_DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.pop_back_val();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set. If so, find it.
    LLVM_DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
                         NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin() + NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap<Instruction *>(ReverseNonLocalDeps, Inst,
                                            QueryCall);
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence was found in this block. If it is not the entry block
      // of the function, the result is non-local; otherwise it is
      // non-function-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it. Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCall);
    } else {

      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block. Add them to our worklist.
      append_range(DirtyBlocks, PredCache.get(DirtyBB));
    }
  }

  return Cache;
}
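
// Minimal usage sketch for the non-local pointer query defined below (names
// are illustrative only):
//
//   SmallVector<NonLocalDepResult, 16> Deps;
//   MD.getNonLocalPointerDependency(SomeLoad, Deps);
//   for (const NonLocalDepResult &R : Deps)
//     // R.getBB() is the block queried, R.getResult() the dependence there.
//     ...;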
void MemoryDependenceResults::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  const MemoryLocation Loc = MemoryLocation::get(QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();
  {
    // Check if there is a cached Def with invariant.group.
    auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
    if (NonLocalDefIt != NonLocalDefsCache.end()) {
      Result.push_back(NonLocalDefIt->second);
      ReverseNonLocalDefsCache[NonLocalDefIt->second.getResult().getInst()]
          .erase(QueryInst);
      NonLocalDefsCache.erase(NonLocalDefIt);
      return;
    }
  }
  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.

  // We currently give up on any instruction which is ordered, but we do handle
  // atomic instructions which are unordered.
  // TODO: Handle ordered instructions.
  auto isOrdered = [](Instruction *Inst) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      return !LI->isUnordered();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      return !SI->isUnordered();
    }
    return false;
  };
  if (QueryInst->isVolatile() || isOrdered(QueryInst)) {
    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                       const_cast<Value *>(Loc.Ptr)));
    return;
  }
  const DataLayout &DL = FromBB->getModule()->getDataLayout();
  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block. Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers. This can happen during PHI
  // translation.
  DenseMap<BasicBlock *, Value *> Visited;
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}

/// Compute the memdep value for BB with Pointer/PointeeSize using either
/// cached information in Cache or by doing a lookup (which may use dirty cache
/// info if available).
///
/// If we do a lookup, add the result to the cache.
MemDepResult MemoryDependenceResults::getNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries,
    BatchAAResults &BatchAA) {

  bool isInvariantLoad = false;

  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Do a binary search to see if we already have an entry for this block in
  // the cache set. If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // Use a cached result for an invariant load only if there is no dependency
  // for the corresponding non-invariant load. In that case the invariant load
  // cannot have any dependency either.
  if (ExistingResult && isInvariantLoad &&
      !ExistingResult->getResult().isNonFuncLocal())
    ExistingResult = nullptr;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value. If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst()->getIterator();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB,
                                              QueryInst, nullptr, BatchAA);

  // Don't cache results for invariant load.
  if (isInvariantLoad)
    return Dep;

  // If we had a dirty entry for the block, update it. Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
///
/// This is optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // Done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
    Cache.insert(Entry, Val);
    LLVM_FALLTHROUGH;
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
          llvm::upper_bound(Cache, Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    llvm::sort(Cache);
    break;
  }
}
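
// Behavior sketch for the switch above (exposition only): with exactly two
// unsorted entries appended, 'case 2' re-inserts the last one into the sorted
// prefix and falls through to 'case 1', which re-inserts the remaining entry;
// only three or more new entries trigger the full llvm::sort.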
/// Perform a dependency query based on pointer/pointeesize starting at the end
/// of StartBB.
///
/// Add any clobber/def results to the results vector and keep track of which
/// blocks are visited in 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true). In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns true on success, or false to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock,
    bool IsIncomplete) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // aa tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  bool isInvariantLoad = false;
  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  // Invariant loads don't participate in caching. Thus no need to reconcile.
  if (!isInvariantLoad && !Pair.second) {
    if (CacheInfo->Size != Loc.Size) {
      bool ThrowOutEverything;
      if (CacheInfo->Size.hasValue() && Loc.Size.hasValue()) {
        // FIXME: We may be able to do better in the face of results with mixed
        // precision. We don't appear to get them in practice, though, so just
        // be conservative.
        ThrowOutEverything =
            CacheInfo->Size.isPrecise() != Loc.Size.isPrecise() ||
            CacheInfo->Size.getValue() < Loc.Size.getValue();
      } else {
        // For our purposes, unknown size > all others.
        ThrowOutEverything = !Loc.Size.hasValue();
      }

      if (ThrowOutEverything) {
        // The query's Size is greater than the cached one. Throw out the
        // cached data and proceed with the query at the greater size.
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->Size = Loc.Size;
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      } else {
        // This query's Size is less than the cached one. Conservatively
        // restart the query using the greater size.
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
            StartBB, Result, Visited, SkipFirstBlock, IsIncomplete);
      }
    }

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB, Result,
            Visited, SkipFirstBlock, IsIncomplete);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  // Don't use cached information for invariant loads since it is valid for
  // non-invariant loads only.
  if (!IsIncomplete && !isInvariantLoad &&
      CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // If we have a fully cached result for this query, we can just return the
    // cached results and populate the visited set. However, we have to verify
    // that we don't already have conflicting results for these blocks. Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (auto &Entry : *Cache) {
        DenseMap<BasicBlock *, Value *>::iterator VI =
            Visited.find(Entry.getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block. Just return false, saying
        // that something was clobbered in this result. We could also do a
        // non-fully cached query, but there is little point in doing this.
        return false;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (auto &Entry : *Cache) {
      Visited.insert(std::make_pair(Entry.getBB(), Addr));
      if (Entry.getResult().isNonLocal()) {
        continue;
      }

      if (DT.isReachableFromEntry(Entry.getBB())) {
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return true;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer or one that we're about to invalidate by putting more info into
  // it than its valid cache info. If empty and not explicitly indicated as
  // incomplete, the result will be valid cache info, otherwise it isn't.
  //
  // Invariant loads don't affect the cache in any way, thus there is no need
  // to update CacheInfo either.
  if (!isInvariantLoad) {
    if (!IsIncomplete && Cache->empty())
      CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
    else
      CacheInfo->Pair = BBSkipFirstBlockPair();
  }

  SmallVector<BasicBlock *, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted. Previously cached
  // entries will all be sorted.
  // The entries we add are only sorted on demand (we don't insert every
  // element into its sorted position). We know that we won't get any reuse
  // from currently inserted values, because we don't revisit blocks after we
  // insert info for them.
  unsigned NumSortedEntries = Cache->size();
  unsigned WorklistEntries = BlockNumberLimit;
  bool GotWorklistLimit = false;
  LLVM_DEBUG(AssertSorted(*Cache));

  BatchAAResults BatchAA(AA);
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // If we process a large number of blocks, it becomes very expensive and
    // likely not worth worrying about.
    if (Result.size() > NumResultsLimit) {
      // Sort it now (if needed) so that recursive invocations of
      // getNonLocalPointerDepFromBB and other routines that could reuse the
      // cache value will only see properly sorted cache arrays.
      if (Cache && NumSortedEntries != Cache->size()) {
        SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      }
      // Since we bail out, the "Cache" set won't contain all of the
      // results for the query. This is ok (we can still use it to accelerate
      // specific block queries) but we can't do the fastpath "return all
      // results from the set". Clear out the indicator for this.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      return false;
    }

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB. See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB. If we have cached
      // information, we will use it, otherwise we compute it.
      LLVM_DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = getNonLocalInfoForBlock(
          QueryInst, Loc, isLoad, BB, Cache, NumSortedEntries, BatchAA);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (DT.isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock *, 16> NewBlocks;
      for (BasicBlock *Pred : PredCache.get(BB)) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
            Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          NewBlocks.push_back(Pred);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      if (NewBlocks.size() > WorklistEntries) {
        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < NewBlocks.size(); i++)
          Visited.erase(NewBlocks[i]);
        GotWorklistLimit = true;
        goto PredTranslationFailure;
      }
      WorklistEntries -= NewBlocks.size();
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation. If we know ahead of time that we
    // can't phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the
    // cache value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = nullptr;

    PredList.clear();
    for (BasicBlock *Pred : PredCache.get(BB)) {
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor. This can fail if
      // it is not translatable, in which case getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer. If so, we can't do this lookup. This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
          Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer. We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0, n = PredList.size(); i < n; ++i)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to avoid
    // calling getNonLocalPointerDepFromBB for blocks we don't want to return
    // any results for. (getNonLocalPointerDepFromBB will modify our
    // datastructures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
    for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor. We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
      if (!PredPtrVal)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value. Consider PHI translating something like:
      //   X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
      // to recurse here, pedantically speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
                                       Pred, Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy
        // future queries. Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null. This forces reuse of the
        // cached value to do more work, but it won't miss the phi trans
        // failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query. This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block. It assumes that we haven't modified any of
    // our datastructures while processing the current block.

    if (!Cache) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of
    // the results for the query. This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value. Since we can't phi translate to one of the predecessors,
    // we have to bail out.
    if (SkipFirstBlock)
      return false;

    // Results of invariant loads are not cached, thus there is no need to
    // update cached information.
    if (!isInvariantLoad) {
      for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
        if (I.getBB() != BB)
          continue;

        assert((GotWorklistLimit || I.getResult().isNonLocal() ||
                !DT.isReachableFromEntry(BB)) &&
               "Should only be here with transparent block");

        I.setResult(MemDepResult::getUnknown());

        break;
      }
    }
    (void)GotWorklistLimit;
    // Go ahead and report unknown dependence.
    Result.push_back(
        NonLocalDepResult(BB, MemDepResult::getUnknown(), Pointer.getAddr()));
  }

  // Okay, we're done now. If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  LLVM_DEBUG(AssertSorted(*Cache));
  return true;
}

/// If P exists in CachedNonLocalPointerInfo or NonLocalDefsCache, remove it.
void MemoryDependenceResults::removeCachedNonLocalPointerDependencies(
    ValueIsLoadPair P) {

  // Most of the time this cache is empty.
  if (!NonLocalDefsCache.empty()) {
    auto it = NonLocalDefsCache.find(P.getPointer());
    if (it != NonLocalDefsCache.end()) {
      RemoveFromReverseMap(ReverseNonLocalDefsCache,
                           it->second.getResult().getInst(), P.getPointer());
      NonLocalDefsCache.erase(it);
    }

    if (auto *I = dyn_cast<Instruction>(P.getPointer())) {
      auto toRemoveIt = ReverseNonLocalDefsCache.find(I);
      if (toRemoveIt != ReverseNonLocalDefsCache.end()) {
        for (const auto *entry : toRemoveIt->second)
          NonLocalDefsCache.erase(entry);
        ReverseNonLocalDefsCache.erase(toRemoveIt);
      }
    }
  }

  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end())
    return;

  // Remove all of the entries in the BB->val map. This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (const NonLocalDepEntry &DE : PInfo) {
    Instruction *Target = DE.getResult().getInst();
    if (!Target)
      continue; // Ignore non-local dep results.
    assert(Target->getParent() == DE.getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}

void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy())
    return;
  // Flush store info for the pointer.
  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
  // Invalidate phis that use the pointer.
  PV.invalidateValue(Ptr);
}

void MemoryDependenceResults::invalidateCachedPredecessors() {
  PredCache.clear();
}

void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
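  // (This drops RemInst's own cached non-local query and unhooks RemInst from
  // the reverse map entries of the instructions that query referred to.)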
  NonLocalDepMapType::iterator NLDI = NonLocalDepsMap.find(RemInst);
  if (NLDI != NonLocalDepsMap.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (auto &Entry : BlockMap)
      if (Instruction *Inst = Entry.getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDepsMap.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached dependencies on this instruction, remove them.

  // If the instruction is a pointer, remove it from both the load info and the
  // store info.
  if (RemInst->getType()->isPointerTy()) {
    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  } else {
    // Otherwise, if the instruction is in the map directly, it must be a load.
    // Remove it.
    auto toRemoveIt = NonLocalDefsCache.find(RemInst);
    if (toRemoveIt != NonLocalDefsCache.end()) {
      assert(isa<LoadInst>(RemInst) &&
             "only load instructions should be added directly");
      const Instruction *DepV = toRemoveIt->second.getResult().getInst();
      ReverseNonLocalDefsCache.find(DepV)->second.erase(RemInst);
      NonLocalDefsCache.erase(toRemoveIt);
    }
  }

  // Loop over all of the things that depend on the instruction we're removing.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other values,
  // we need to replace its entry with a dirty version of the instruction after
  // it. If RemInst is a terminator, we use a null dirty value.
  //
  // Using a dirty version of the instruction after RemInst saves having to scan
  // the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDepIt->second.empty() && !RemInst->isTerminator() &&
           "Nothing can locally depend on a terminator");

    for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDepInst.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(
          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
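    // (Inserting into ReverseLocalDeps while iterating could rehash the map
    // and invalidate the reference obtained from find() above.)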
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    for (Instruction *I : ReverseDepIt->second) {
      assert(I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDepsMap[I];
      // The information is now dirty!
      INLD.second = true;

      for (auto &Entry : INLD.first) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating 'Set'
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
      ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
        ReversePtrDepsToAdd;

    for (ValueIsLoadPair P : ReversePtrDepIt->second) {
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (auto &Entry : NLPDI) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo. Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      llvm::sort(NLPDI);
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
          ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  // Invalidate phis that use the removed instruction.
  PV.invalidateValue(RemInst);

  assert(!NonLocalDepsMap.count(RemInst) && "RemInst got reinserted?");
  LLVM_DEBUG(verifyRemoved(RemInst));
}

/// Verify that the specified instruction does not occur in our internal data
/// structures.
///
/// This function verifies by asserting in debug builds.
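/// removeInstruction() calls this under LLVM_DEBUG after it has finished
/// updating the caches.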
void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
#ifndef NDEBUG
  for (const auto &DepKV : LocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : NonLocalPointerDeps) {
    assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
    for (const auto &Entry : DepKV.second.NonLocalDeps)
      assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (const auto &DepKV : NonLocalDepsMap) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = DepKV.second;
    for (const auto &Entry : INLD.first)
      assert(Entry.getResult().getInst() != D &&
             "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalPtrDeps) {
    assert(DepKV.first != D && "Inst occurs in rev NLPD map");

    for (ValueIsLoadPair P : DepKV.second)
      assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
#endif
}

AnalysisKey MemoryDependenceAnalysis::Key;

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
    : DefaultBlockScanLimit(BlockScanLimit) {}

MemoryDependenceResults
MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &PV = AM.getResult<PhiValuesAnalysis>(F);
  return MemoryDependenceResults(AA, AC, TLI, DT, PV, DefaultBlockScanLimit);
}

char MemoryDependenceWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
  initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
}

MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() = default;

void MemoryDependenceWrapperPass::releaseMemory() { MemDep.reset(); }

void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<PhiValuesWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
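  // TargetLibraryInfo is likewise required transitively: cached memdep results
  // may be queried (and recomputed lazily) after this pass has run.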
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

bool MemoryDependenceResults::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether our analysis is preserved.
  auto PAC = PA.getChecker<MemoryDependenceAnalysis>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    // If not, give up now.
    return true;

  // Check whether the analyses we depend on became invalid for any reason.
  if (Inv.invalidate<AAManager>(F, PA) ||
      Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
      Inv.invalidate<PhiValuesAnalysis>(F, PA))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
  return DefaultBlockScanLimit;
}

bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &PV = getAnalysis<PhiValuesWrapperPass>().getResult();
  MemDep.emplace(AA, AC, TLI, DT, PV, BlockScanLimit);
  return false;
}