//===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file promotes memory references to be register references. It promotes
// alloca instructions which only have loads and stores as uses. An alloca is
// transformed by using iterated dominator frontiers to place PHI nodes, then
// traversing the function in depth-first order to rewrite loads and stores as
// appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "mem2reg"

STATISTIC(NumLocalPromoted, "Number of allocas promoted within one block");
STATISTIC(NumSingleStore, "Number of allocas promoted with a single store");
STATISTIC(NumDeadAlloca, "Number of dead allocas removed");
STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");

bool llvm::isAllocaPromotable(const AllocaInst *AI) {
  // FIXME: If the memory unit is of pointer or integer type, we can permit
  // assignments to subsections of the memory unit.
  unsigned AS = AI->getType()->getAddressSpace();

  // Only allow direct and non-volatile loads and stores...
  for (const User *U : AI->users()) {
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      // Note that atomic loads can be transformed; atomic semantics do
      // not have any meaning for a local alloca.
      if (LI->isVolatile())
        return false;
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == AI)
        return false; // Don't allow a store OF the AI, only INTO the AI.
      // Note that atomic stores can be transformed; atomic semantics do
      // not have any meaning for a local alloca.
      if (SI->isVolatile())
        return false;
    } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      if (!II->isLifetimeStartOrEnd())
        return false;
    } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (BCI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
        return false;
      if (!onlyUsedByLifetimeMarkers(BCI))
        return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (GEPI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
        return false;
      if (!GEPI->hasAllZeroIndices())
        return false;
      if (!onlyUsedByLifetimeMarkers(GEPI))
        return false;
    } else {
      return false;
    }
  }

  return true;
}

namespace {

struct AllocaInfo {
  SmallVector<BasicBlock *, 32> DefiningBlocks;
  SmallVector<BasicBlock *, 32> UsingBlocks;

  StoreInst *OnlyStore;
  BasicBlock *OnlyBlock;
  bool OnlyUsedInOneBlock;

  TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares;

  void clear() {
    DefiningBlocks.clear();
    UsingBlocks.clear();
    OnlyStore = nullptr;
    OnlyBlock = nullptr;
    OnlyUsedInOneBlock = true;
    DbgDeclares.clear();
  }

  /// Scan the uses of the specified alloca, filling in the AllocaInfo used
  /// by the rest of the pass to reason about the uses of this alloca.
  void AnalyzeAlloca(AllocaInst *AI) {
    clear();

    // As we scan the uses of the alloca instruction, keep track of stores,
    // and decide whether all of the loads and stores to the alloca are within
    // the same basic block.
    for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
      Instruction *User = cast<Instruction>(*UI++);

      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
        // Remember the basic blocks which define new values for the alloca.
        DefiningBlocks.push_back(SI->getParent());
        OnlyStore = SI;
      } else {
        LoadInst *LI = cast<LoadInst>(User);
        // Otherwise it must be a load instruction; keep track of variable
        // reads.
        UsingBlocks.push_back(LI->getParent());
      }

      if (OnlyUsedInOneBlock) {
        if (!OnlyBlock)
          OnlyBlock = User->getParent();
        else if (OnlyBlock != User->getParent())
          OnlyUsedInOneBlock = false;
      }
    }

    DbgDeclares = FindDbgAddrUses(AI);
  }
};

/// Data package used by RenamePass().
struct RenamePassData {
  using ValVector = std::vector<Value *>;
  using LocationVector = std::vector<DebugLoc>;

  RenamePassData(BasicBlock *B, BasicBlock *P, ValVector V, LocationVector L)
      : BB(B), Pred(P), Values(std::move(V)), Locations(std::move(L)) {}

  BasicBlock *BB;
  BasicBlock *Pred;
  ValVector Values;
  LocationVector Locations;
};

/// This assigns and keeps a per-bb relative ordering of load/store
/// instructions in the block that directly load or store an alloca.
///
/// This functionality is important because it avoids scanning large basic
/// blocks multiple times when promoting many allocas in the same block.
class LargeBlockInfo {
  /// For each instruction that we track, keep the index of the
  /// instruction.
  ///
  /// The index starts out as the number of the instruction from the start of
  /// the block.
  DenseMap<const Instruction *, unsigned> InstNumbers;

public:

  /// This code only looks at accesses to allocas.
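  ///
  /// For example (an illustrative IR snippet), given an alloca %a, direct
  /// accesses such as
  ///   %v = load i32, i32* %a
  ///   store i32 %v, i32* %a
  /// are interesting; any other user is not tracked here.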
  static bool isInterestingInstruction(const Instruction *I) {
    return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
           (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
  }

  /// Get or calculate the index of the specified instruction.
  unsigned getInstructionIndex(const Instruction *I) {
    assert(isInterestingInstruction(I) &&
           "Not a load/store to/from an alloca?");

    // If we already have this instruction number, return it.
    DenseMap<const Instruction *, unsigned>::iterator It = InstNumbers.find(I);
    if (It != InstNumbers.end())
      return It->second;

    // Scan the whole block to get the instruction. This accumulates
    // information for every interesting instruction in the block, in order to
    // avoid gratuitous rescans.
    const BasicBlock *BB = I->getParent();
    unsigned InstNo = 0;
    for (const Instruction &BBI : *BB)
      if (isInterestingInstruction(&BBI))
        InstNumbers[&BBI] = InstNo++;
    It = InstNumbers.find(I);

    assert(It != InstNumbers.end() && "Didn't insert instruction?");
    return It->second;
  }

  void deleteValue(const Instruction *I) { InstNumbers.erase(I); }

  void clear() { InstNumbers.clear(); }
};

struct PromoteMem2Reg {
  /// The alloca instructions being promoted.
  std::vector<AllocaInst *> Allocas;

  DominatorTree &DT;
  DIBuilder DIB;

  /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
  AssumptionCache *AC;

  const SimplifyQuery SQ;

  /// Reverse mapping of Allocas.
  DenseMap<AllocaInst *, unsigned> AllocaLookup;

  /// The PhiNodes we're adding.
  ///
  /// That map is used to simplify some Phi nodes as we iterate over it, so
  /// it should have deterministic iterators. We could use a MapVector, but
  /// since we already maintain a map from BasicBlock* to a stable numbering
  /// (BBNumbers), the DenseMap is more efficient (also supports removal).
  DenseMap<std::pair<unsigned, unsigned>, PHINode *> NewPhiNodes;

  /// For each PHI node, keep track of which entry in Allocas it corresponds
  /// to.
  DenseMap<PHINode *, unsigned> PhiToAllocaMap;

  /// For each alloca, we keep track of the dbg.declare intrinsic that
  /// describes it, if any, so that we can convert it to a dbg.value
  /// intrinsic if the alloca gets promoted.
  SmallVector<TinyPtrVector<DbgVariableIntrinsic *>, 8> AllocaDbgDeclares;

  /// The set of basic blocks the renamer has already visited.
  SmallPtrSet<BasicBlock *, 16> Visited;

  /// Contains a stable numbering of basic blocks to avoid non-deterministic
  /// behavior.
  DenseMap<BasicBlock *, unsigned> BBNumbers;

  /// Lazily compute the number of predecessors a block has.
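  ///
  /// The count is stored biased by one so that a default-constructed entry of
  /// zero means "not computed yet"; see getNumPreds().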
  DenseMap<const BasicBlock *, unsigned> BBNumPreds;

public:
  PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
                 AssumptionCache *AC)
      : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
        DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
        AC(AC), SQ(DT.getRoot()->getParent()->getParent()->getDataLayout(),
                   nullptr, &DT, AC) {}

  void run();

private:
  void RemoveFromAllocasList(unsigned &AllocaIdx) {
    Allocas[AllocaIdx] = Allocas.back();
    Allocas.pop_back();
    --AllocaIdx;
  }

  unsigned getNumPreds(const BasicBlock *BB) {
    unsigned &NP = BBNumPreds[BB];
    if (NP == 0)
      NP = pred_size(BB) + 1;
    return NP - 1;
  }

  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
                           const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
                           SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
                  RenamePassData::ValVector &IncVals,
                  RenamePassData::LocationVector &IncLocs,
                  std::vector<RenamePassData> &Worklist);
  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
};

} // end anonymous namespace

/// Given a LoadInst LI this adds assume(LI != null) after it.
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI) {
  Function *AssumeIntrinsic =
      Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
  ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
                                       Constant::getNullValue(LI->getType()));
  LoadNotNull->insertAfter(LI);
  CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
  CI->insertAfter(LoadNotNull);
  AC->registerAssumption(CI);
}

static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {
  // Knowing that this alloca is promotable, we know that it's safe to kill all
  // instructions except for load and store.

  for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {
    Instruction *I = cast<Instruction>(*UI);
    ++UI;
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      continue;

    if (!I->getType()->isVoidTy()) {
      // The only users of this bitcast/GEP instruction are lifetime
      // intrinsics. Follow the use/def chain to erase them now instead of
      // leaving it for dead code elimination later.
      for (auto UUI = I->user_begin(), UUE = I->user_end(); UUI != UUE;) {
        Instruction *Inst = cast<Instruction>(*UUI);
        ++UUI;
        Inst->eraseFromParent();
      }
    }
    I->eraseFromParent();
  }
}

/// Rewrite as many loads as possible given a single store.
///
/// When there is only a single store, we can use the domtree to trivially
/// replace all of the dominated loads with the stored value. Do so, and return
/// true if this has successfully promoted the alloca entirely. If this returns
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
                                     LargeBlockInfo &LBI, const DataLayout &DL,
                                     DominatorTree &DT, AssumptionCache *AC) {
  StoreInst *OnlyStore = Info.OnlyStore;
  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
  BasicBlock *StoreBB = OnlyStore->getParent();
  int StoreIndex = -1;

  // Clear out UsingBlocks. We will reconstruct it here if needed.
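  // Any block that remains in it afterwards contains a load that the single
  // store does not dominate.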
  Info.UsingBlocks.clear();

  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
    Instruction *UserInst = cast<Instruction>(*UI++);
    if (UserInst == OnlyStore)
      continue;
    LoadInst *LI = cast<LoadInst>(UserInst);

    // Okay, if we have a load from the alloca, we want to replace it with the
    // only value stored to the alloca. We can do this if the load is
    // dominated by the store. If not, we use the rest of the mem2reg machinery
    // to insert the phi nodes as needed.
    if (!StoringGlobalVal) { // Non-instructions are always dominated.
      if (LI->getParent() == StoreBB) {
        // If we have a use that is in the same block as the store, compare the
        // indices of the two instructions to see which one came first. If the
        // load came before the store, we can't handle it.
        if (StoreIndex == -1)
          StoreIndex = LBI.getInstructionIndex(OnlyStore);

        if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
          // Can't handle this load, bail out.
          Info.UsingBlocks.push_back(StoreBB);
          continue;
        }
      } else if (!DT.dominates(StoreBB, LI->getParent())) {
        // If the load and store are in different blocks, use BB dominance to
        // check their relationship. If the store doesn't dominate the use,
        // bail out.
        Info.UsingBlocks.push_back(LI->getParent());
        continue;
      }
    }

    // Otherwise, we *can* safely rewrite this load.
    Value *ReplVal = OnlyStore->getOperand(0);
    // If the replacement value is the load, this must occur in unreachable
    // code.
    if (ReplVal == LI)
      ReplVal = UndefValue::get(LI->getType());

    // If the load was marked as nonnull we don't want to lose
    // that information when we erase this Load. So we preserve
    // it with an assume.
    if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
        !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
      addAssumeNonNull(AC, LI);

    LI->replaceAllUsesWith(ReplVal);
    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Finally, after the scan, check to see if the store is all that is left.
  if (!Info.UsingBlocks.empty())
    return false; // If not, we'll have to fall back for the remainder.

  // Record debuginfo for the store and remove the declaration's
  // debuginfo.
  for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
    DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
    ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
    DII->eraseFromParent();
  }
  // Remove the (now dead) store and alloca.
  Info.OnlyStore->eraseFromParent();
  LBI.deleteValue(Info.OnlyStore);

  AI->eraseFromParent();
  return true;
}

/// Many allocas are only used within a single basic block. If this is the
/// case, avoid traversing the CFG and inserting a lot of potentially useless
/// PHI nodes by just performing a single linear pass over the basic block
/// using the Alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// return false. This is necessary in cases where, due to control flow, the
/// alloca is undefined only on some control flow paths. e.g. code like
/// this is correct in LLVM IR:
///
///   // A is an alloca with no stores so far
///   for (...) {
///     int t = *A;
///     if (!first_iteration)
///       use(t);
///     *A = 42;
///   }
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
                                     LargeBlockInfo &LBI,
                                     const DataLayout &DL,
                                     DominatorTree &DT,
                                     AssumptionCache *AC) {
  // The trickiest case to handle is when we have large blocks. Because of
  // this, this code is optimized assuming that large blocks happen. This does
  // not significantly pessimize the small block case. This uses
  // LargeBlockInfo to make it efficient to get the index of various
  // operations in the block.

  // Walk the use-def list of the alloca, getting the locations of all stores.
  using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
  StoresByIndexTy StoresByIndex;

  for (User *U : AI->users())
    if (StoreInst *SI = dyn_cast<StoreInst>(U))
      StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));

  // Sort the stores by their index, making it efficient to do a lookup with a
  // binary search.
  llvm::sort(StoresByIndex, less_first());

  // Walk all of the loads from this alloca, replacing them with the nearest
  // store above them, if any.
  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
    LoadInst *LI = dyn_cast<LoadInst>(*UI++);
    if (!LI)
      continue;

    unsigned LoadIdx = LBI.getInstructionIndex(LI);

    // Find the nearest store that has a lower index than this load.
    StoresByIndexTy::iterator I = llvm::lower_bound(
        StoresByIndex,
        std::make_pair(LoadIdx, static_cast<StoreInst *>(nullptr)),
        less_first());
    if (I == StoresByIndex.begin()) {
      if (StoresByIndex.empty())
        // If there are no stores, the load takes the undef value.
        LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
      else
        // There is no store before this load; bail out (the load may be
        // affected by the following stores - see the main comment).
        return false;
    } else {
      // Otherwise, there was a store before this load, so the load takes its
      // value.
      // Note, if the load was marked as nonnull we don't want to lose that
      // information when we erase it. So we preserve it with an assume.
      Value *ReplVal = std::prev(I)->second->getOperand(0);
      if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
          !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
        addAssumeNonNull(AC, LI);

      // If the replacement value is the load, this must occur in unreachable
      // code.
      if (ReplVal == LI)
        ReplVal = UndefValue::get(LI->getType());

      LI->replaceAllUsesWith(ReplVal);
    }

    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Remove the (now dead) stores and alloca.
  while (!AI->use_empty()) {
    StoreInst *SI = cast<StoreInst>(AI->user_back());
    // Record debuginfo for the store before removing it.
    for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
      DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
      ConvertDebugDeclareToDebugValue(DII, SI, DIB);
    }
    SI->eraseFromParent();
    LBI.deleteValue(SI);
  }

  AI->eraseFromParent();

  // The alloca's debuginfo can be removed as well.
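  // Each store was already recorded as a dbg.value above, so the dbg.declare
  // intrinsics carry no remaining information.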
  for (DbgVariableIntrinsic *DII : Info.DbgDeclares)
    DII->eraseFromParent();

  ++NumLocalPromoted;
  return true;
}

void PromoteMem2Reg::run() {
  Function &F = *DT.getRoot()->getParent();

  AllocaDbgDeclares.resize(Allocas.size());

  AllocaInfo Info;
  LargeBlockInfo LBI;
  ForwardIDFCalculator IDF(DT);

  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
    AllocaInst *AI = Allocas[AllocaNum];

    assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
    assert(AI->getParent()->getParent() == &F &&
           "All allocas should be in the same function as the DominatorTree!");

    removeLifetimeIntrinsicUsers(AI);

    if (AI->use_empty()) {
      // If there are no uses of the alloca, just delete it now.
      AI->eraseFromParent();

      // Remove the alloca from the Allocas list, since it has been processed.
      RemoveFromAllocasList(AllocaNum);
      ++NumDeadAlloca;
      continue;
    }

    // Calculate the set of read and write locations for each alloca. This is
    // analogous to finding the 'uses' and 'definitions' of each variable.
    Info.AnalyzeAlloca(AI);

    // If there is only a single store to this value, replace any loads of
    // it that are directly dominated by the definition with the value stored.
    if (Info.DefiningBlocks.size() == 1) {
      if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
        // The alloca has been processed, move on.
        RemoveFromAllocasList(AllocaNum);
        ++NumSingleStore;
        continue;
      }
    }

    // If the alloca is only read and written in one basic block, just perform
    // a linear sweep over the block to eliminate it.
    if (Info.OnlyUsedInOneBlock &&
        promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
      // The alloca has been processed, move on.
      RemoveFromAllocasList(AllocaNum);
      continue;
    }

    // If we haven't computed a numbering for the BB's in the function, do so
    // now.
    if (BBNumbers.empty()) {
      unsigned ID = 0;
      for (auto &BB : F)
        BBNumbers[&BB] = ID++;
    }

    // Remember the dbg.declare intrinsic describing this alloca, if any.
    if (!Info.DbgDeclares.empty())
      AllocaDbgDeclares[AllocaNum] = Info.DbgDeclares;

    // Keep the reverse mapping of the 'Allocas' array for the rename pass.
    AllocaLookup[Allocas[AllocaNum]] = AllocaNum;

    // At this point, we're committed to promoting the alloca using IDFs, and
    // the standard SSA construction algorithm. Determine which blocks need PHI
    // nodes and see if we can optimize out some work by avoiding insertion of
    // dead phi nodes.

    // Unique the set of defining blocks for efficient lookup.
    SmallPtrSet<BasicBlock *, 32> DefBlocks(Info.DefiningBlocks.begin(),
                                            Info.DefiningBlocks.end());

    // Determine which blocks the value is live in. These are blocks which
    // lead to uses.
    SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
    ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);
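    // The iterated dominance frontier of the defining blocks, restricted to
    // the blocks where the value is live-in, is exactly the set of blocks
    // that need a PHI node for this alloca.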
    IDF.setLiveInBlocks(LiveInBlocks);
    IDF.setDefiningBlocks(DefBlocks);
    SmallVector<BasicBlock *, 32> PHIBlocks;
    IDF.calculate(PHIBlocks);
    llvm::sort(PHIBlocks, [this](BasicBlock *A, BasicBlock *B) {
      return BBNumbers.find(A)->second < BBNumbers.find(B)->second;
    });

    unsigned CurrentVersion = 0;
    for (BasicBlock *BB : PHIBlocks)
      QueuePhiNode(BB, AllocaNum, CurrentVersion);
  }

  if (Allocas.empty())
    return; // All of the allocas must have been trivial!

  LBI.clear();

  // Set the incoming values for the basic block to be undef values for all of
  // the allocas. We do this in case there is a load of a value that has not
  // been stored yet. In this case, it will get this undef value.
  RenamePassData::ValVector Values(Allocas.size());
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
    Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());

  // When handling debug info, treat all incoming values as if they have
  // unknown locations until proven otherwise.
  RenamePassData::LocationVector Locations(Allocas.size());

  // Walk all basic blocks in the function performing the SSA rename algorithm
  // and inserting the phi nodes we marked as necessary.
  std::vector<RenamePassData> RenamePassWorkList;
  RenamePassWorkList.emplace_back(&F.front(), nullptr, std::move(Values),
                                  std::move(Locations));
  do {
    RenamePassData RPD = std::move(RenamePassWorkList.back());
    RenamePassWorkList.pop_back();
    // RenamePass may add new worklist entries.
    RenamePass(RPD.BB, RPD.Pred, RPD.Values, RPD.Locations, RenamePassWorkList);
  } while (!RenamePassWorkList.empty());

  // The renamer uses the Visited set to avoid infinite loops. Clear it now.
  Visited.clear();

  // Remove the allocas themselves from the function.
  for (Instruction *A : Allocas) {
    // If there are any uses of the alloca instructions left, they must be in
    // unreachable basic blocks that were not processed by walking the
    // dominator tree. Just delete the users now.
    if (!A->use_empty())
      A->replaceAllUsesWith(UndefValue::get(A->getType()));
    A->eraseFromParent();
  }

  // Remove the allocas' dbg.declare intrinsics from the function.
  for (auto &Declares : AllocaDbgDeclares)
    for (auto *DII : Declares)
      DII->eraseFromParent();

  // Loop over all of the PHI nodes and see if there are any that we can get
  // rid of because they merge all of the same incoming values. This can
  // happen due to undef values coming into the PHI nodes. This process is
  // iterative, because eliminating one PHI node can cause others to be
  // removed.
  bool EliminatedAPHI = true;
  while (EliminatedAPHI) {
    EliminatedAPHI = false;

    // Iterating over NewPhiNodes is deterministic, so it is safe to try to
    // simplify and RAUW them as we go. If it was not, we could add uses to
    // the values we replace with in a non-deterministic order, thus creating
    // non-deterministic def->use chains.
    for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
             I = NewPhiNodes.begin(),
             E = NewPhiNodes.end();
         I != E;) {
      PHINode *PN = I->second;

      // If this PHI node merges one value and/or undefs, get the value.
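      // For example, %x = phi i32 [ %v, %bb1 ], [ undef, %bb2 ] simplifies
      // to %v.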
      if (Value *V = SimplifyInstruction(PN, SQ)) {
        PN->replaceAllUsesWith(V);
        PN->eraseFromParent();
        NewPhiNodes.erase(I++);
        EliminatedAPHI = true;
        continue;
      }
      ++I;
    }
  }

  // At this point, the renamer has added entries to PHI nodes for all
  // reachable code. Unfortunately, there may be unreachable blocks which the
  // renamer hasn't traversed. If this is the case, the PHI nodes may not have
  // incoming values for all predecessors. Loop over all PHI nodes we have
  // created, inserting undef values if they are missing any incoming values.
  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
           I = NewPhiNodes.begin(),
           E = NewPhiNodes.end();
       I != E; ++I) {
    // We want to do this once per basic block. As such, only process a block
    // when we find the PHI that is the first entry in the block.
    PHINode *SomePHI = I->second;
    BasicBlock *BB = SomePHI->getParent();
    if (&BB->front() != SomePHI)
      continue;

    // Only do work here if the PHI nodes are missing incoming values. We
    // know that all PHI nodes that were inserted in a block will have the
    // same number of incoming values, so we can just check any of them.
    if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
      continue;

    // Get the preds for BB.
    SmallVector<BasicBlock *, 16> Preds(pred_begin(BB), pred_end(BB));

    // Ok, now we know that all of the PHI nodes are missing entries for some
    // basic blocks. Start by sorting the incoming predecessors for efficient
    // access.
    auto CompareBBNumbers = [this](BasicBlock *A, BasicBlock *B) {
      return BBNumbers.find(A)->second < BBNumbers.find(B)->second;
    };
    llvm::sort(Preds, CompareBBNumbers);

    // Now we loop through all BB's which have entries in SomePHI and remove
    // them from the Preds list.
    for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
      // Do a log(n) search of the Preds list for the entry we want.
      SmallVectorImpl<BasicBlock *>::iterator EntIt = llvm::lower_bound(
          Preds, SomePHI->getIncomingBlock(i), CompareBBNumbers);
      assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
             "PHI node has entry for a block which is not a predecessor!");

      // Remove the entry.
      Preds.erase(EntIt);
    }

    // At this point, the blocks left in the preds list must have dummy
    // entries inserted into every PHI node for the block. Update all the phi
    // nodes in this block that we are inserting (there could be phis before
    // mem2reg runs).
    unsigned NumBadPreds = SomePHI->getNumIncomingValues();
    BasicBlock::iterator BBI = BB->begin();
    while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
           SomePHI->getNumIncomingValues() == NumBadPreds) {
      Value *UndefVal = UndefValue::get(SomePHI->getType());
      for (BasicBlock *Pred : Preds)
        SomePHI->addIncoming(UndefVal, Pred);
    }
  }

  NewPhiNodes.clear();
}

/// Determine which blocks the value is live in.
///
/// These are blocks which lead to uses. Knowing this allows us to avoid
/// inserting PHI nodes into blocks which don't lead to uses (thus, the
/// inserted phi nodes would be dead).
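///
/// Pruning PHI insertion down to the live-in blocks is what turns minimal SSA
/// construction into pruned SSA form.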
void PromoteMem2Reg::ComputeLiveInBlocks(
    AllocaInst *AI, AllocaInfo &Info,
    const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
    SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
  // To determine liveness, we must iterate through the predecessors of blocks
  // where the def is live. Blocks are added to the worklist if we need to
  // check their predecessors. Start with all the using blocks.
  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
                                                    Info.UsingBlocks.end());

  // If any of the using blocks is also a definition block, check to see if the
  // definition occurs before or after the use. If it happens before the use,
  // the value isn't really live-in.
  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
    BasicBlock *BB = LiveInBlockWorklist[i];
    if (!DefBlocks.count(BB))
      continue;

    // Okay, this is a block that both uses and defines the value. If the first
    // reference to the alloca is a def (store), then we know it isn't live-in.
    for (BasicBlock::iterator I = BB->begin();; ++I) {
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        if (SI->getOperand(1) != AI)
          continue;

        // We found a store to the alloca before a load. The alloca is not
        // actually live-in here.
        LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
        LiveInBlockWorklist.pop_back();
        --i;
        --e;
        break;
      }

      if (LoadInst *LI = dyn_cast<LoadInst>(I))
        // Okay, we found a load before a store to the alloca. It is actually
        // live into this block.
        if (LI->getOperand(0) == AI)
          break;
    }
  }

  // Now that we have a set of blocks where the phi is live-in, recursively add
  // their predecessors until we find the full region the value is live.
  while (!LiveInBlockWorklist.empty()) {
    BasicBlock *BB = LiveInBlockWorklist.pop_back_val();

    // The block really is live in here, insert it into the set. If already in
    // the set, then it has already been processed.
    if (!LiveInBlocks.insert(BB).second)
      continue;

    // Since the value is live into BB, it is either defined in a predecessor
    // or live into it too. Add the preds to the worklist unless they are a
    // defining block.
    for (BasicBlock *P : predecessors(BB)) {
      // The value is not live into a predecessor if it defines the value.
      if (DefBlocks.count(P))
        continue;

      // Otherwise it is, add to the worklist.
      LiveInBlockWorklist.push_back(P);
    }
  }
}

/// Queue a phi-node to be added to a basic-block for a specific Alloca.
///
/// Returns true if there wasn't already a phi-node for that variable.
bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
                                  unsigned &Version) {
  // Look up the basic-block in question.
  PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];

  // If the BB already has a phi node added for the i'th alloca then we're
  // done!
  if (PN)
    return false;

  // Create a PhiNode using the dereferenced type... and add the phi-node to
  // the BasicBlock.
  PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
                       Allocas[AllocaNo]->getName() + "." + Twine(Version++),
                       &BB->front());
  ++NumPHIInsert;
  PhiToAllocaMap[PN] = AllocaNo;
  return true;
}

/// Update the debug location of a phi. \p ApplyMergedLoc indicates whether to
/// create a merged location incorporating \p DL, or to set \p DL directly.
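///
/// The caller merges only once the phi already has an incoming value, so that
/// no single incoming store's location is arbitrarily preferred (see
/// RenamePass).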
static void updateForIncomingValueLocation(PHINode *PN, DebugLoc DL,
                                           bool ApplyMergedLoc) {
  if (ApplyMergedLoc)
    PN->applyMergedLocation(PN->getDebugLoc(), DL);
  else
    PN->setDebugLoc(DL);
}

/// Recursively traverse the CFG of the function, renaming loads and
/// stores to the allocas which we are promoting.
///
/// IncomingVals indicates what value each Alloca contains on exit from the
/// predecessor block Pred.
void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
                                RenamePassData::ValVector &IncomingVals,
                                RenamePassData::LocationVector &IncomingLocs,
                                std::vector<RenamePassData> &Worklist) {
NextIteration:
  // If we are inserting any phi nodes into this BB, they will already be in
  // the block.
  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
    // If we have PHI nodes to update, compute the number of edges from Pred to
    // BB.
    if (PhiToAllocaMap.count(APN)) {
      // We want to be able to distinguish PHI nodes being inserted by this
      // invocation of mem2reg from phi nodes that already existed in the IR
      // before mem2reg was run. We determine that APN is being inserted
      // because it is missing incoming edges. All other PHI nodes being
      // inserted by this pass of mem2reg will have the same number of
      // incoming operands so far. Remember this count.
      unsigned NewPHINumOperands = APN->getNumOperands();

      unsigned NumEdges = std::count(succ_begin(Pred), succ_end(Pred), BB);
      assert(NumEdges && "Must be at least one edge from Pred to BB!");

      // Add entries for all the phis.
      BasicBlock::iterator PNI = BB->begin();
      do {
        unsigned AllocaNo = PhiToAllocaMap[APN];

        // Update the location of the phi node.
        updateForIncomingValueLocation(APN, IncomingLocs[AllocaNo],
                                       APN->getNumIncomingValues() > 0);

        // Add N incoming values to the PHI node.
        for (unsigned i = 0; i != NumEdges; ++i)
          APN->addIncoming(IncomingVals[AllocaNo], Pred);

        // The currently active variable for this block is now the PHI.
        IncomingVals[AllocaNo] = APN;
        for (DbgVariableIntrinsic *DII : AllocaDbgDeclares[AllocaNo])
          ConvertDebugDeclareToDebugValue(DII, APN, DIB);

        // Get the next phi node.
        ++PNI;
        APN = dyn_cast<PHINode>(PNI);
        if (!APN)
          break;

        // Verify that it is missing entries. If not, it is not being inserted
        // by this mem2reg invocation, so we want to ignore it.
      } while (APN->getNumOperands() == NewPHINumOperands);
    }
  }

  // Don't revisit blocks.
  if (!Visited.insert(BB).second)
    return;

  for (BasicBlock::iterator II = BB->begin(); !II->isTerminator();) {
    Instruction *I = &*II++; // Get the instruction, increment iterator.

    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
      if (!Src)
        continue;

      DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
      if (AI == AllocaLookup.end())
        continue;

      Value *V = IncomingVals[AI->second];

      // If the load was marked as nonnull we don't want to lose
      // that information when we erase this Load. So we preserve
      // it with an assume.
      if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
          !isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
        addAssumeNonNull(AC, LI);

      // Anything using the load now uses the current value.
      LI->replaceAllUsesWith(V);
      BB->getInstList().erase(LI);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Delete this instruction and mark the name as the current holder of
      // the value.
      AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
      if (!Dest)
        continue;

      DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
      if (ai == AllocaLookup.end())
        continue;

      // What value were we writing?
      unsigned AllocaNo = ai->second;
      IncomingVals[AllocaNo] = SI->getOperand(0);

      // Record debuginfo for the store before removing it.
      IncomingLocs[AllocaNo] = SI->getDebugLoc();
      for (DbgVariableIntrinsic *DII : AllocaDbgDeclares[ai->second])
        ConvertDebugDeclareToDebugValue(DII, SI, DIB);
      BB->getInstList().erase(SI);
    }
  }

  // 'Recurse' to our successors.
  succ_iterator I = succ_begin(BB), E = succ_end(BB);
  if (I == E)
    return;

  // Keep track of the successors so we don't visit the same successor twice.
  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;

  // Handle the first successor without using the worklist.
  VisitedSuccs.insert(*I);
  Pred = BB;
  BB = *I;
  ++I;

  for (; I != E; ++I)
    if (VisitedSuccs.insert(*I).second)
      Worklist.emplace_back(*I, Pred, IncomingVals, IncomingLocs);

  goto NextIteration;
}

void llvm::PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
                           AssumptionCache *AC) {
  // If there is nothing to do, bail out...
  if (Allocas.empty())
    return;

  PromoteMem2Reg(Allocas, DT, AC).run();
}
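// Example usage (a sketch, not part of the pass itself): a caller typically
// collects the promotable allocas from a function's entry block and hands
// them to PromoteMemToReg, along with a DominatorTree DT and, optionally, an
// AssumptionCache AC for the function:
//
//   std::vector<AllocaInst *> PromotableAllocas;
//   for (Instruction &I : F.getEntryBlock())
//     if (auto *AI = dyn_cast<AllocaInst>(&I))
//       if (isAllocaPromotable(AI))
//         PromotableAllocas.push_back(AI);
//   if (!PromotableAllocas.empty())
//     PromoteMemToReg(PromotableAllocas, DT, &AC);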