//===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file promotes memory references to be register references. It promotes
// alloca instructions which only have loads and stores as uses. An alloca is
// transformed by using iterated dominator frontiers to place PHI nodes, then
// traversing the function in depth-first order to rewrite loads and stores as
// appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "mem2reg"

STATISTIC(NumLocalPromoted, "Number of allocas promoted within one block");
STATISTIC(NumSingleStore, "Number of allocas promoted with a single store");
STATISTIC(NumDeadAlloca, "Number of dead allocas removed");
STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");

bool llvm::isAllocaPromotable(const AllocaInst *AI) {
  // Only allow direct and non-volatile loads and stores...
  for (const User *U : AI->users()) {
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      // Note that atomic loads can be transformed; atomic semantics do
      // not have any meaning for a local alloca.
      if (LI->isVolatile() || LI->getType() != AI->getAllocatedType())
        return false;
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getValueOperand() == AI ||
          SI->getValueOperand()->getType() != AI->getAllocatedType())
        return false; // Don't allow a store OF the AI, only INTO the AI.
      // Note that atomic stores can be transformed; atomic semantics do
      // not have any meaning for a local alloca.
      if (SI->isVolatile())
        return false;
    } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      if (!II->isLifetimeStartOrEnd() && !II->isDroppable())
        return false;
    } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (!onlyUsedByLifetimeMarkersOrDroppableInsts(BCI))
        return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!GEPI->hasAllZeroIndices())
        return false;
      if (!onlyUsedByLifetimeMarkersOrDroppableInsts(GEPI))
        return false;
    } else if (const AddrSpaceCastInst *ASCI = dyn_cast<AddrSpaceCastInst>(U)) {
      if (!onlyUsedByLifetimeMarkers(ASCI))
        return false;
    } else {
      return false;
    }
  }

  return true;
}
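
// For illustration (a sketch, not from this file): an alloca such as
//
//   %x = alloca i32
//   store i32 1, ptr %x
//   %v = load i32, ptr %x
//
// passes the checks above and is promotable; mem2reg will rewrite users of %v
// to use the stored value directly. Any use that lets the address escape,
// e.g. a hypothetical "call void @f(ptr %x)", falls into the final else
// clause and blocks promotion.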

namespace {

/// Helper for updating assignment tracking debug info when promoting allocas.
class AssignmentTrackingInfo {
  /// DbgAssignIntrinsics linked to the alloca with at most one per variable
  /// fragment. (i.e. this may not be a comprehensive set if there are
  /// multiple dbg.assigns for one variable fragment).
  SmallVector<DbgVariableIntrinsic *> DbgAssigns;

public:
  void init(AllocaInst *AI) {
    SmallSet<DebugVariable, 2> Vars;
    for (DbgAssignIntrinsic *DAI : at::getAssignmentMarkers(AI)) {
      if (Vars.insert(DebugVariable(DAI)).second)
        DbgAssigns.push_back(DAI);
    }
  }

  /// Update assignment tracking debug info for the to-be-deleted store
  /// \p ToDelete that stores to this alloca.
  void updateForDeletedStore(
      StoreInst *ToDelete, DIBuilder &DIB,
      SmallSet<DbgAssignIntrinsic *, 8> *DbgAssignsToDelete) const {
    // There's nothing to do if the alloca doesn't have any variables using
    // assignment tracking.
    if (DbgAssigns.empty())
      return;

    // Insert a dbg.value where the linked dbg.assign is and remember to delete
    // the dbg.assign later. Demoting to dbg.value isn't necessary for
    // correctness but does reduce compile time and memory usage by reducing
    // unnecessary function-local metadata. Remember that we've seen a
    // dbg.assign for each variable fragment for the untracked store handling
    // (after this loop).
    SmallSet<DebugVariableAggregate, 2> VarHasDbgAssignForStore;
    for (DbgAssignIntrinsic *DAI : at::getAssignmentMarkers(ToDelete)) {
      VarHasDbgAssignForStore.insert(DebugVariableAggregate(DAI));
      DbgAssignsToDelete->insert(DAI);
      DIB.insertDbgValueIntrinsic(DAI->getValue(), DAI->getVariable(),
                                  DAI->getExpression(), DAI->getDebugLoc(),
                                  DAI);
    }

    // It's possible for variables using assignment tracking to have no
    // dbg.assign linked to this store. These are variables in DbgAssigns that
    // are missing from VarHasDbgAssignForStore. Since there isn't a dbg.assign
    // to mark the assignment - and the store is going to be deleted - insert a
    // dbg.value to do that now. An untracked store may be either one that
    // cannot be represented using assignment tracking (non-const offset or
    // size) or one that is trackable but has had its DIAssignID attachment
    // dropped accidentally.
    for (auto *DAI : DbgAssigns) {
      if (VarHasDbgAssignForStore.contains(DebugVariableAggregate(DAI)))
        continue;
      ConvertDebugDeclareToDebugValue(DAI, ToDelete, DIB);
    }
  }
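
  // For illustration (a sketch, metadata abbreviated): when the store linked
  // to a marker like
  //   call void @llvm.dbg.assign(metadata i32 %v, metadata !var,
  //                              metadata !DIExpression(), metadata !id,
  //                              metadata ptr %a, metadata !DIExpression())
  // is deleted, the demotion above leaves the location-only equivalent
  //   call void @llvm.dbg.value(metadata i32 %v, metadata !var,
  //                             metadata !DIExpression())
  // in its place and queues the dbg.assign itself for deletion.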

  /// Update assignment tracking debug info for the newly inserted PHI
  /// \p NewPhi.
  void updateForNewPhi(PHINode *NewPhi, DIBuilder &DIB) const {
    // Regardless of the position of dbg.assigns relative to stores, the
    // incoming values into a new PHI should be the same for the (imaginary)
    // debug-phi.
    for (auto *DAI : DbgAssigns)
      ConvertDebugDeclareToDebugValue(DAI, NewPhi, DIB);
  }

  void clear() { DbgAssigns.clear(); }
  bool empty() { return DbgAssigns.empty(); }
};

struct AllocaInfo {
  using DbgUserVec = SmallVector<DbgVariableIntrinsic *, 1>;

  SmallVector<BasicBlock *, 32> DefiningBlocks;
  SmallVector<BasicBlock *, 32> UsingBlocks;

  StoreInst *OnlyStore;
  BasicBlock *OnlyBlock;
  bool OnlyUsedInOneBlock;

  /// Debug users of the alloca - does not include dbg.assign intrinsics.
  DbgUserVec DbgUsers;
  /// Helper to update assignment tracking debug info.
  AssignmentTrackingInfo AssignmentTracking;

  void clear() {
    DefiningBlocks.clear();
    UsingBlocks.clear();
    OnlyStore = nullptr;
    OnlyBlock = nullptr;
    OnlyUsedInOneBlock = true;
    DbgUsers.clear();
    AssignmentTracking.clear();
  }

  /// Scan the uses of the specified alloca, filling in the AllocaInfo used
  /// by the rest of the pass to reason about the uses of this alloca.
  void AnalyzeAlloca(AllocaInst *AI) {
    clear();

    // As we scan the uses of the alloca instruction, keep track of stores,
    // and decide whether all of the loads and stores to the alloca are within
    // the same basic block.
    for (User *U : AI->users()) {
      Instruction *User = cast<Instruction>(U);

      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
        // Remember the basic blocks which define new values for the alloca.
        DefiningBlocks.push_back(SI->getParent());
        OnlyStore = SI;
      } else {
        LoadInst *LI = cast<LoadInst>(User);
        // Otherwise it must be a load instruction, keep track of variable
        // reads.
        UsingBlocks.push_back(LI->getParent());
      }

      if (OnlyUsedInOneBlock) {
        if (!OnlyBlock)
          OnlyBlock = User->getParent();
        else if (OnlyBlock != User->getParent())
          OnlyUsedInOneBlock = false;
      }
    }
    DbgUserVec AllDbgUsers;
    findDbgUsers(AllDbgUsers, AI);
    std::copy_if(AllDbgUsers.begin(), AllDbgUsers.end(),
                 std::back_inserter(DbgUsers), [](DbgVariableIntrinsic *DII) {
                   return !isa<DbgAssignIntrinsic>(DII);
                 });
    AssignmentTracking.init(AI);
  }
};

/// Data package used by RenamePass().
struct RenamePassData {
  using ValVector = std::vector<Value *>;
  using LocationVector = std::vector<DebugLoc>;

  RenamePassData(BasicBlock *B, BasicBlock *P, ValVector V, LocationVector L)
      : BB(B), Pred(P), Values(std::move(V)), Locations(std::move(L)) {}

  BasicBlock *BB;
  BasicBlock *Pred;
  ValVector Values;
  LocationVector Locations;
};
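
// For illustration of the class below (a sketch): in a block containing
//
//   store i32 0, ptr %a    ; interesting, index 0
//   %x = add i32 1, 2      ; not interesting, no index
//   %v = load i32, ptr %a  ; interesting, index 1
//
// only the alloca accesses are numbered, so "does this store precede that
// load?" becomes a single integer comparison rather than a repeated linear
// scan of a potentially large block.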

/// This assigns and keeps a per-bb relative ordering of load/store
/// instructions in the block that directly load or store an alloca.
///
/// This functionality is important because it avoids scanning large basic
/// blocks multiple times when promoting many allocas in the same block.
class LargeBlockInfo {
  /// For each instruction that we track, keep the index of the
  /// instruction.
  ///
  /// The index starts out as the number of the instruction from the start of
  /// the block.
  DenseMap<const Instruction *, unsigned> InstNumbers;

public:

  /// This code only looks at accesses to allocas.
  static bool isInterestingInstruction(const Instruction *I) {
    return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
           (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
  }

  /// Get or calculate the index of the specified instruction.
  unsigned getInstructionIndex(const Instruction *I) {
    assert(isInterestingInstruction(I) &&
           "Not a load/store to/from an alloca?");

    // If we already have this instruction number, return it.
    DenseMap<const Instruction *, unsigned>::iterator It = InstNumbers.find(I);
    if (It != InstNumbers.end())
      return It->second;

    // Scan the whole block to get the instruction. This accumulates
    // information for every interesting instruction in the block, in order to
    // avoid gratuitous rescans.
    const BasicBlock *BB = I->getParent();
    unsigned InstNo = 0;
    for (const Instruction &BBI : *BB)
      if (isInterestingInstruction(&BBI))
        InstNumbers[&BBI] = InstNo++;
    It = InstNumbers.find(I);

    assert(It != InstNumbers.end() && "Didn't insert instruction?");
    return It->second;
  }

  void deleteValue(const Instruction *I) { InstNumbers.erase(I); }

  void clear() { InstNumbers.clear(); }
};

struct PromoteMem2Reg {
  /// The alloca instructions being promoted.
  std::vector<AllocaInst *> Allocas;

  DominatorTree &DT;
  DIBuilder DIB;

  /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
  AssumptionCache *AC;

  const SimplifyQuery SQ;

  /// Reverse mapping of Allocas.
  DenseMap<AllocaInst *, unsigned> AllocaLookup;

  /// The PhiNodes we're adding.
  ///
  /// That map is used to simplify some Phi nodes as we iterate over it, so
  /// it should have deterministic iterators. We could use a MapVector, but
  /// since we already maintain a map from BasicBlock* to a stable numbering
  /// (BBNumbers), the DenseMap is more efficient (also supports removal).
  DenseMap<std::pair<unsigned, unsigned>, PHINode *> NewPhiNodes;

  /// For each PHI node, keep track of which entry in Allocas it corresponds
  /// to.
  DenseMap<PHINode *, unsigned> PhiToAllocaMap;

  /// For each alloca, we keep track of the dbg.declare intrinsic that
  /// describes it, if any, so that we can convert it to a dbg.value
  /// intrinsic if the alloca gets promoted.
  SmallVector<AllocaInfo::DbgUserVec, 8> AllocaDbgUsers;

  /// For each alloca, keep an instance of a helper class that gives us an easy
  /// way to update assignment tracking debug info if the alloca is promoted.
  SmallVector<AssignmentTrackingInfo, 8> AllocaATInfo;
  /// A set of dbg.assigns to delete because they've been demoted to
  /// dbg.values. Call cleanUpDbgAssigns to delete them.
  SmallSet<DbgAssignIntrinsic *, 8> DbgAssignsToDelete;

  /// The set of basic blocks the renamer has already visited.
  SmallPtrSet<BasicBlock *, 16> Visited;

  /// Contains a stable numbering of basic blocks to avoid non-deterministic
  /// behavior.
  DenseMap<BasicBlock *, unsigned> BBNumbers;

  /// Lazily compute the number of predecessors a block has.
  DenseMap<const BasicBlock *, unsigned> BBNumPreds;

public:
  PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
                 AssumptionCache *AC)
      : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
        DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
        AC(AC), SQ(DT.getRoot()->getParent()->getParent()->getDataLayout(),
                   nullptr, &DT, AC) {}

  void run();

private:
  void RemoveFromAllocasList(unsigned &AllocaIdx) {
    Allocas[AllocaIdx] = Allocas.back();
    Allocas.pop_back();
    --AllocaIdx;
  }

  unsigned getNumPreds(const BasicBlock *BB) {
    // Cache pred_size(BB) in BBNumPreds. A DenseMap entry default-constructs
    // to 0, so 0 means "not yet computed"; the stored value is the real count
    // plus one.
    unsigned &NP = BBNumPreds[BB];
    if (NP == 0)
      NP = pred_size(BB) + 1;
    return NP - 1;
  }

  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
                           const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
                           SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
                  RenamePassData::ValVector &IncVals,
                  RenamePassData::LocationVector &IncLocs,
                  std::vector<RenamePassData> &Worklist);
  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);

  /// Delete dbg.assigns that have been demoted to dbg.values.
  void cleanUpDbgAssigns() {
    for (auto *DAI : DbgAssignsToDelete)
      DAI->eraseFromParent();
    DbgAssignsToDelete.clear();
  }
};

} // end anonymous namespace
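
// For illustration (a sketch): when a load being removed yielded a pointer %v
// carrying both !nonnull and !noundef metadata, addAssumeNonNull below
// materializes
//
//   %cmp = icmp ne ptr %v, null
//   call void @llvm.assume(i1 %cmp)
//
// immediately after the load, so the nonnull fact survives the load's
// deletion.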

/// Given a LoadInst LI this adds assume(LI != null) after it.
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI) {
  Function *AssumeIntrinsic =
      Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
  ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
                                       Constant::getNullValue(LI->getType()));
  LoadNotNull->insertAfter(LI);
  CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
  CI->insertAfter(LoadNotNull);
  AC->registerAssumption(cast<AssumeInst>(CI));
}

static void convertMetadataToAssumes(LoadInst *LI, Value *Val,
                                     const DataLayout &DL, AssumptionCache *AC,
                                     const DominatorTree *DT) {
  // If the load was marked as nonnull we don't want to lose that information
  // when we erase this Load. So we preserve it with an assume. As !nonnull
  // returns poison while assume violations are immediate undefined behavior,
  // we can only do this if the value is known non-poison.
  if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
      LI->getMetadata(LLVMContext::MD_noundef) &&
      !isKnownNonZero(Val, DL, 0, AC, LI, DT))
    addAssumeNonNull(AC, LI);
}

static void removeIntrinsicUsers(AllocaInst *AI) {
  // Knowing that this alloca is promotable, we know that it's safe to kill all
  // instructions except for load and store.

  for (Use &U : llvm::make_early_inc_range(AI->uses())) {
    Instruction *I = cast<Instruction>(U.getUser());
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      continue;

    // Drop the use of AI in droppable instructions.
    if (I->isDroppable()) {
      I->dropDroppableUse(U);
      continue;
    }

    if (!I->getType()->isVoidTy()) {
      // The only users of this bitcast/GEP instruction are lifetime
      // intrinsics. Follow the use/def chain to erase them now instead of
      // leaving it for dead code elimination later.
      for (Use &UU : llvm::make_early_inc_range(I->uses())) {
        Instruction *Inst = cast<Instruction>(UU.getUser());

        // Drop the use of I in droppable instructions.
        if (Inst->isDroppable()) {
          Inst->dropDroppableUse(UU);
          continue;
        }
        Inst->eraseFromParent();
      }
    }
    I->eraseFromParent();
  }
}
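
// For illustration (a sketch) of the single-store case handled below: in
//
//   entry:
//     %a = alloca i32
//     store i32 7, ptr %a
//     br label %next
//   next:
//     %v = load i32, ptr %a
//
// the load is dominated by the unique store, so rewriteSingleStoreAlloca
// replaces %v with the constant 7 and no PHI nodes are needed.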

/// Rewrite as many loads as possible given a single store.
///
/// When there is only a single store, we can use the domtree to trivially
/// replace all of the dominated loads with the stored value. Do so, and return
/// true if this has successfully promoted the alloca entirely. If this returns
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
static bool rewriteSingleStoreAlloca(
    AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL,
    DominatorTree &DT, AssumptionCache *AC,
    SmallSet<DbgAssignIntrinsic *, 8> *DbgAssignsToDelete) {
  StoreInst *OnlyStore = Info.OnlyStore;
  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
  BasicBlock *StoreBB = OnlyStore->getParent();
  int StoreIndex = -1;

  // Clear out UsingBlocks. We will reconstruct it here if needed.
  Info.UsingBlocks.clear();

  for (User *U : make_early_inc_range(AI->users())) {
    Instruction *UserInst = cast<Instruction>(U);
    if (UserInst == OnlyStore)
      continue;
    LoadInst *LI = cast<LoadInst>(UserInst);

    // Okay, if we have a load from the alloca, we want to replace it with the
    // only value stored to the alloca. We can do this if the value is
    // dominated by the store. If not, we use the rest of the mem2reg machinery
    // to insert the phi nodes as needed.
    if (!StoringGlobalVal) { // Non-instructions are always dominated.
      if (LI->getParent() == StoreBB) {
        // If we have a use that is in the same block as the store, compare the
        // indices of the two instructions to see which one came first. If the
        // load came before the store, we can't handle it.
        if (StoreIndex == -1)
          StoreIndex = LBI.getInstructionIndex(OnlyStore);

        if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
          // Can't handle this load, bail out.
          Info.UsingBlocks.push_back(StoreBB);
          continue;
        }
      } else if (!DT.dominates(StoreBB, LI->getParent())) {
        // If the load and store are in different blocks, use BB dominance to
        // check their relationships. If the store doesn't dominate the use,
        // bail out.
        Info.UsingBlocks.push_back(LI->getParent());
        continue;
      }
    }

    // Otherwise, we *can* safely rewrite this load.
    Value *ReplVal = OnlyStore->getOperand(0);
    // If the replacement value is the load, this must occur in unreachable
    // code.
    if (ReplVal == LI)
      ReplVal = PoisonValue::get(LI->getType());

    convertMetadataToAssumes(LI, ReplVal, DL, AC, &DT);
    LI->replaceAllUsesWith(ReplVal);
    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Finally, after the scan, check to see if the store is all that is left.
  if (!Info.UsingBlocks.empty())
    return false; // If not, we'll have to fall back for the remainder.

  DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
  // Update assignment tracking info for the store we're going to delete.
  Info.AssignmentTracking.updateForDeletedStore(Info.OnlyStore, DIB,
                                                DbgAssignsToDelete);

  // Record debuginfo for the store and remove the declaration's
  // debuginfo.
  for (DbgVariableIntrinsic *DII : Info.DbgUsers) {
    if (DII->isAddressOfVariable()) {
      ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
      DII->eraseFromParent();
    } else if (DII->getExpression()->startsWithDeref()) {
      DII->eraseFromParent();
    }
  }

  // Remove dbg.assigns linked to the alloca as these are now redundant.
  at::deleteAssignmentMarkers(AI);

  // Remove the (now dead) store and alloca.
  Info.OnlyStore->eraseFromParent();
  LBI.deleteValue(Info.OnlyStore);

  AI->eraseFromParent();
  return true;
}

/// Many allocas are only used within a single basic block. If this is the
/// case, avoid traversing the CFG and inserting a lot of potentially useless
/// PHI nodes by just performing a single linear pass over the basic block
/// using the Alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// return false. This is necessary in cases where, due to control flow, the
/// alloca is undefined only on some control flow paths. e.g. code like
/// this is correct in LLVM IR:
///   // A is an alloca with no stores so far
///   for (...) {
///     int t = *A;
///     if (!first_iteration)
///       use(t);
///     *A = 42;
///   }
static bool promoteSingleBlockAlloca(
    AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI,
    const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC,
    SmallSet<DbgAssignIntrinsic *, 8> *DbgAssignsToDelete) {
  // The trickiest case to handle is when we have large blocks. Because of
  // this, this code is optimized assuming that large blocks happen. This does
  // not significantly pessimize the small block case. This uses LargeBlockInfo
  // to make it efficient to get the index of various operations in the block.

  // Walk the use-def list of the alloca, getting the locations of all stores.
  using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
  StoresByIndexTy StoresByIndex;

  for (User *U : AI->users())
    if (StoreInst *SI = dyn_cast<StoreInst>(U))
      StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));

  // Sort the stores by their index, making it efficient to do a lookup with a
  // binary search.
  llvm::sort(StoresByIndex, less_first());

  // Walk all of the loads from this alloca, replacing them with the nearest
  // store above them, if any.
  for (User *U : make_early_inc_range(AI->users())) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI)
      continue;

    unsigned LoadIdx = LBI.getInstructionIndex(LI);

    // Find the nearest store that has a lower index than this load.
    StoresByIndexTy::iterator I = llvm::lower_bound(
        StoresByIndex,
        std::make_pair(LoadIdx, static_cast<StoreInst *>(nullptr)),
        less_first());
    Value *ReplVal;
    if (I == StoresByIndex.begin()) {
      if (StoresByIndex.empty())
        // If there are no stores, the load takes the undef value.
        ReplVal = UndefValue::get(LI->getType());
      else
        // There is no store before this load, bail out (load may be affected
        // by the following stores - see main comment).
        return false;
    } else {
      // Otherwise, there was a store before this load, the load takes its
      // value.
      ReplVal = std::prev(I)->second->getOperand(0);
    }

    convertMetadataToAssumes(LI, ReplVal, DL, AC, &DT);

    // If the replacement value is the load, this must occur in unreachable
    // code.
    if (ReplVal == LI)
      ReplVal = PoisonValue::get(LI->getType());

    LI->replaceAllUsesWith(ReplVal);
    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Remove the (now dead) stores and alloca.
  DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
  while (!AI->use_empty()) {
    StoreInst *SI = cast<StoreInst>(AI->user_back());
    // Update assignment tracking info for the store we're going to delete.
    Info.AssignmentTracking.updateForDeletedStore(SI, DIB, DbgAssignsToDelete);
    // Record debuginfo for the store before removing it.
    for (DbgVariableIntrinsic *DII : Info.DbgUsers) {
      if (DII->isAddressOfVariable()) {
        ConvertDebugDeclareToDebugValue(DII, SI, DIB);
      }
    }
    SI->eraseFromParent();
    LBI.deleteValue(SI);
  }

  // Remove dbg.assigns linked to the alloca as these are now redundant.
  at::deleteAssignmentMarkers(AI);
  AI->eraseFromParent();

  // The alloca's debuginfo can be removed as well.
  for (DbgVariableIntrinsic *DII : Info.DbgUsers)
    if (DII->isAddressOfVariable() || DII->getExpression()->startsWithDeref())
      DII->eraseFromParent();

  ++NumLocalPromoted;
  return true;
}

void PromoteMem2Reg::run() {
  Function &F = *DT.getRoot()->getParent();

  AllocaDbgUsers.resize(Allocas.size());
  AllocaATInfo.resize(Allocas.size());

  AllocaInfo Info;
  LargeBlockInfo LBI;
  ForwardIDFCalculator IDF(DT);

  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
    AllocaInst *AI = Allocas[AllocaNum];

    assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
    assert(AI->getParent()->getParent() == &F &&
           "All allocas should be in the same function, which is same as DF!");

    removeIntrinsicUsers(AI);

    if (AI->use_empty()) {
      // If there are no uses of the alloca, just delete it now.
      AI->eraseFromParent();

      // Remove the alloca from the Allocas list, since it has been processed.
      RemoveFromAllocasList(AllocaNum);
      ++NumDeadAlloca;
      continue;
    }

    // Calculate the set of read and write-locations for each alloca. This is
    // analogous to finding the 'uses' and 'definitions' of each variable.
    Info.AnalyzeAlloca(AI);

    // If there is only a single store to this value, replace any loads of
    // it that are directly dominated by the definition with the value stored.
    if (Info.DefiningBlocks.size() == 1) {
      if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC,
                                   &DbgAssignsToDelete)) {
        // The alloca has been processed, move on.
        RemoveFromAllocasList(AllocaNum);
        ++NumSingleStore;
        continue;
      }
    }

    // If the alloca is only read and written in one basic block, just perform
    // a linear sweep over the block to eliminate it.
    if (Info.OnlyUsedInOneBlock &&
        promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC,
                                 &DbgAssignsToDelete)) {
      // The alloca has been processed, move on.
      RemoveFromAllocasList(AllocaNum);
      continue;
    }

    // If we haven't computed a numbering for the BBs in the function, do so
    // now.
    if (BBNumbers.empty()) {
      unsigned ID = 0;
      for (auto &BB : F)
        BBNumbers[&BB] = ID++;
    }
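
    // For illustration (a sketch): if this alloca is stored in blocks %left
    // and %right that both branch to %merge, the iterated dominance frontier
    // computed below places one PHI in %merge, e.g.
    //
    //   %a.0 = phi i32 [ %v1, %left ], [ %v2, %right ]
    //
    // and the rename pass then wires up the incoming values.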

    // Remember the dbg.declare intrinsic describing this alloca, if any.
    if (!Info.DbgUsers.empty())
      AllocaDbgUsers[AllocaNum] = Info.DbgUsers;
    if (!Info.AssignmentTracking.empty())
      AllocaATInfo[AllocaNum] = Info.AssignmentTracking;

    // Keep the reverse mapping of the 'Allocas' array for the rename pass.
    AllocaLookup[Allocas[AllocaNum]] = AllocaNum;

    // Unique the set of defining blocks for efficient lookup.
    SmallPtrSet<BasicBlock *, 32> DefBlocks(Info.DefiningBlocks.begin(),
                                            Info.DefiningBlocks.end());

    // Determine which blocks the value is live in. These are blocks which lead
    // to uses.
    SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
    ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);

    // At this point, we're committed to promoting the alloca using IDFs, and
    // the standard SSA construction algorithm. Determine which blocks need phi
    // nodes and see if we can optimize out some work by avoiding insertion of
    // dead phi nodes.
    IDF.setLiveInBlocks(LiveInBlocks);
    IDF.setDefiningBlocks(DefBlocks);
    SmallVector<BasicBlock *, 32> PHIBlocks;
    IDF.calculate(PHIBlocks);
    llvm::sort(PHIBlocks, [this](BasicBlock *A, BasicBlock *B) {
      return BBNumbers.find(A)->second < BBNumbers.find(B)->second;
    });

    unsigned CurrentVersion = 0;
    for (BasicBlock *BB : PHIBlocks)
      QueuePhiNode(BB, AllocaNum, CurrentVersion);
  }

  if (Allocas.empty()) {
    cleanUpDbgAssigns();
    return; // All of the allocas must have been trivial!
  }
  LBI.clear();

  // Set the incoming values for the basic block to be null values for all of
  // the allocas. We do this in case there is a load of a value that has not
  // been stored yet. In this case, it will get this null value.
  RenamePassData::ValVector Values(Allocas.size());
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
    Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());

  // When handling debug info, treat all incoming values as if they have
  // unknown locations until proven otherwise.
  RenamePassData::LocationVector Locations(Allocas.size());

  // Walk all basic blocks in the function performing the SSA rename algorithm
  // and inserting the phi nodes we marked as necessary.
  std::vector<RenamePassData> RenamePassWorkList;
  RenamePassWorkList.emplace_back(&F.front(), nullptr, std::move(Values),
                                  std::move(Locations));
  do {
    RenamePassData RPD = std::move(RenamePassWorkList.back());
    RenamePassWorkList.pop_back();
    // RenamePass may add new worklist entries.
    RenamePass(RPD.BB, RPD.Pred, RPD.Values, RPD.Locations, RenamePassWorkList);
  } while (!RenamePassWorkList.empty());

  // The renamer uses the Visited set to avoid infinite loops. Clear it now.
  Visited.clear();

  // Remove the allocas themselves from the function.
  for (Instruction *A : Allocas) {
    // Remove dbg.assigns linked to the alloca as these are now redundant.
    at::deleteAssignmentMarkers(A);
    // If there are any uses of the alloca instructions left, they must be in
    // unreachable basic blocks that were not processed by walking the
    // dominator tree. Just delete the users now.
    if (!A->use_empty())
      A->replaceAllUsesWith(PoisonValue::get(A->getType()));
    A->eraseFromParent();
  }

  // Remove alloca's dbg.declare intrinsics from the function.
  for (auto &DbgUsers : AllocaDbgUsers) {
    for (auto *DII : DbgUsers)
      if (DII->isAddressOfVariable() || DII->getExpression()->startsWithDeref())
        DII->eraseFromParent();
  }

  // Loop over all of the PHI nodes and see if there are any that we can get
  // rid of because they merge all of the same incoming values. This can
  // happen due to undef values coming into the PHI nodes. This process is
  // iterative, because eliminating one PHI node can cause others to be
  // removed.
  bool EliminatedAPHI = true;
  while (EliminatedAPHI) {
    EliminatedAPHI = false;

    // Iterating over NewPhiNodes is deterministic, so it is safe to try to
    // simplify and RAUW them as we go. If it was not, we could add uses to
    // the values we replace with in a non-deterministic order, thus creating
    // non-deterministic def->use chains.
    for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
             I = NewPhiNodes.begin(),
             E = NewPhiNodes.end();
         I != E;) {
      PHINode *PN = I->second;

      // If this PHI node merges one value and/or undefs, get the value.
      if (Value *V = simplifyInstruction(PN, SQ)) {
        PN->replaceAllUsesWith(V);
        PN->eraseFromParent();
        NewPhiNodes.erase(I++);
        EliminatedAPHI = true;
        continue;
      }
      ++I;
    }
  }

  // At this point, the renamer has added entries to PHI nodes for all
  // reachable code. Unfortunately, there may be unreachable blocks which the
  // renamer hasn't traversed. If this is the case, the PHI nodes may not have
  // incoming values for all predecessors. Loop over all PHI nodes we have
  // created, inserting poison values if they are missing any incoming values.
  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
           I = NewPhiNodes.begin(),
           E = NewPhiNodes.end();
       I != E; ++I) {
    // We want to do this once per basic block. As such, only process a block
    // when we find the PHI that is the first entry in the block.
    PHINode *SomePHI = I->second;
    BasicBlock *BB = SomePHI->getParent();
    if (&BB->front() != SomePHI)
      continue;

    // Only do work here if the PHI nodes are missing incoming values. We know
    // that all PHI nodes that were inserted in a block will have the same
    // number of incoming values, so we can just check any of them.
    if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
      continue;

    // Get the preds for BB.
    SmallVector<BasicBlock *, 16> Preds(predecessors(BB));

    // Ok, now we know that all of the PHI nodes are missing entries for some
    // basic blocks. Start by sorting the incoming predecessors for efficient
    // access.
    auto CompareBBNumbers = [this](BasicBlock *A, BasicBlock *B) {
      return BBNumbers.find(A)->second < BBNumbers.find(B)->second;
    };
    llvm::sort(Preds, CompareBBNumbers);

    // Now we loop through all BBs which have entries in SomePHI and remove
    // them from the Preds list.
    for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
      // Do a log(n) search of the Preds list for the entry we want.
      SmallVectorImpl<BasicBlock *>::iterator EntIt = llvm::lower_bound(
          Preds, SomePHI->getIncomingBlock(i), CompareBBNumbers);
      assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
             "PHI node has entry for a block which is not a predecessor!");

      // Remove the entry.
      Preds.erase(EntIt);
    }

    // At this point, the blocks left in the preds list must have dummy entries
    // inserted into every PHI node for the block. Update all the phi nodes in
    // this block that we are inserting (there could be phis before mem2reg
    // runs).
    unsigned NumBadPreds = SomePHI->getNumIncomingValues();
    BasicBlock::iterator BBI = BB->begin();
    while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
           SomePHI->getNumIncomingValues() == NumBadPreds) {
      Value *PoisonVal = PoisonValue::get(SomePHI->getType());
      for (BasicBlock *Pred : Preds)
        SomePHI->addIncoming(PoisonVal, Pred);
    }
  }

  NewPhiNodes.clear();
  cleanUpDbgAssigns();
}

/// Determine which blocks the value is live in.
///
/// These are blocks which lead to uses. Knowing this allows us to avoid
/// inserting PHI nodes into blocks which don't lead to uses (thus, the
/// inserted phi nodes would be dead).
void PromoteMem2Reg::ComputeLiveInBlocks(
    AllocaInst *AI, AllocaInfo &Info,
    const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
    SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
  // To determine liveness, we must iterate through the predecessors of blocks
  // where the def is live. Blocks are added to the worklist if we need to
  // check their predecessors. Start with all the using blocks.
  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
                                                    Info.UsingBlocks.end());

  // If any of the using blocks is also a definition block, check to see if the
  // definition occurs before or after the use. If it happens before the use,
  // the value isn't really live-in.
  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
    BasicBlock *BB = LiveInBlockWorklist[i];
    if (!DefBlocks.count(BB))
      continue;

    // Okay, this is a block that both uses and defines the value. If the first
    // reference to the alloca is a def (store), then we know it isn't live-in.
    for (BasicBlock::iterator I = BB->begin();; ++I) {
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        if (SI->getOperand(1) != AI)
          continue;

        // We found a store to the alloca before a load. The alloca is not
        // actually live-in here.
        LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
        LiveInBlockWorklist.pop_back();
        --i;
        --e;
        break;
      }

      if (LoadInst *LI = dyn_cast<LoadInst>(I))
        // Okay, we found a load before a store to the alloca. It is actually
        // live into this block.
        if (LI->getOperand(0) == AI)
          break;
    }
  }

  // Now that we have a set of blocks where the phi is live-in, recursively add
  // their predecessors until we find the full region the value is live.
  while (!LiveInBlockWorklist.empty()) {
    BasicBlock *BB = LiveInBlockWorklist.pop_back_val();

    // The block really is live in here, insert it into the set. If already in
    // the set, then it has already been processed.
    if (!LiveInBlocks.insert(BB).second)
      continue;

    // Since the value is live into BB, it is either defined in a predecessor
    // or live into it too. Add the preds to the worklist unless they are a
    // defining block.
    for (BasicBlock *P : predecessors(BB)) {
      // The value is not live into a predecessor if it defines the value.
      if (DefBlocks.count(P))
        continue;

      // Otherwise it is, add to the worklist.
      LiveInBlockWorklist.push_back(P);
    }
  }
}
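
// For illustration (a sketch): with CFG A -> B -> C, a store to the alloca in
// A, and a load in C, the worklist starts at C; walking predecessors marks B
// as live-in too, but stops at A because A defines the value. PHI placement
// is then restricted to blocks within this live-in region.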

/// Queue a phi-node to be added to a basic-block for a specific Alloca.
///
/// Returns true if there wasn't already a phi-node for that variable.
bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
                                  unsigned &Version) {
  // Look up the basic-block in question.
  PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];

  // If the BB already has a phi node added for the i'th alloca then we're
  // done!
  if (PN)
    return false;

  // Create a PhiNode using the dereferenced type... and add the phi-node to
  // the BasicBlock.
  PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
                       Allocas[AllocaNo]->getName() + "." + Twine(Version++),
                       &BB->front());
  ++NumPHIInsert;
  PhiToAllocaMap[PN] = AllocaNo;
  return true;
}

/// Update the debug location of a phi. \p ApplyMergedLoc indicates whether to
/// create a merged location incorporating \p DL, or to set \p DL directly.
static void updateForIncomingValueLocation(PHINode *PN, DebugLoc DL,
                                           bool ApplyMergedLoc) {
  if (ApplyMergedLoc)
    PN->applyMergedLocation(PN->getDebugLoc(), DL);
  else
    PN->setDebugLoc(DL);
}

/// Recursively traverse the CFG of the function, renaming loads and
/// stores to the allocas which we are promoting.
///
/// IncomingVals indicates what value each Alloca contains on exit from the
/// predecessor block Pred.
void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
                                RenamePassData::ValVector &IncomingVals,
                                RenamePassData::LocationVector &IncomingLocs,
                                std::vector<RenamePassData> &Worklist) {
NextIteration:
  // If we are inserting any phi nodes into this BB, they will already be in
  // the block.
  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
    // If we have PHI nodes to update, compute the number of edges from Pred to
    // BB.
    if (PhiToAllocaMap.count(APN)) {
      // We want to be able to distinguish PHI nodes being inserted by this
      // invocation of mem2reg from those phi nodes that already existed in
      // the IR before mem2reg was run. We determine that APN is being
      // inserted because it is missing incoming edges. All other PHI nodes
      // being inserted by this pass of mem2reg will have the same number of
      // incoming operands so far. Remember this count.
      unsigned NewPHINumOperands = APN->getNumOperands();

      unsigned NumEdges = llvm::count(successors(Pred), BB);
      assert(NumEdges && "Must be at least one edge from Pred to BB!");

      // Add entries for all the phis.
      BasicBlock::iterator PNI = BB->begin();
      do {
        unsigned AllocaNo = PhiToAllocaMap[APN];

        // Update the location of the phi node.
        updateForIncomingValueLocation(APN, IncomingLocs[AllocaNo],
                                       APN->getNumIncomingValues() > 0);

        // Add N incoming values to the PHI node.
        for (unsigned i = 0; i != NumEdges; ++i)
          APN->addIncoming(IncomingVals[AllocaNo], Pred);

        // The currently active variable for this block is now the PHI.
        IncomingVals[AllocaNo] = APN;
        AllocaATInfo[AllocaNo].updateForNewPhi(APN, DIB);
        for (DbgVariableIntrinsic *DII : AllocaDbgUsers[AllocaNo])
          if (DII->isAddressOfVariable())
            ConvertDebugDeclareToDebugValue(DII, APN, DIB);

        // Get the next phi node.
        ++PNI;
        APN = dyn_cast<PHINode>(PNI);
        if (!APN)
          break;

        // Verify that it is missing entries. If not, it is not being inserted
        // by this mem2reg invocation so we want to ignore it.
      } while (APN->getNumOperands() == NewPHINumOperands);
    }
  }

  // Don't revisit blocks.
  if (!Visited.insert(BB).second)
    return;

  for (BasicBlock::iterator II = BB->begin(); !II->isTerminator();) {
    Instruction *I = &*II++; // get the instruction, increment iterator

    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
      if (!Src)
        continue;

      DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
      if (AI == AllocaLookup.end())
        continue;

      Value *V = IncomingVals[AI->second];
      convertMetadataToAssumes(LI, V, SQ.DL, AC, &DT);

      // Anything using the load now uses the current value.
      LI->replaceAllUsesWith(V);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Delete this instruction and mark the name as the current holder of
      // the value.
      AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
      if (!Dest)
        continue;

      DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
      if (ai == AllocaLookup.end())
        continue;

      // What value were we writing?
      unsigned AllocaNo = ai->second;
      IncomingVals[AllocaNo] = SI->getOperand(0);

      // Record debuginfo for the store before removing it.
      IncomingLocs[AllocaNo] = SI->getDebugLoc();
      AllocaATInfo[AllocaNo].updateForDeletedStore(SI, DIB,
                                                   &DbgAssignsToDelete);
      for (DbgVariableIntrinsic *DII : AllocaDbgUsers[ai->second])
        if (DII->isAddressOfVariable())
          ConvertDebugDeclareToDebugValue(DII, SI, DIB);
      SI->eraseFromParent();
    }
  }

  // 'Recurse' to our successors.
  succ_iterator I = succ_begin(BB), E = succ_end(BB);
  if (I == E)
    return;

  // Keep track of the successors so we don't visit the same successor twice.
  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;

  // Handle the first successor without using the worklist.
  VisitedSuccs.insert(*I);
  Pred = BB;
  BB = *I;
  ++I;

  for (; I != E; ++I)
    if (VisitedSuccs.insert(*I).second)
      Worklist.emplace_back(*I, Pred, IncomingVals, IncomingLocs);

  goto NextIteration;
}

void llvm::PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
                           AssumptionCache *AC) {
  // If there is nothing to do, bail out...
  if (Allocas.empty())
    return;

  PromoteMem2Reg(Allocas, DT, AC).run();
}
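
// For illustration, a minimal sketch of driving this utility from a pass
// (analysis plumbing and error handling omitted; `F` is a hypothetical
// llvm::Function):
//
//   DominatorTree DT(F);
//   AssumptionCache AC(F);
//   SmallVector<AllocaInst *, 8> Allocas;
//   for (Instruction &I : F.getEntryBlock())
//     if (auto *AI = dyn_cast<AllocaInst>(&I))
//       if (isAllocaPromotable(AI))
//         Allocas.push_back(AI);
//   if (!Allocas.empty())
//     PromoteMemToReg(Allocas, DT, &AC);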