//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <tuple>
#include <utility>
#include <vector>

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
  "disable-promote-alloca-to-vector",
  cl::desc("Disable promote alloca to vector"),
  cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
  "disable-promote-alloca-to-lds",
  cl::desc("Disable promote alloca to LDS"),
  cl::init(false));

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

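  /// Materialize the workgroup size in the Y and Z dimensions at the
  /// builder's insert point, either via the r600 local-size intrinsics or,
  /// on amdhsa, by loading the workgroup_size_* fields of the dispatch
  /// packet.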
  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

    ++I;
    if (AI)
      Changed |= handleAlloca(*AI, SufficientLDS);
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  // typedef struct hsa_kernel_dispatch_packet_s {
  //   uint16_t header;
  //   uint16_t setup;
  //   uint16_t workgroup_size_x;
  //   uint16_t workgroup_size_y;
  //   uint16_t workgroup_size_z;
  //   uint16_t reserved0;
  //   uint32_t grid_size_x;
  //   uint32_t grid_size_y;
  //   uint32_t grid_size_z;
  //
  //   uint32_t private_segment_size;
  //   uint32_t group_segment_size;
  //   uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //   void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //   void *kernarg_address;
  //   uint32_t reserved1;
  // #else
  //   uint32_t reserved1;
  //   void *kernarg_address;
  // #endif
  //   uint64_t reserved2;
  //   hsa_signal_t completion_signal; // uint64_t wrapper
  // } hsa_kernel_dispatch_packet_t;
  //
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

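  // workgroup_size_x and workgroup_size_y are the third and fourth uint16_t
  // fields, so together they form the 32-bit word at i32 index 1 of the cast
  // pointer; workgroup_size_z is the low half of the word at index 2.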
  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(*TM, *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
                      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
                      : Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
                      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}

static VectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return VectorType::get(ArrayTy->getElementType(),
                         ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

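// Match the canonical GEP form for indexing into an array alloca,
//   getelementptr [N x T], [N x T]* %alloca, i32 0, i32 %idx
// and return %idx for use as the vector lane index.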
static Value *GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently only handle the case where the pointer operand is a GEP.
    // Also, we cannot vectorize volatile or atomic loads.
    LoadInst *LI = cast<LoadInst>(Inst);
    if (isa<AllocaInst>(User) &&
        LI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(LI->getType()))
      return true;
    return isa<GetElementPtrInst>(LI->getPointerOperand()) && LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value; plus, since it
    // should be in canonical form, the User should be a GEP.
    // Also, we cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    if (isa<AllocaInst>(User) &&
        SI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(SI->getValueOperand()->getType()))
      return true;
    return (SI->getPointerOperand() == User) &&
           isa<GetElementPtrInst>(User) && SI->isSimple();
  }
  default:
    return false;
  }
}

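// Try to rewrite the alloca as an SSA vector value. For example (illustrative
// IR; %val and %idx stand for arbitrary operands), a pattern like
//   %alloca = alloca [4 x i32]
//   %gep = getelementptr inbounds [4 x i32], [4 x i32]* %alloca, i32 0, i32 %idx
//   store i32 %val, i32* %gep
// is rewritten into a <4 x i32> load from the bitcast alloca, an
// insertelement of %val at lane %idx, and a store of the new vector back.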
static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion of alloca to vector is disabled\n");
    return false;
  }

  Type *AT = Alloca->getAllocatedType();
  SequentialType *AllocaTy = dyn_cast<SequentialType>(AT);

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
  if (!AllocaTy ||
      AllocaTy->getNumElements() > 16 ||
      AllocaTy->getNumElements() < 2 ||
      !VectorType::isValidElementType(AllocaTy->getElementType())) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = dyn_cast<VectorType>(AllocaTy);
  if (!VectorTy)
    VectorTy = arrayTypeToVecType(cast<ArrayType>(AllocaTy));

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      if (Inst->getType() == AT)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      if (SI->getValueOperand()->getType() == AT)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       SI->getValueOperand(),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }

  return true;
}

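// A call blocks promotion unless it is one of the intrinsics below, all of
// which only read, annotate, or copy memory and can be rewritten against the
// LDS pointer later in handleAlloca.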
static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
    Value *BaseAlloca,
    Value *Val,
    std::vector<Value*> &WorkList) const {
  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the alloca-derived pointer is the stored value rather than
      // the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other operand is derived from
    // the same alloca (or is a null pointer we can rewrite).
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

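    // Only users that are themselves pointer-typed can propagate the alloca
    // pointer any further (the i1 result of an icmp, for example, cannot),
    // so nothing else needs to be recursed into.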
    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand
    // is from another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

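  // For example (illustrative numbers only), with 64 KB of LDS and an
  // occupancy target that allows at most 32 KB of LDS per workgroup, a
  // kernel whose globals already use 20 KB would leave 12 KB available for
  // promoted allocas.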
  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore, not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << "  promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

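  // Every workitem in the workgroup gets a private copy of the promoted
  // alloca, so the LDS cost is WorkGroupSize copies of the allocated type,
  // placed after rounding the current usage up to this alloca's alignment.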
  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << "  Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlignment(),
                           MemCpy->getRawSource(),
                           MemCpy->getSourceAlignment(),
                           MemCpy->getLength(), MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlignment(),
                            MemMove->getRawSource(),
                            MemMove->getSourceAlignment(),
                            MemMove->getLength(), MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize, {Src, Intr->getOperand(1), Intr->getOperand(2)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  return true;
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}