//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Eliminates allocas by either converting them into vectors or by migrating
// them to local address space.
//
// Two passes are exposed by this file:
//    - "promote-alloca-to-vector", which runs early in the pipeline and only
//      promotes to vector. Promotion to vector is almost always profitable
//      except when the alloca is too big and the promotion would result in
//      very high register pressure.
//    - "promote-alloca", which does both promotion to vector and LDS and runs
//      much later in the pipeline. This runs after SROA because promoting to
//      LDS is of course less profitable than getting rid of the alloca or
//      vectorizing it, thus we only want to do it when the only alternative is
//      lowering the alloca to stack.
//
// Note that both of them exist for the old and new PMs. The new PM passes are
// declared in AMDGPU.h and the legacy PM ones are declared here.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool>
    DisablePromoteAllocaToVector("disable-promote-alloca-to-vector",
                                 cl::desc("Disable promote alloca to vector"),
                                 cl::init(false));

static cl::opt<bool>
    DisablePromoteAllocaToLDS("disable-promote-alloca-to-lds",
                              cl::desc("Disable promote alloca to LDS"),
                              cl::init(false));

static cl::opt<unsigned> PromoteAllocaToVectorLimit(
    "amdgpu-promote-alloca-to-vector-limit",
    cl::desc("Maximum byte size to consider promote alloca to vector"),
    cl::init(0));

// Shared implementation which can do both promotion to vector and to LDS.
class AMDGPUPromoteAllocaImpl {
private:
  const TargetMachine &TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;
  unsigned MaxVGPRs;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca, Value *Val,
                               std::vector<Value *> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val should
  /// be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst, int OpIdx0,
                                       int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

  bool tryPromoteAllocaToVector(AllocaInst &I);
  bool tryPromoteAllocaToLDS(AllocaInst &I, bool SufficientLDS);

public:
  AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {
    const Triple &TT = TM.getTargetTriple();
    IsAMDGCN = TT.getArch() == Triple::amdgcn;
    IsAMDHSA = TT.getOS() == Triple::AMDHSA;
  }

  bool run(Function &F, bool PromoteToLDS);
};

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
      return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>())
          .run(F, /*PromoteToLDS*/ true);
    return false;
  }

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

class AMDGPUPromoteAllocaToVector : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
      return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>())
          .run(F, /*PromoteToLDS*/ false);
    return false;
  }

  StringRef getPassName() const override {
    return "AMDGPU Promote Alloca to vector";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

unsigned getMaxVGPRs(const TargetMachine &TM, const Function &F) {
  if (!TM.getTargetTriple().isAMDGCN())
    return 128;

  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  unsigned MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);

  // A non-entry function has only 32 caller preserved registers.
  // Do not promote alloca which will force spilling unless we know the function
  // will be inlined.
  if (!F.hasFnAttribute(Attribute::AlwaysInline) &&
      !AMDGPU::isEntryFunctionCC(F.getCallingConv()))
    MaxVGPRs = std::min(MaxVGPRs, 32u);
  return MaxVGPRs;
}

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;
char AMDGPUPromoteAllocaToVector::ID = 0;

INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
                      "AMDGPU promote alloca to vector or LDS", false, false)
// Move LDS uses from functions to kernels before promoting allocas, so that
// the amount of LDS available can be estimated accurately.
INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDSLegacy)
INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
                    "AMDGPU promote alloca to vector or LDS", false, false)

INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
                "AMDGPU promote alloca to vector", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;

PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F, /*PromoteToLDS*/ true);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses
AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F, /*PromoteToLDS*/ false);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}

FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
  return new AMDGPUPromoteAllocaToVector();
}

bool AMDGPUPromoteAllocaImpl::run(Function &F, bool PromoteToLDS) {
  Mod = F.getParent();
  DL = &Mod->getDataLayout();

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  MaxVGPRs = getMaxVGPRs(TM, F);

  bool SufficientLDS = PromoteToLDS ? hasSufficientLocalMem(F) : false;

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : F.getEntryBlock()) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
      // Array allocations are probably not worth handling, since an allocation
      // of the array type is the canonical form.
      if (!AI->isStaticAlloca() || AI->isArrayAllocation())
        continue;
      Allocas.push_back(AI);
    }
  }

  bool Changed = false;
  for (AllocaInst *AI : Allocas) {
    if (tryPromoteAllocaToVector(*AI))
      Changed = true;
    else if (PromoteToLDS && tryPromoteAllocaToLDS(*AI, SufficientLDS))
      Changed = true;
  }

  // NOTE: tryPromoteAllocaToVector removes the alloca, so Allocas contains
  // dangling pointers. If we want to reuse it past this point, the loop above
  // would need to be updated to remove successfully promoted allocas.

  return Changed;
}

struct MemTransferInfo {
  ConstantInt *SrcIndex = nullptr;
  ConstantInt *DestIndex = nullptr;
};

// Checks if the instruction I is a memset user of the alloca AI that we can
// deal with. Currently, only non-volatile memsets that affect the whole alloca
// are handled.
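// For example, for `%a = alloca <4 x i32>` (16 bytes), a memset of 16 bytes
// starting at the alloca itself is supported, while a partial 8-byte memset
// is not.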
static bool isSupportedMemset(MemSetInst *I, AllocaInst *AI,
                              const DataLayout &DL) {
  using namespace PatternMatch;
  // For now we only care about non-volatile memsets that affect the whole type
  // (start at index 0 and fill the whole alloca).
  //
  // TODO: Now that we moved to PromoteAlloca we could handle any memsets
  // (except maybe volatile ones?) - we just need to use shufflevector if it
  // only affects a subset of the vector.
  const unsigned Size = DL.getTypeStoreSize(AI->getAllocatedType());
  return I->getOperand(0) == AI &&
         match(I->getOperand(2), m_SpecificInt(Size)) && !I->isVolatile();
}

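// Returns the vector index \p Ptr addresses: the index recorded for a known
// GEP, or 0 when \p Ptr is (a cast of) the alloca itself, i.e. a direct
// access of element 0.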
static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
  if (!GEP)
    return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));

  auto I = GEPIdx.find(GEP);
  assert(I != GEPIdx.end() && "Must have entry for GEP!");
  return I->second;
}

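// Tries to rewrite a GEP into the index of the vector element it addresses,
// returning nullptr if that would require extra arithmetic. For example, with
// the alloca viewed as <8 x i32>, the GEP
//   getelementptr inbounds [8 x i32], ptr %alloca, i32 0, i32 %i
// decomposes into VarOffsets = {%i -> 4} and ConstOffset = 0, and since the
// coefficient matches the element size of 4 bytes, the index is simply %i.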
static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
                               Type *VecElemTy, const DataLayout &DL) {
  // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
  // helper.
  unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
  MapVector<Value *, APInt> VarOffsets;
  APInt ConstOffset(BW, 0);
  if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
      !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
    return nullptr;

  unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
  if (VarOffsets.size() > 1)
    return nullptr;

  if (VarOffsets.size() == 1) {
    // Only handle cases where we don't need to insert extra arithmetic
    // instructions.
    const auto &VarOffset = VarOffsets.front();
    if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
      return nullptr;
    return VarOffset.first;
  }

  APInt Quot;
  uint64_t Rem;
  APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
  if (Rem != 0)
    return nullptr;

  return ConstantInt::get(GEP->getContext(), Quot);
}

/// Promotes a single user of the alloca to a vector form.
///
/// \param Inst           Instruction to be promoted.
/// \param DL             Module Data Layout.
/// \param VectorTy       Vectorized Type.
/// \param VecStoreSize   Size of \p VectorTy in bytes.
/// \param ElementSize    Size of \p VectorTy element type in bytes.
/// \param TransferInfo   MemTransferInst info map.
/// \param GEPVectorIdx   GEP -> VectorIdx cache.
/// \param CurVal         Current value of the vector (e.g. last stored value)
/// \param[out]  DeferredLoads \p Inst is added to this vector if it can't
///              be promoted now. This happens when promoting requires \p
///              CurVal, but \p CurVal is nullptr.
/// \return the stored value if \p Inst would have written to the alloca, or
///         nullptr otherwise.
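///
/// For example, a load that happens before any store to the alloca in its
/// block has no known \p CurVal on the first pass; it is queued in
/// \p DeferredLoads and only lowered on the second pass, once SSAUpdater can
/// provide a value for the block (inserting PHIs as needed).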
static Value *promoteAllocaUserToVector(
    Instruction *Inst, const DataLayout &DL, FixedVectorType *VectorTy,
    unsigned VecStoreSize, unsigned ElementSize,
    DenseMap<MemTransferInst *, MemTransferInfo> &TransferInfo,
    std::map<GetElementPtrInst *, Value *> &GEPVectorIdx, Value *CurVal,
    SmallVectorImpl<LoadInst *> &DeferredLoads) {
  // Note: we use InstSimplifyFolder because it can leverage the DataLayout
  // to do more folding, especially in the case of vector splats.
  IRBuilder<InstSimplifyFolder> Builder(Inst->getContext(),
                                        InstSimplifyFolder(DL));
  Builder.SetInsertPoint(Inst);

  const auto GetOrLoadCurrentVectorValue = [&]() -> Value * {
    if (CurVal)
      return CurVal;

    // If the current value is not known, insert a dummy load and lower it on
    // the second pass.
    LoadInst *Dummy =
        Builder.CreateLoad(VectorTy, PoisonValue::get(Builder.getPtrTy()),
                           "promotealloca.dummyload");
    DeferredLoads.push_back(Dummy);
    return Dummy;
  };

  const auto CreateTempPtrIntCast = [&Builder, DL](Value *Val,
                                                   Type *PtrTy) -> Value * {
    assert(DL.getTypeStoreSize(Val->getType()) == DL.getTypeStoreSize(PtrTy));
    const unsigned Size = DL.getTypeStoreSizeInBits(PtrTy);
    if (!PtrTy->isVectorTy())
      return Builder.CreateBitOrPointerCast(Val, Builder.getIntNTy(Size));
    const unsigned NumPtrElts = cast<FixedVectorType>(PtrTy)->getNumElements();
    // If we want to cast, e.g. a <2 x ptr> into a <4 x i32>, we need to first
    // cast the ptr vector to <2 x i64>.
    assert((Size % NumPtrElts == 0) && "Vector size not divisible");
    Type *EltTy = Builder.getIntNTy(Size / NumPtrElts);
    return Builder.CreateBitOrPointerCast(
        Val, FixedVectorType::get(EltTy, NumPtrElts));
  };

  Type *VecEltTy = VectorTy->getElementType();

  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Loads can only be lowered if the value is known.
    if (!CurVal) {
      DeferredLoads.push_back(cast<LoadInst>(Inst));
      return nullptr;
    }

    Value *Index = calculateVectorIndex(
        cast<LoadInst>(Inst)->getPointerOperand(), GEPVectorIdx);

    // We're loading the full vector.
    Type *AccessTy = Inst->getType();
    TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
    if (AccessSize == VecStoreSize && cast<Constant>(Index)->isZeroValue()) {
      if (AccessTy->isPtrOrPtrVectorTy())
        CurVal = CreateTempPtrIntCast(CurVal, AccessTy);
      else if (CurVal->getType()->isPtrOrPtrVectorTy())
        CurVal = CreateTempPtrIntCast(CurVal, CurVal->getType());
      Value *NewVal = Builder.CreateBitOrPointerCast(CurVal, AccessTy);
      Inst->replaceAllUsesWith(NewVal);
      return nullptr;
    }

    // Loading a subvector.
    if (isa<FixedVectorType>(AccessTy)) {
      assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
      const unsigned NumLoadedElts = AccessSize / DL.getTypeStoreSize(VecEltTy);
      auto *SubVecTy = FixedVectorType::get(VecEltTy, NumLoadedElts);
      assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));

      Value *SubVec = PoisonValue::get(SubVecTy);
      for (unsigned K = 0; K < NumLoadedElts; ++K) {
        Value *CurIdx =
            Builder.CreateAdd(Index, ConstantInt::get(Index->getType(), K));
        SubVec = Builder.CreateInsertElement(
            SubVec, Builder.CreateExtractElement(CurVal, CurIdx), K);
      }

      if (AccessTy->isPtrOrPtrVectorTy())
        SubVec = CreateTempPtrIntCast(SubVec, AccessTy);
      else if (SubVecTy->isPtrOrPtrVectorTy())
        SubVec = CreateTempPtrIntCast(SubVec, SubVecTy);

      SubVec = Builder.CreateBitOrPointerCast(SubVec, AccessTy);
      Inst->replaceAllUsesWith(SubVec);
      return nullptr;
    }

    // We're loading one element.
    Value *ExtractElement = Builder.CreateExtractElement(CurVal, Index);
    if (AccessTy != VecEltTy)
      ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, AccessTy);

    Inst->replaceAllUsesWith(ExtractElement);
    return nullptr;
  }
  case Instruction::Store: {
    // For stores, it's a bit trickier and it depends on whether we're storing
    // the full vector or not. If we're storing the full vector, we don't need
    // to know the current value. If this is a store of a single element, we
    // need to know the value.
    StoreInst *SI = cast<StoreInst>(Inst);
    Value *Index = calculateVectorIndex(SI->getPointerOperand(), GEPVectorIdx);
    Value *Val = SI->getValueOperand();

    // We're storing the full vector, we can handle this without knowing CurVal.
    Type *AccessTy = Val->getType();
    TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
    if (AccessSize == VecStoreSize && cast<Constant>(Index)->isZeroValue()) {
      if (AccessTy->isPtrOrPtrVectorTy())
        Val = CreateTempPtrIntCast(Val, AccessTy);
      else if (VectorTy->isPtrOrPtrVectorTy())
        Val = CreateTempPtrIntCast(Val, VectorTy);
      return Builder.CreateBitOrPointerCast(Val, VectorTy);
    }

    // Storing a subvector.
    if (isa<FixedVectorType>(AccessTy)) {
      assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
      const unsigned NumWrittenElts =
          AccessSize / DL.getTypeStoreSize(VecEltTy);
      const unsigned NumVecElts = VectorTy->getNumElements();
      auto *SubVecTy = FixedVectorType::get(VecEltTy, NumWrittenElts);
      assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));

      if (SubVecTy->isPtrOrPtrVectorTy())
        Val = CreateTempPtrIntCast(Val, SubVecTy);
      else if (AccessTy->isPtrOrPtrVectorTy())
        Val = CreateTempPtrIntCast(Val, AccessTy);

      Val = Builder.CreateBitOrPointerCast(Val, SubVecTy);

      Value *CurVec = GetOrLoadCurrentVectorValue();
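      // NumWrittenElts can exceed the alloca vector's element count, as
      // isSupportedAccessType only checks divisibility of the access size, so
      // clamp to NumVecElts to avoid inserting past the end of the vector.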
      for (unsigned K = 0, NumElts = std::min(NumWrittenElts, NumVecElts);
           K < NumElts; ++K) {
        Value *CurIdx =
            Builder.CreateAdd(Index, ConstantInt::get(Index->getType(), K));
        CurVec = Builder.CreateInsertElement(
            CurVec, Builder.CreateExtractElement(Val, K), CurIdx);
      }
      return CurVec;
    }

    if (Val->getType() != VecEltTy)
      Val = Builder.CreateBitOrPointerCast(Val, VecEltTy);
    return Builder.CreateInsertElement(GetOrLoadCurrentVectorValue(), Val,
                                       Index);
  }
  case Instruction::Call: {
    if (auto *MTI = dyn_cast<MemTransferInst>(Inst)) {
      // For memcpy, we need to know the current value of the vector.
      ConstantInt *Length = cast<ConstantInt>(MTI->getLength());
      unsigned NumCopied = Length->getZExtValue() / ElementSize;
      MemTransferInfo *TI = &TransferInfo[MTI];
      unsigned SrcBegin = TI->SrcIndex->getZExtValue();
      unsigned DestBegin = TI->DestIndex->getZExtValue();

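      // Build a shuffle mask that copies elements [SrcBegin, SrcBegin +
      // NumCopied) over [DestBegin, DestBegin + NumCopied) and keeps all other
      // lanes. E.g. on a <8 x i32> with SrcBegin = 1, DestBegin = 4 and
      // NumCopied = 2, the mask is <0, 1, 2, 3, 1, 2, 6, 7>.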
      SmallVector<int> Mask;
      for (unsigned Idx = 0; Idx < VectorTy->getNumElements(); ++Idx) {
        if (Idx >= DestBegin && Idx < DestBegin + NumCopied) {
          Mask.push_back(SrcBegin++);
        } else {
          Mask.push_back(Idx);
        }
      }

      return Builder.CreateShuffleVector(GetOrLoadCurrentVectorValue(), Mask);
    }

    if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
      // For memset, we don't need to know the previous value because we
      // currently only allow memsets that cover the whole alloca.
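      // E.g. a memset of a <4 x i32> alloca with the byte 0xAB first builds a
      // <4 x i8> splat, bitcasts it to an i32 (0xABABABAB), then splats that
      // value across all four lanes.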
      Value *Elt = MSI->getOperand(1);
      if (DL.getTypeStoreSize(VecEltTy) > 1) {
        Value *EltBytes =
            Builder.CreateVectorSplat(DL.getTypeStoreSize(VecEltTy), Elt);
        Elt = Builder.CreateBitCast(EltBytes, VecEltTy);
      }

      return Builder.CreateVectorSplat(VectorTy->getElementCount(), Elt);
    }

    llvm_unreachable("Unsupported call when promoting alloca to vector");
  }

  default:
    llvm_unreachable("Inconsistency in instructions promotable to vector");
  }

  llvm_unreachable("Did not return after promoting instruction!");
}

static bool isSupportedAccessType(FixedVectorType *VecTy, Type *AccessTy,
                                  const DataLayout &DL) {
  // Access as a vector type can work if the size of the access vector is a
  // multiple of the size of the alloca's vector element type.
  //
  // Examples:
  //    - VecTy = <8 x float>, AccessTy = <4 x float> -> OK
  //    - VecTy = <4 x double>, AccessTy = <2 x float> -> OK
  //    - VecTy = <4 x double>, AccessTy = <3 x float> -> NOT OK
  //        - 3*32 is not a multiple of 64
  //
  // We could handle more complicated cases, but it'd make things a lot more
  // complicated.
  if (isa<FixedVectorType>(AccessTy)) {
    TypeSize AccTS = DL.getTypeStoreSize(AccessTy);
    TypeSize VecTS = DL.getTypeStoreSize(VecTy->getElementType());
    return AccTS.isKnownMultipleOf(VecTS);
  }

  return CastInst::isBitOrNoopPointerCastable(VecTy->getElementType(), AccessTy,
                                              DL);
}

/// Iterates over an instruction worklist that may contain multiple instructions
/// from the same basic block, but in a different order.
template <typename InstContainer>
static void forEachWorkListItem(const InstContainer &WorkList,
                                std::function<void(Instruction *)> Fn) {
  // Bucket up uses of the alloca by the block they occur in.
  // This is important because we have to handle multiple defs/uses in a block
  // ourselves: SSAUpdater is purely for cross-block references.
  DenseMap<BasicBlock *, SmallDenseSet<Instruction *>> UsesByBlock;
  for (Instruction *User : WorkList)
    UsesByBlock[User->getParent()].insert(User);

  for (Instruction *User : WorkList) {
    BasicBlock *BB = User->getParent();
    auto &BlockUses = UsesByBlock[BB];

    // Already processed, skip.
    if (BlockUses.empty())
      continue;

    // Only user in the block, directly process it.
    if (BlockUses.size() == 1) {
      Fn(User);
      continue;
    }

    // Multiple users in the block, do a linear scan to see users in order.
    for (Instruction &Inst : *BB) {
      if (!BlockUses.contains(&Inst))
        continue;

      Fn(&Inst);
    }

    // Clear the block so we know it's been processed.
    BlockUses.clear();
  }
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
  LLVM_DEBUG(dbgs() << "Trying to promote to vector: " << Alloca << '\n');

  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promote alloca to vector is disabled\n");
    return false;
  }

  Type *AllocaTy = Alloca.getAllocatedType();
  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
    if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
        ArrayTy->getNumElements() > 0)
      VectorTy = FixedVectorType::get(ArrayTy->getElementType(),
                                      ArrayTy->getNumElements());
  }

  // Use up to 1/4 of the available register budget for vectorization. Both
  // the limit and the alloca size below are in bits; a VGPR holds 32 bits.
  unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
                                              : (MaxVGPRs * 32);

  if (DL->getTypeSizeInBits(AllocaTy) * 4 > Limit) {
    LLVM_DEBUG(dbgs() << "  Alloca too big for vectorization with " << MaxVGPRs
                      << " registers available\n");
    return false;
  }

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject alloca's of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted but we don't currently
  // handle this case
  if (!VectorTy) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  if (VectorTy->getNumElements() > 16 || VectorTy->getNumElements() < 2) {
    LLVM_DEBUG(dbgs() << "  " << *VectorTy
                      << " has an unsupported number of elements\n");
    return false;
  }

  std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
  SmallVector<Instruction *> WorkList;
  SmallVector<Instruction *> UsersToRemove;
  SmallVector<Instruction *> DeferredInsts;
  SmallVector<Use *, 8> Uses;
  DenseMap<MemTransferInst *, MemTransferInfo> TransferInfo;

  const auto RejectUser = [&](Instruction *Inst, Twine Msg) {
    LLVM_DEBUG(dbgs() << "  Cannot promote alloca to vector: " << Msg << "\n"
                      << "    " << *Inst << "\n");
    return false;
  };

  for (Use &U : Alloca.uses())
    Uses.push_back(&U);

  LLVM_DEBUG(dbgs() << "  Attempting promotion to: " << *VectorTy << "\n");

  Type *VecEltTy = VectorTy->getElementType();
  unsigned ElementSize = DL->getTypeSizeInBits(VecEltTy) / 8;
  while (!Uses.empty()) {
    Use *U = Uses.pop_back_val();
    Instruction *Inst = cast<Instruction>(U->getUser());

    if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
      // This is a store of the pointer, not to the pointer.
      if (isa<StoreInst>(Inst) &&
          U->getOperandNo() != StoreInst::getPointerOperandIndex())
        return RejectUser(Inst, "pointer is being stored");

      Type *AccessTy = getLoadStoreType(Inst);
      if (AccessTy->isAggregateType())
        return RejectUser(Inst, "unsupported load/store as aggregate");
      assert(!AccessTy->isAggregateType() || AccessTy->isArrayTy());

      // Check that this is a simple access of a vector element.
      bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
                                          : cast<StoreInst>(Inst)->isSimple();
      if (!IsSimple)
        return RejectUser(Inst, "not a simple load or store");

      Ptr = Ptr->stripPointerCasts();

      // Alloca already accessed as vector.
      if (Ptr == &Alloca && DL->getTypeStoreSize(Alloca.getAllocatedType()) ==
                                DL->getTypeStoreSize(AccessTy)) {
        WorkList.push_back(Inst);
        continue;
      }

      if (!isSupportedAccessType(VectorTy, AccessTy, *DL))
        return RejectUser(Inst, "not a supported access type");

      WorkList.push_back(Inst);
      continue;
    }

    if (isa<BitCastInst>(Inst)) {
      // Look through bitcasts.
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      UsersToRemove.push_back(Inst);
      continue;
    }

    if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
      // If we can't compute a vector index from this GEP, then we can't
      // promote this alloca to vector.
      Value *Index = GEPToVectorIndex(GEP, &Alloca, VecEltTy, *DL);
      if (!Index)
        return RejectUser(Inst, "cannot compute vector index for GEP");

      GEPVectorIdx[GEP] = Index;
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      UsersToRemove.push_back(Inst);
      continue;
    }

    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst);
        MSI && isSupportedMemset(MSI, &Alloca, *DL)) {
      WorkList.push_back(Inst);
      continue;
    }

    if (MemTransferInst *TransferInst = dyn_cast<MemTransferInst>(Inst)) {
      if (TransferInst->isVolatile())
        return RejectUser(Inst, "mem transfer inst is volatile");

      ConstantInt *Len = dyn_cast<ConstantInt>(TransferInst->getLength());
      if (!Len || (Len->getZExtValue() % ElementSize))
        return RejectUser(Inst, "mem transfer inst length is non-constant or "
                                "not a multiple of the vector element size");

      if (!TransferInfo.count(TransferInst)) {
        DeferredInsts.push_back(Inst);
        WorkList.push_back(Inst);
        TransferInfo[TransferInst] = MemTransferInfo();
      }

      auto getPointerIndexOfAlloca = [&](Value *Ptr) -> ConstantInt * {
        GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
        if (Ptr != &Alloca && !GEPVectorIdx.count(GEP))
          return nullptr;

        return dyn_cast<ConstantInt>(calculateVectorIndex(Ptr, GEPVectorIdx));
      };

      unsigned OpNum = U->getOperandNo();
      MemTransferInfo *TI = &TransferInfo[TransferInst];
      if (OpNum == 0) {
        Value *Dest = TransferInst->getDest();
        ConstantInt *Index = getPointerIndexOfAlloca(Dest);
        if (!Index)
          return RejectUser(Inst, "could not calculate constant dest index");
        TI->DestIndex = Index;
      } else {
        assert(OpNum == 1);
        Value *Src = TransferInst->getSource();
        ConstantInt *Index = getPointerIndexOfAlloca(Src);
        if (!Index)
          return RejectUser(Inst, "could not calculate constant src index");
        TI->SrcIndex = Index;
      }
      continue;
    }

    // Ignore assume-like intrinsics and comparisons used in assumes.
    if (isAssumeLikeIntrinsic(Inst)) {
      UsersToRemove.push_back(Inst);
      continue;
    }

    if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
          return isAssumeLikeIntrinsic(cast<Instruction>(U));
        })) {
      UsersToRemove.push_back(Inst);
      continue;
    }

    return RejectUser(Inst, "unhandled alloca user");
  }

  while (!DeferredInsts.empty()) {
    Instruction *Inst = DeferredInsts.pop_back_val();
    MemTransferInst *TransferInst = cast<MemTransferInst>(Inst);
    // TODO: Support the case if the pointers are from different alloca or
    // from different address spaces.
    MemTransferInfo &Info = TransferInfo[TransferInst];
    if (!Info.SrcIndex || !Info.DestIndex)
      return RejectUser(
          Inst, "mem transfer inst is missing constant src and/or dst index");
  }

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');
  const unsigned VecStoreSize = DL->getTypeStoreSize(VectorTy);

  // Alloca is uninitialized memory. Imitate that by making the first value
  // undef.
  SSAUpdater Updater;
  Updater.Initialize(VectorTy, "promotealloca");
  Updater.AddAvailableValue(Alloca.getParent(), UndefValue::get(VectorTy));

  // First handle the initial worklist.
  SmallVector<LoadInst *, 4> DeferredLoads;
  forEachWorkListItem(WorkList, [&](Instruction *I) {
    BasicBlock *BB = I->getParent();
    // On the first pass, we only take values that are trivially known, i.e.
    // where AddAvailableValue was already called in this block.
    Value *Result = promoteAllocaUserToVector(
        I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
        Updater.FindValueForBlock(BB), DeferredLoads);
    if (Result)
      Updater.AddAvailableValue(BB, Result);
  });

  // Then handle deferred loads.
  forEachWorkListItem(DeferredLoads, [&](Instruction *I) {
    SmallVector<LoadInst *, 0> NewDLs;
    BasicBlock *BB = I->getParent();
    // On the second pass, we use GetValueInMiddleOfBlock to guarantee we always
    // get a value, inserting PHIs as needed.
    Value *Result = promoteAllocaUserToVector(
        I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
        Updater.GetValueInMiddleOfBlock(I->getParent()), NewDLs);
    if (Result)
      Updater.AddAvailableValue(BB, Result);
    assert(NewDLs.empty() && "No more deferred loads should be queued!");
  });

  // Delete all instructions. On the first pass, new dummy loads may have been
  // added so we need to collect them too.
  DenseSet<Instruction *> InstsToDelete(WorkList.begin(), WorkList.end());
  InstsToDelete.insert(DeferredLoads.begin(), DeferredLoads.end());
  for (Instruction *I : InstsToDelete) {
    assert(I->use_empty());
    I->eraseFromParent();
  }

  // Delete all the users that are known to be removable.
  for (Instruction *I : reverse(UsersToRemove)) {
    I->dropDroppableUses();
    assert(I->use_empty());
    I->eraseFromParent();
  }

  // Alloca should now be dead too.
  assert(Alloca.use_empty());
  Alloca.eraseFromParent();
  return true;
}

std::pair<Value *, Value *>
AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
  Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn =
        Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn =
        Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn =
      Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addRetAttr(Attribute::NoAlias);
  DispatchPtr->addRetAttr(Attribute::NonNull);
  F.removeFnAttr("amdgpu-no-dispatch-ptr");

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableRetAttr(64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));

  MDNode *MD = MDNode::get(Mod->getContext(), std::nullopt);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::pair(Y, LoadZU);
}

Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
                                              unsigned N) {
  Function *F = Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
  StringRef AttrName;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
    AttrName = "amdgpu-no-workitem-id-x";
    break;
  case 1:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
    AttrName = "amdgpu-no-workitem-id-y";
    break;

  case 2:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
    AttrName = "amdgpu-no-workitem-id-z";
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);
  F->removeFnAttr(AttrName);

  return CI;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
    Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
    int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = getUnderlyingObject(OtherOp);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
    Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote a select if we know that the other select operand
    // is from another pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    // Do not promote vector/aggregate type instructions. It is hard to track
    // their users.
    if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
      return false;

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {

  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getAddressableLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  SmallVector<const Constant *, 16> Stack;
  SmallPtrSet<const Constant *, 8> VisitedConstants;
  SmallPtrSet<const GlobalVariable *, 8> UsedLDS;

  auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
    for (const User *U : Val->users()) {
      if (const Instruction *Use = dyn_cast<Instruction>(U)) {
        if (Use->getParent()->getParent() == &F)
          return true;
      } else {
        const Constant *C = cast<Constant>(U);
        if (VisitedConstants.insert(C).second)
          Stack.push_back(C);
      }
    }

    return false;
  };

  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    if (visitUsers(&GV, &GV)) {
      UsedLDS.insert(&GV);
      Stack.clear();
      continue;
    }

    // For any ConstantExpr uses, we need to recursively search the users until
    // we see a function.
    while (!Stack.empty()) {
      const Constant *C = Stack.pop_back_val();
      if (visitUsers(&GV, C)) {
        UsedLDS.insert(&GV);
        Stack.clear();
        break;
      }
    }
  }

  const DataLayout &DL = Mod->getDataLayout();
  SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
  AllocatedSizes.reserve(UsedLDS.size());

  for (const GlobalVariable *GV : UsedLDS) {
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());

    // HIP uses an extern unsized array in local address space for dynamically
    // allocated shared memory. In that case, we have to disable the promotion.
    if (GV->hasExternalLinkage() && AllocSize == 0) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
                           "local memory. Promoting to local memory "
                           "disabled.\n");
      return false;
    }

    AllocatedSizes.emplace_back(AllocSize, Alignment);
  }

  // Sort to try to estimate the worst case alignment padding
  //
  // FIXME: We should really do something to fix the addresses to a more optimal
  // value instead
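  //
  // E.g. with allocations of {4 bytes/align 4, 4 bytes/align 8}, placing the
  // align-8 allocation last costs 4 (data) + 4 (padding) + 4 (data) = 12
  // bytes, while the reverse order needs only 8; sorting by alignment picks
  // the pessimistic order.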
  llvm::sort(AllocatedSizes, llvm::less_second());

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;

  // FIXME: Try to account for padding here. The real padding and address is
  // currently determined from the inverse order of uses in the function when
  // legalizing, which could also potentially change. We try to estimate the
  // worst case here, but we probably should fix the addresses earlier.
  for (auto Alloc : AllocatedSizes) {
    CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
    CurrentLocalMemUsage += Alloc.first;
  }

  unsigned MaxOccupancy =
      ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage, F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount =
      ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
                                                    bool SufficientLDS) {
  LLVM_DEBUG(dbgs() << "Trying to promote to LDS: " << I << '\n');

  if (DisablePromoteAllocaToLDS) {
    LLVM_DEBUG(dbgs() << "  Promote alloca to LDS is disabled\n");
    return false;
  }

  const DataLayout &DL = Mod->getDataLayout();
  IRBuilder<> Builder(&I);

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory,
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
  uint32_t AllocSize =
      WorkGroupSize * DL.getTypeAllocSize(I.getAllocatedType());
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value *> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

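  // Create one copy of the alloca per workitem in the group: e.g. an
  // `alloca i32` named `%x` in kernel `foo` with a maximum work group size of
  // 256 becomes a `[256 x i32]` LDS array named `foo.x`, indexed by the
  // linear workitem ID computed below.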
  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage, PoisonValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
      GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlign());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

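  // Compute the linear workitem ID:
  //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ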
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  LLVMContext &Context = Mod->getContext();
  Value *Indices[] = {Constant::getNullValue(Type::getInt32Ty(Context)), TID};

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  SmallVector<IntrinsicInst *> DeferredIntrs;

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        PointerType *NewTy = PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want to
      // touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      PointerType *NewTy = PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // These have 2 pointer operands. In case the second pointer also needs
      // to be replaced, we defer processing of these intrinsics until all
      // other values have been processed.
      DeferredIntrs.push_back(Intr);
      continue;
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlign(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Function *ObjectSize = Intrinsic::getDeclaration(
          Mod, Intrinsic::objectsize,
          {Intr->getType(),
           PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS)});

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  for (IntrinsicInst *Intr : DeferredIntrs) {
    Builder.SetInsertPoint(Intr);
    Intrinsic::ID ID = Intr->getIntrinsicID();
    assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);

    MemTransferInst *MI = cast<MemTransferInst>(Intr);
    auto *B = Builder.CreateMemTransferInst(
        ID, MI->getRawDest(), MI->getDestAlign(), MI->getRawSource(),
        MI->getSourceAlign(), MI->getLength(), MI->isVolatile());

    for (unsigned I = 0; I != 2; ++I) {
      if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
        B->addDereferenceableParamAttr(I, Bytes);
      }
    }

    Intr->eraseFromParent();
  }

  return true;
}