1 //===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Eliminates allocas by either converting them into vectors or by migrating
10 // them to local address space.
11 //
12 // Two passes are exposed by this file:
13 //    - "promote-alloca-to-vector", which runs early in the pipeline and only
14 //      promotes to vector. Promotion to vector is almost always profitable
15 //      except when the alloca is too big and the promotion would result in
16 //      very high register pressure.
17 //    - "promote-alloca", which does both promotion to vector and LDS and runs
18 //      much later in the pipeline. This runs after SROA because promoting to
19 //      LDS is of course less profitable than getting rid of the alloca or
20 //      vectorizing it, thus we only want to do it when the only alternative is
21 //      lowering the alloca to stack.
22 //
23 // Note that both of them exist for the old and new PMs. The new PM passes are
24 // declared in AMDGPU.h and the legacy PM ones are declared here.
25 //
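// As a rough illustration (not tied to any specific test case): an
// "alloca [4 x float]" that is only accessed through GEPs with computable
// indices can be rewritten by the vector path into a <4 x float> SSA value,
// with its loads and stores becoming extractelement/insertelement, while the
// LDS path instead gives each workitem a private slot in an LDS array.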
26 //===----------------------------------------------------------------------===//
27 
28 #include "AMDGPU.h"
29 #include "GCNSubtarget.h"
30 #include "Utils/AMDGPUBaseInfo.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/Analysis/CaptureTracking.h"
33 #include "llvm/Analysis/InstSimplifyFolder.h"
34 #include "llvm/Analysis/InstructionSimplify.h"
35 #include "llvm/Analysis/ValueTracking.h"
36 #include "llvm/CodeGen/TargetPassConfig.h"
37 #include "llvm/IR/IRBuilder.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/IntrinsicsAMDGPU.h"
40 #include "llvm/IR/IntrinsicsR600.h"
41 #include "llvm/IR/PatternMatch.h"
42 #include "llvm/Pass.h"
43 #include "llvm/Target/TargetMachine.h"
44 #include "llvm/Transforms/Utils/SSAUpdater.h"
45 
46 #define DEBUG_TYPE "amdgpu-promote-alloca"
47 
48 using namespace llvm;
49 
50 namespace {
51 
52 static cl::opt<bool>
53     DisablePromoteAllocaToVector("disable-promote-alloca-to-vector",
54                                  cl::desc("Disable promote alloca to vector"),
55                                  cl::init(false));
56 
57 static cl::opt<bool>
58     DisablePromoteAllocaToLDS("disable-promote-alloca-to-lds",
59                               cl::desc("Disable promote alloca to LDS"),
60                               cl::init(false));
61 
62 static cl::opt<unsigned> PromoteAllocaToVectorLimit(
63     "amdgpu-promote-alloca-to-vector-limit",
64     cl::desc("Maximum byte size to consider promoting alloca to vector"),
65     cl::init(0));
66 
67 // Shared implementation which can do both promotion to vector and to LDS.
68 class AMDGPUPromoteAllocaImpl {
69 private:
70   const TargetMachine &TM;
71   Module *Mod = nullptr;
72   const DataLayout *DL = nullptr;
73 
74   // FIXME: This should be per-kernel.
75   uint32_t LocalMemLimit = 0;
76   uint32_t CurrentLocalMemUsage = 0;
77   unsigned MaxVGPRs;
78 
79   bool IsAMDGCN = false;
80   bool IsAMDHSA = false;
81 
82   std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
83   Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
84 
85   /// BaseAlloca is the alloca root the search started from.
86   /// Val may be that alloca or a recursive user of it.
87   bool collectUsesWithPtrTypes(Value *BaseAlloca, Value *Val,
88                                std::vector<Value *> &WorkList) const;
89 
90   /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
91   /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
92   /// Returns true if both operands are derived from the same alloca. Val should
93   /// be the same value as one of the input operands of UseInst.
94   bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
95                                        Instruction *UseInst, int OpIdx0,
96                                        int OpIdx1) const;
97 
98   /// Check whether we have enough local memory for promotion.
99   bool hasSufficientLocalMem(const Function &F);
100 
101   bool tryPromoteAllocaToVector(AllocaInst &I);
102   bool tryPromoteAllocaToLDS(AllocaInst &I, bool SufficientLDS);
103 
104 public:
105   AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {
106     const Triple &TT = TM.getTargetTriple();
107     IsAMDGCN = TT.getArch() == Triple::amdgcn;
108     IsAMDHSA = TT.getOS() == Triple::AMDHSA;
109   }
110 
111   bool run(Function &F, bool PromoteToLDS);
112 };
113 
114 // FIXME: This can create globals so should be a module pass.
115 class AMDGPUPromoteAlloca : public FunctionPass {
116 public:
117   static char ID;
118 
119   AMDGPUPromoteAlloca() : FunctionPass(ID) {}
120 
121   bool runOnFunction(Function &F) override {
122     if (skipFunction(F))
123       return false;
124     if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
125       return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>())
126           .run(F, /*PromoteToLDS*/ true);
127     return false;
128   }
129 
130   StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
131 
132   void getAnalysisUsage(AnalysisUsage &AU) const override {
133     AU.setPreservesCFG();
134     FunctionPass::getAnalysisUsage(AU);
135   }
136 };
137 
138 class AMDGPUPromoteAllocaToVector : public FunctionPass {
139 public:
140   static char ID;
141 
142   AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
143 
144   bool runOnFunction(Function &F) override {
145     if (skipFunction(F))
146       return false;
147     if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
148       return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>())
149           .run(F, /*PromoteToLDS*/ false);
150     return false;
151   }
152 
153   StringRef getPassName() const override {
154     return "AMDGPU Promote Alloca to vector";
155   }
156 
157   void getAnalysisUsage(AnalysisUsage &AU) const override {
158     AU.setPreservesCFG();
159     FunctionPass::getAnalysisUsage(AU);
160   }
161 };
162 
163 unsigned getMaxVGPRs(const TargetMachine &TM, const Function &F) {
164   if (!TM.getTargetTriple().isAMDGCN())
165     return 128;
166 
167   const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
168   unsigned MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
169 
170   // A non-entry function has only 32 caller-preserved registers.
171   // Do not promote an alloca that would force spilling unless we know the
172   // function will be inlined.
173   if (!F.hasFnAttribute(Attribute::AlwaysInline) &&
174       !AMDGPU::isEntryFunctionCC(F.getCallingConv()))
175     MaxVGPRs = std::min(MaxVGPRs, 32u);
176   return MaxVGPRs;
177 }
178 
179 } // end anonymous namespace
180 
181 char AMDGPUPromoteAlloca::ID = 0;
182 char AMDGPUPromoteAllocaToVector::ID = 0;
183 
184 INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
185                       "AMDGPU promote alloca to vector or LDS", false, false)
186 // Move LDS uses from functions to kernels before promoting allocas, for an
187 // accurate estimate of the LDS available.
188 INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDSLegacy)
189 INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
190                     "AMDGPU promote alloca to vector or LDS", false, false)
191 
192 INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
193                 "AMDGPU promote alloca to vector", false, false)
194 
195 char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
196 char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
197 
198 PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
199                                                FunctionAnalysisManager &AM) {
200   bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F, /*PromoteToLDS*/ true);
201   if (Changed) {
202     PreservedAnalyses PA;
203     PA.preserveSet<CFGAnalyses>();
204     return PA;
205   }
206   return PreservedAnalyses::all();
207 }
208 
209 PreservedAnalyses
210 AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
211   bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F, /*PromoteToLDS*/ false);
212   if (Changed) {
213     PreservedAnalyses PA;
214     PA.preserveSet<CFGAnalyses>();
215     return PA;
216   }
217   return PreservedAnalyses::all();
218 }
219 
220 FunctionPass *llvm::createAMDGPUPromoteAlloca() {
221   return new AMDGPUPromoteAlloca();
222 }
223 
224 FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
225   return new AMDGPUPromoteAllocaToVector();
226 }
227 
228 bool AMDGPUPromoteAllocaImpl::run(Function &F, bool PromoteToLDS) {
229   Mod = F.getParent();
230   DL = &Mod->getDataLayout();
231 
232   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
233   if (!ST.isPromoteAllocaEnabled())
234     return false;
235 
236   MaxVGPRs = getMaxVGPRs(TM, F);
237 
238   bool SufficientLDS = PromoteToLDS ? hasSufficientLocalMem(F) : false;
239 
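  // Only static, non-array allocas in the entry block are considered below;
  // everything else is left to normal stack lowering.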
240   SmallVector<AllocaInst *, 16> Allocas;
241   for (Instruction &I : F.getEntryBlock()) {
242     if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
243       // Array allocations are probably not worth handling, since an allocation
244       // of the array type is the canonical form.
245       if (!AI->isStaticAlloca() || AI->isArrayAllocation())
246         continue;
247       Allocas.push_back(AI);
248     }
249   }
250 
251   bool Changed = false;
252   for (AllocaInst *AI : Allocas) {
253     if (tryPromoteAllocaToVector(*AI))
254       Changed = true;
255     else if (PromoteToLDS && tryPromoteAllocaToLDS(*AI, SufficientLDS))
256       Changed = true;
257   }
258 
259   // NOTE: tryPromoteAllocaToVector removes the alloca, so Allocas contains
260   // dangling pointers. If we want to reuse it past this point, the loop above
261   // would need to be updated to remove successfully promoted allocas.
262 
263   return Changed;
264 }
265 
266 struct MemTransferInfo {
267   ConstantInt *SrcIndex = nullptr;
268   ConstantInt *DestIndex = nullptr;
269 };
270 
271 // Checks if the instruction I is a memset user of the alloca AI that we can
272 // deal with. Currently, only non-volatile memsets that affect the whole alloca
273 // are handled.
274 static bool isSupportedMemset(MemSetInst *I, AllocaInst *AI,
275                               const DataLayout &DL) {
276   using namespace PatternMatch;
277   // For now we only care about non-volatile memsets that affect the whole type
278   // (start at index 0 and fill the whole alloca).
279   //
280   // TODO: Now that we moved to PromoteAlloca we could handle any memsets
281   // (except maybe volatile ones?) - we just need to use shufflevector if it
282   // only affects a subset of the vector.
283   const unsigned Size = DL.getTypeStoreSize(AI->getAllocatedType());
284   return I->getOperand(0) == AI &&
285          match(I->getOperand(2), m_SpecificInt(Size)) && !I->isVolatile();
286 }
287 
288 static Value *
289 calculateVectorIndex(Value *Ptr,
290                      const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
291   auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
292   if (!GEP)
293     return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));
294 
295   auto I = GEPIdx.find(GEP);
296   assert(I != GEPIdx.end() && "Must have entry for GEP!");
297   return I->second;
298 }
299 
300 static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
301                                Type *VecElemTy, const DataLayout &DL) {
302   // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
303   // helper.
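  // Example (assuming a 4-byte vector element): a GEP with a constant byte
  // offset of 8 maps to vector index 2, and a GEP whose single variable offset
  // has scale 4 and no constant part maps directly to that variable index.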
304   unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
305   MapVector<Value *, APInt> VarOffsets;
306   APInt ConstOffset(BW, 0);
307   if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
308       !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
309     return nullptr;
310 
311   unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
312   if (VarOffsets.size() > 1)
313     return nullptr;
314 
315   if (VarOffsets.size() == 1) {
316     // Only handle cases where we don't need to insert extra arithmetic
317     // instructions.
318     const auto &VarOffset = VarOffsets.front();
319     if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
320       return nullptr;
321     return VarOffset.first;
322   }
323 
324   APInt Quot;
325   uint64_t Rem;
326   APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
327   if (Rem != 0)
328     return nullptr;
329 
330   return ConstantInt::get(GEP->getContext(), Quot);
331 }
332 
333 /// Promotes a single user of the alloca to a vector form.
334 ///
335 /// \param Inst           Instruction to be promoted.
336 /// \param DL             Module Data Layout.
337 /// \param VectorTy       Vectorized Type.
338 /// \param VecStoreSize   Size of \p VectorTy in bytes.
339 /// \param ElementSize    Size of \p VectorTy element type in bytes.
340 /// \param TransferInfo   MemTransferInst info map.
341 /// \param GEPVectorIdx   GEP -> VectorIdx cache.
342 /// \param CurVal         Current value of the vector (e.g. last stored value)
343 /// \param[out]  DeferredLoads \p Inst is added to this vector if it can't
344 ///              be promoted now. This happens when promoting requires \p
345 ///              CurVal, but \p CurVal is nullptr.
346 /// \return the stored value if \p Inst would have written to the alloca, or
347 ///         nullptr otherwise.
348 static Value *promoteAllocaUserToVector(
349     Instruction *Inst, const DataLayout &DL, FixedVectorType *VectorTy,
350     unsigned VecStoreSize, unsigned ElementSize,
351     DenseMap<MemTransferInst *, MemTransferInfo> &TransferInfo,
352     std::map<GetElementPtrInst *, Value *> &GEPVectorIdx, Value *CurVal,
353     SmallVectorImpl<LoadInst *> &DeferredLoads) {
354   // Note: we use InstSimplifyFolder because it can leverage the DataLayout
355   // to do more folding, especially in the case of vector splats.
356   IRBuilder<InstSimplifyFolder> Builder(Inst->getContext(),
357                                         InstSimplifyFolder(DL));
358   Builder.SetInsertPoint(Inst);
359 
360   const auto GetOrLoadCurrentVectorValue = [&]() -> Value * {
361     if (CurVal)
362       return CurVal;
363 
364     // If the current value is not known, insert a dummy load and lower it on
365     // the second pass.
366     LoadInst *Dummy =
367         Builder.CreateLoad(VectorTy, PoisonValue::get(Builder.getPtrTy()),
368                            "promotealloca.dummyload");
369     DeferredLoads.push_back(Dummy);
370     return Dummy;
371   };
372 
373   const auto CreateTempPtrIntCast = [&Builder, DL](Value *Val,
374                                                    Type *PtrTy) -> Value * {
375     assert(DL.getTypeStoreSize(Val->getType()) == DL.getTypeStoreSize(PtrTy));
376     const unsigned Size = DL.getTypeStoreSizeInBits(PtrTy);
377     if (!PtrTy->isVectorTy())
378       return Builder.CreateBitOrPointerCast(Val, Builder.getIntNTy(Size));
379     const unsigned NumPtrElts = cast<FixedVectorType>(PtrTy)->getNumElements();
380     // If we want to cast, e.g., a <2 x ptr> into a <4 x i32>, we need to
381     // first cast the ptr vector to <2 x i64>.
382     assert((Size % NumPtrElts == 0) && "Vector size not divisible");
383     Type *EltTy = Builder.getIntNTy(Size / NumPtrElts);
384     return Builder.CreateBitOrPointerCast(
385         Val, FixedVectorType::get(EltTy, NumPtrElts));
386   };
387 
388   Type *VecEltTy = VectorTy->getElementType();
389 
390   switch (Inst->getOpcode()) {
391   case Instruction::Load: {
392     // Loads can only be lowered if the value is known.
393     if (!CurVal) {
394       DeferredLoads.push_back(cast<LoadInst>(Inst));
395       return nullptr;
396     }
397 
398     Value *Index = calculateVectorIndex(
399         cast<LoadInst>(Inst)->getPointerOperand(), GEPVectorIdx);
400 
401     // We're loading the full vector.
402     Type *AccessTy = Inst->getType();
403     TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
404     if (AccessSize == VecStoreSize && cast<Constant>(Index)->isZeroValue()) {
405       if (AccessTy->isPtrOrPtrVectorTy())
406         CurVal = CreateTempPtrIntCast(CurVal, AccessTy);
407       else if (CurVal->getType()->isPtrOrPtrVectorTy())
408         CurVal = CreateTempPtrIntCast(CurVal, CurVal->getType());
409       Value *NewVal = Builder.CreateBitOrPointerCast(CurVal, AccessTy);
410       Inst->replaceAllUsesWith(NewVal);
411       return nullptr;
412     }
413 
414     // Loading a subvector.
415     if (isa<FixedVectorType>(AccessTy)) {
416       assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
417       const unsigned NumLoadedElts = AccessSize / DL.getTypeStoreSize(VecEltTy);
418       auto *SubVecTy = FixedVectorType::get(VecEltTy, NumLoadedElts);
419       assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));
420 
421       Value *SubVec = PoisonValue::get(SubVecTy);
422       for (unsigned K = 0; K < NumLoadedElts; ++K) {
423         Value *CurIdx =
424             Builder.CreateAdd(Index, ConstantInt::get(Index->getType(), K));
425         SubVec = Builder.CreateInsertElement(
426             SubVec, Builder.CreateExtractElement(CurVal, CurIdx), K);
427       }
428 
429       if (AccessTy->isPtrOrPtrVectorTy())
430         SubVec = CreateTempPtrIntCast(SubVec, AccessTy);
431       else if (SubVecTy->isPtrOrPtrVectorTy())
432         SubVec = CreateTempPtrIntCast(SubVec, SubVecTy);
433 
434       SubVec = Builder.CreateBitOrPointerCast(SubVec, AccessTy);
435       Inst->replaceAllUsesWith(SubVec);
436       return nullptr;
437     }
438 
439     // We're loading one element.
440     Value *ExtractElement = Builder.CreateExtractElement(CurVal, Index);
441     if (AccessTy != VecEltTy)
442       ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, AccessTy);
443 
444     Inst->replaceAllUsesWith(ExtractElement);
445     return nullptr;
446   }
447   case Instruction::Store: {
448     // For stores, it's a bit trickier and it depends on whether we're storing
449     // the full vector or not. If we're storing the full vector, we don't need
450     // to know the current value. If this is a store of a single element, we
451     // need to know the value.
452     StoreInst *SI = cast<StoreInst>(Inst);
453     Value *Index = calculateVectorIndex(SI->getPointerOperand(), GEPVectorIdx);
454     Value *Val = SI->getValueOperand();
455 
456     // We're storing the full vector; we can handle this without knowing CurVal.
457     Type *AccessTy = Val->getType();
458     TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
459     if (AccessSize == VecStoreSize && cast<Constant>(Index)->isZeroValue()) {
460       if (AccessTy->isPtrOrPtrVectorTy())
461         Val = CreateTempPtrIntCast(Val, AccessTy);
462       else if (VectorTy->isPtrOrPtrVectorTy())
463         Val = CreateTempPtrIntCast(Val, VectorTy);
464       return Builder.CreateBitOrPointerCast(Val, VectorTy);
465     }
466 
467     // Storing a subvector.
468     if (isa<FixedVectorType>(AccessTy)) {
469       assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
470       const unsigned NumWrittenElts =
471           AccessSize / DL.getTypeStoreSize(VecEltTy);
472       const unsigned NumVecElts = VectorTy->getNumElements();
473       auto *SubVecTy = FixedVectorType::get(VecEltTy, NumWrittenElts);
474       assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));
475 
476       if (SubVecTy->isPtrOrPtrVectorTy())
477         Val = CreateTempPtrIntCast(Val, SubVecTy);
478       else if (AccessTy->isPtrOrPtrVectorTy())
479         Val = CreateTempPtrIntCast(Val, AccessTy);
480 
481       Val = Builder.CreateBitOrPointerCast(Val, SubVecTy);
482 
483       Value *CurVec = GetOrLoadCurrentVectorValue();
484       for (unsigned K = 0, NumElts = std::min(NumWrittenElts, NumVecElts);
485            K < NumElts; ++K) {
486         Value *CurIdx =
487             Builder.CreateAdd(Index, ConstantInt::get(Index->getType(), K));
488         CurVec = Builder.CreateInsertElement(
489             CurVec, Builder.CreateExtractElement(Val, K), CurIdx);
490       }
491       return CurVec;
492     }
493 
494     if (Val->getType() != VecEltTy)
495       Val = Builder.CreateBitOrPointerCast(Val, VecEltTy);
496     return Builder.CreateInsertElement(GetOrLoadCurrentVectorValue(), Val,
497                                        Index);
498   }
499   case Instruction::Call: {
500     if (auto *MTI = dyn_cast<MemTransferInst>(Inst)) {
501       // For memcpy, we need to know CurVal.
502       ConstantInt *Length = cast<ConstantInt>(MTI->getLength());
503       unsigned NumCopied = Length->getZExtValue() / ElementSize;
504       MemTransferInfo *TI = &TransferInfo[MTI];
505       unsigned SrcBegin = TI->SrcIndex->getZExtValue();
506       unsigned DestBegin = TI->DestIndex->getZExtValue();
507 
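      // Lower the memcpy as a shufflevector: lanes [DestBegin, DestBegin +
      // NumCopied) take their values from SrcBegin onwards, and every other
      // lane keeps the current value of the promoted vector.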
508       SmallVector<int> Mask;
509       for (unsigned Idx = 0; Idx < VectorTy->getNumElements(); ++Idx) {
510         if (Idx >= DestBegin && Idx < DestBegin + NumCopied) {
511           Mask.push_back(SrcBegin++);
512         } else {
513           Mask.push_back(Idx);
514         }
515       }
516 
517       return Builder.CreateShuffleVector(GetOrLoadCurrentVectorValue(), Mask);
518     }
519 
520     if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
521       // For memset, we don't need to know the previous value because we
522       // currently only allow memsets that cover the whole alloca.
523       Value *Elt = MSI->getOperand(1);
524       const unsigned BytesPerElt = DL.getTypeStoreSize(VecEltTy);
525       if (BytesPerElt > 1) {
526         Value *EltBytes = Builder.CreateVectorSplat(BytesPerElt, Elt);
527 
528         // If the element type of the vector is a pointer, we need to first cast
529         // to an integer, then use a PtrCast.
530         if (VecEltTy->isPointerTy()) {
531           Type *PtrInt = Builder.getIntNTy(BytesPerElt * 8);
532           Elt = Builder.CreateBitCast(EltBytes, PtrInt);
533           Elt = Builder.CreateIntToPtr(Elt, VecEltTy);
534         } else
535           Elt = Builder.CreateBitCast(EltBytes, VecEltTy);
536       }
537 
538       return Builder.CreateVectorSplat(VectorTy->getElementCount(), Elt);
539     }
540 
541     if (auto *Intr = dyn_cast<IntrinsicInst>(Inst)) {
542       if (Intr->getIntrinsicID() == Intrinsic::objectsize) {
543         Intr->replaceAllUsesWith(
544             Builder.getIntN(Intr->getType()->getIntegerBitWidth(),
545                             DL.getTypeAllocSize(VectorTy)));
546         return nullptr;
547       }
548     }
549 
550     llvm_unreachable("Unsupported call when promoting alloca to vector");
551   }
552 
553   default:
554     llvm_unreachable("Inconsistency in instructions promotable to vector");
555   }
556 
557   llvm_unreachable("Did not return after promoting instruction!");
558 }
559 
560 static bool isSupportedAccessType(FixedVectorType *VecTy, Type *AccessTy,
561                                   const DataLayout &DL) {
562   // Access as a vector type can work if the size of the access vector is a
563   // multiple of the size of the alloca's vector element type.
564   //
565   // Examples:
566   //    - VecTy = <8 x float>, AccessTy = <4 x float> -> OK
567   //    - VecTy = <4 x double>, AccessTy = <2 x float> -> OK
568   //    - VecTy = <4 x double>, AccessTy = <3 x float> -> NOT OK
569   //        - 3*32 is not a multiple of 64
570   //
571   // We could handle more complicated access patterns, but doing so isn't
572   // worth the added complexity.
573   if (isa<FixedVectorType>(AccessTy)) {
574     TypeSize AccTS = DL.getTypeStoreSize(AccessTy);
575     TypeSize VecTS = DL.getTypeStoreSize(VecTy->getElementType());
576     return AccTS.isKnownMultipleOf(VecTS);
577   }
578 
579   return CastInst::isBitOrNoopPointerCastable(VecTy->getElementType(), AccessTy,
580                                               DL);
581 }
582 
583 /// Iterates over an instruction worklist that may contain multiple instructions
584 /// from the same block, possibly out of order, visiting them in program order.
585 template <typename InstContainer>
586 static void forEachWorkListItem(const InstContainer &WorkList,
587                                 std::function<void(Instruction *)> Fn) {
588   // Bucket up uses of the alloca by the block they occur in.
589   // This is important because we have to handle multiple defs/uses in a block
590   // ourselves: SSAUpdater is purely for cross-block references.
591   DenseMap<BasicBlock *, SmallDenseSet<Instruction *>> UsesByBlock;
592   for (Instruction *User : WorkList)
593     UsesByBlock[User->getParent()].insert(User);
594 
595   for (Instruction *User : WorkList) {
596     BasicBlock *BB = User->getParent();
597     auto &BlockUses = UsesByBlock[BB];
598 
599     // Already processed, skip.
600     if (BlockUses.empty())
601       continue;
602 
603     // Only user in the block, directly process it.
604     if (BlockUses.size() == 1) {
605       Fn(User);
606       continue;
607     }
608 
609     // Multiple users in the block, do a linear scan to visit them in program order.
610     for (Instruction &Inst : *BB) {
611       if (!BlockUses.contains(&Inst))
612         continue;
613 
614       Fn(&Inst);
615     }
616 
617     // Clear the block so we know it's been processed.
618     BlockUses.clear();
619   }
620 }
621 
622 // FIXME: Should try to pick the most likely to be profitable allocas first.
623 bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
624   LLVM_DEBUG(dbgs() << "Trying to promote to vector: " << Alloca << '\n');
625 
626   if (DisablePromoteAllocaToVector) {
627     LLVM_DEBUG(dbgs() << "  Promote alloca to vector is disabled\n");
628     return false;
629   }
630 
631   Type *AllocaTy = Alloca.getAllocatedType();
632   auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
633   if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
634     if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
635         ArrayTy->getNumElements() > 0)
636       VectorTy = FixedVectorType::get(ArrayTy->getElementType(),
637                                       ArrayTy->getNumElements());
638   }
639 
640   // Use up to 1/4 of available register budget for vectorization.
641   unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
642                                               : (MaxVGPRs * 32);
643 
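  // With no explicit limit, MaxVGPRs * 32 is the register budget in bits, so
  // the check below rejects allocas whose bit size exceeds a quarter of it.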
644   if (DL->getTypeSizeInBits(AllocaTy) * 4 > Limit) {
645     LLVM_DEBUG(dbgs() << "  Alloca too big for vectorization with " << MaxVGPRs
646                       << " registers available\n");
647     return false;
648   }
649 
650   // FIXME: There is no reason why we can't support larger arrays; we
651   // are just being conservative for now.
652   // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
653   // equivalent. Potentially these could also be promoted, but we don't
654   // currently handle this case.
655   if (!VectorTy) {
656     LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
657     return false;
658   }
659 
660   if (VectorTy->getNumElements() > 16 || VectorTy->getNumElements() < 2) {
661     LLVM_DEBUG(dbgs() << "  " << *VectorTy
662                       << " has an unsupported number of elements\n");
663     return false;
664   }
665 
666   std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
667   SmallVector<Instruction *> WorkList;
668   SmallVector<Instruction *> UsersToRemove;
669   SmallVector<Instruction *> DeferredInsts;
670   SmallVector<Use *, 8> Uses;
671   DenseMap<MemTransferInst *, MemTransferInfo> TransferInfo;
672 
673   const auto RejectUser = [&](Instruction *Inst, Twine Msg) {
674     LLVM_DEBUG(dbgs() << "  Cannot promote alloca to vector: " << Msg << "\n"
675                       << "    " << *Inst << "\n");
676     return false;
677   };
678 
679   for (Use &U : Alloca.uses())
680     Uses.push_back(&U);
681 
682   LLVM_DEBUG(dbgs() << "  Attempting promotion to: " << *VectorTy << "\n");
683 
684   Type *VecEltTy = VectorTy->getElementType();
685   unsigned ElementSize = DL->getTypeSizeInBits(VecEltTy) / 8;
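  // Walk the alloca's (transitive) users and classify them: supported
  // loads/stores, memset/memcpy users and objectsize intrinsics go on the
  // worklist, GEPs and bitcasts are looked through, assume-like uses (and
  // comparisons feeding them) are discarded, and any other user aborts the
  // promotion.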
686   while (!Uses.empty()) {
687     Use *U = Uses.pop_back_val();
688     Instruction *Inst = cast<Instruction>(U->getUser());
689 
690     if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
691       // This is a store of the pointer, not to the pointer.
692       if (isa<StoreInst>(Inst) &&
693           U->getOperandNo() != StoreInst::getPointerOperandIndex())
694         return RejectUser(Inst, "pointer is being stored");
695 
696       Type *AccessTy = getLoadStoreType(Inst);
697       if (AccessTy->isAggregateType())
698         return RejectUser(Inst, "unsupported load/store as aggregate");
699       assert(!AccessTy->isAggregateType() || AccessTy->isArrayTy());
700 
701       // Check that this is a simple access of a vector element.
702       bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
703                                           : cast<StoreInst>(Inst)->isSimple();
704       if (!IsSimple)
705         return RejectUser(Inst, "not a simple load or store");
706 
707       Ptr = Ptr->stripPointerCasts();
708 
709       // Alloca already accessed as vector.
710       if (Ptr == &Alloca && DL->getTypeStoreSize(Alloca.getAllocatedType()) ==
711                                 DL->getTypeStoreSize(AccessTy)) {
712         WorkList.push_back(Inst);
713         continue;
714       }
715 
716       if (!isSupportedAccessType(VectorTy, AccessTy, *DL))
717         return RejectUser(Inst, "not a supported access type");
718 
719       WorkList.push_back(Inst);
720       continue;
721     }
722 
723     if (isa<BitCastInst>(Inst)) {
724       // Look through bitcasts.
725       for (Use &U : Inst->uses())
726         Uses.push_back(&U);
727       UsersToRemove.push_back(Inst);
728       continue;
729     }
730 
731     if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
732       // If we can't compute a vector index from this GEP, then we can't
733       // promote this alloca to vector.
734       Value *Index = GEPToVectorIndex(GEP, &Alloca, VecEltTy, *DL);
735       if (!Index)
736         return RejectUser(Inst, "cannot compute vector index for GEP");
737 
738       GEPVectorIdx[GEP] = Index;
739       for (Use &U : Inst->uses())
740         Uses.push_back(&U);
741       UsersToRemove.push_back(Inst);
742       continue;
743     }
744 
745     if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst);
746         MSI && isSupportedMemset(MSI, &Alloca, *DL)) {
747       WorkList.push_back(Inst);
748       continue;
749     }
750 
751     if (MemTransferInst *TransferInst = dyn_cast<MemTransferInst>(Inst)) {
752       if (TransferInst->isVolatile())
753         return RejectUser(Inst, "mem transfer inst is volatile");
754 
755       ConstantInt *Len = dyn_cast<ConstantInt>(TransferInst->getLength());
756       if (!Len || (Len->getZExtValue() % ElementSize))
757         return RejectUser(Inst, "mem transfer inst length is non-constant or "
758                                 "not a multiple of the vector element size");
759 
760       if (!TransferInfo.count(TransferInst)) {
761         DeferredInsts.push_back(Inst);
762         WorkList.push_back(Inst);
763         TransferInfo[TransferInst] = MemTransferInfo();
764       }
765 
766       auto getPointerIndexOfAlloca = [&](Value *Ptr) -> ConstantInt * {
767         GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
768         if (Ptr != &Alloca && !GEPVectorIdx.count(GEP))
769           return nullptr;
770 
771         return dyn_cast<ConstantInt>(calculateVectorIndex(Ptr, GEPVectorIdx));
772       };
773 
774       unsigned OpNum = U->getOperandNo();
775       MemTransferInfo *TI = &TransferInfo[TransferInst];
776       if (OpNum == 0) {
777         Value *Dest = TransferInst->getDest();
778         ConstantInt *Index = getPointerIndexOfAlloca(Dest);
779         if (!Index)
780           return RejectUser(Inst, "could not calculate constant dest index");
781         TI->DestIndex = Index;
782       } else {
783         assert(OpNum == 1);
784         Value *Src = TransferInst->getSource();
785         ConstantInt *Index = getPointerIndexOfAlloca(Src);
786         if (!Index)
787           return RejectUser(Inst, "could not calculate constant src index");
788         TI->SrcIndex = Index;
789       }
790       continue;
791     }
792 
793     if (auto *Intr = dyn_cast<IntrinsicInst>(Inst)) {
794       if (Intr->getIntrinsicID() == Intrinsic::objectsize) {
795         WorkList.push_back(Inst);
796         continue;
797       }
798     }
799 
800     // Ignore assume-like intrinsics and comparisons used in assumes.
801     if (isAssumeLikeIntrinsic(Inst)) {
802       if (!Inst->use_empty())
803         return RejectUser(Inst, "assume-like intrinsic cannot have any users");
804       UsersToRemove.push_back(Inst);
805       continue;
806     }
807 
808     if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
809           return isAssumeLikeIntrinsic(cast<Instruction>(U));
810         })) {
811       UsersToRemove.push_back(Inst);
812       continue;
813     }
814 
815     return RejectUser(Inst, "unhandled alloca user");
816   }
817 
818   while (!DeferredInsts.empty()) {
819     Instruction *Inst = DeferredInsts.pop_back_val();
820     MemTransferInst *TransferInst = cast<MemTransferInst>(Inst);
821     // TODO: Support the case where the pointers come from different allocas
822     // or from different address spaces.
823     MemTransferInfo &Info = TransferInfo[TransferInst];
824     if (!Info.SrcIndex || !Info.DestIndex)
825       return RejectUser(
826           Inst, "mem transfer inst is missing constant src and/or dst index");
827   }
828 
829   LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
830                     << *VectorTy << '\n');
831   const unsigned VecStoreSize = DL->getTypeStoreSize(VectorTy);
832 
833   // Alloca is uninitialized memory. Imitate that by making the first value
834   // undef.
835   SSAUpdater Updater;
836   Updater.Initialize(VectorTy, "promotealloca");
837   Updater.AddAvailableValue(Alloca.getParent(), UndefValue::get(VectorTy));
838 
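  // The rewrite is a two-pass mini SSA construction: pass one only handles
  // users whose incoming vector value is already known in their block; pass
  // two resolves the dummy loads that were queued for everything else.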
839   // First handle the initial worklist.
840   SmallVector<LoadInst *, 4> DeferredLoads;
841   forEachWorkListItem(WorkList, [&](Instruction *I) {
842     BasicBlock *BB = I->getParent();
843     // On the first pass, we only take values that are trivially known, i.e.
844     // where AddAvailableValue was already called in this block.
845     Value *Result = promoteAllocaUserToVector(
846         I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
847         Updater.FindValueForBlock(BB), DeferredLoads);
848     if (Result)
849       Updater.AddAvailableValue(BB, Result);
850   });
851 
852   // Then handle deferred loads.
853   forEachWorkListItem(DeferredLoads, [&](Instruction *I) {
854     SmallVector<LoadInst *, 0> NewDLs;
855     BasicBlock *BB = I->getParent();
856     // On the second pass, we use GetValueInMiddleOfBlock to guarantee we always
857     // get a value, inserting PHIs as needed.
858     Value *Result = promoteAllocaUserToVector(
859         I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
860         Updater.GetValueInMiddleOfBlock(I->getParent()), NewDLs);
861     if (Result)
862       Updater.AddAvailableValue(BB, Result);
863     assert(NewDLs.empty() && "No more deferred loads should be queued!");
864   });
865 
866   // Delete all instructions. On the first pass, new dummy loads may have been
867   // added so we need to collect them too.
868   DenseSet<Instruction *> InstsToDelete(WorkList.begin(), WorkList.end());
869   InstsToDelete.insert(DeferredLoads.begin(), DeferredLoads.end());
870   for (Instruction *I : InstsToDelete) {
871     assert(I->use_empty());
872     I->eraseFromParent();
873   }
874 
875   // Delete all the users that are known to be removable.
876   for (Instruction *I : reverse(UsersToRemove)) {
877     I->dropDroppableUses();
878     assert(I->use_empty());
879     I->eraseFromParent();
880   }
881 
882   // Alloca should now be dead too.
883   assert(Alloca.use_empty());
884   Alloca.eraseFromParent();
885   return true;
886 }
887 
888 std::pair<Value *, Value *>
889 AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
890   Function &F = *Builder.GetInsertBlock()->getParent();
891   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
892 
893   if (!IsAMDHSA) {
894     Function *LocalSizeYFn =
895         Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
896     Function *LocalSizeZFn =
897         Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
898 
899     CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
900     CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
901 
902     ST.makeLIDRangeMetadata(LocalSizeY);
903     ST.makeLIDRangeMetadata(LocalSizeZ);
904 
905     return std::pair(LocalSizeY, LocalSizeZ);
906   }
907 
908   // We must read the size out of the dispatch pointer.
909   assert(IsAMDGCN);
910 
911   // We are indexing into this struct, and want to extract the workgroup_size_*
912   // fields.
913   //
914   //   typedef struct hsa_kernel_dispatch_packet_s {
915   //     uint16_t header;
916   //     uint16_t setup;
917   //     uint16_t workgroup_size_x ;
918   //     uint16_t workgroup_size_y;
919   //     uint16_t workgroup_size_z;
920   //     uint16_t reserved0;
921   //     uint32_t grid_size_x ;
922   //     uint32_t grid_size_y ;
923   //     uint32_t grid_size_z;
924   //
925   //     uint32_t private_segment_size;
926   //     uint32_t group_segment_size;
927   //     uint64_t kernel_object;
928   //
929   // #ifdef HSA_LARGE_MODEL
930   //     void *kernarg_address;
931   // #elif defined HSA_LITTLE_ENDIAN
932   //     void *kernarg_address;
933   //     uint32_t reserved1;
934   // #else
935   //     uint32_t reserved1;
936   //     void *kernarg_address;
937   // #endif
938   //     uint64_t reserved2;
939   //     hsa_signal_t completion_signal; // uint64_t wrapper
940   //   } hsa_kernel_dispatch_packet_t
941   //
942   Function *DispatchPtrFn =
943       Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
944 
945   CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
946   DispatchPtr->addRetAttr(Attribute::NoAlias);
947   DispatchPtr->addRetAttr(Attribute::NonNull);
948   F.removeFnAttr("amdgpu-no-dispatch-ptr");
949 
950   // Size of the dispatch packet struct.
951   DispatchPtr->addDereferenceableRetAttr(64);
952 
953   Type *I32Ty = Type::getInt32Ty(Mod->getContext());
954   Value *CastDispatchPtr = Builder.CreateBitCast(
955       DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
956 
957   // We could do a single 64-bit load here, but it's likely that the basic
958   // 32-bit and extract sequence is already present, and it is probably easier
959   // to CSE this. The loads should be mergeable later anyway.
960   Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
961   LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
962 
963   Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
964   LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
965 
966   MDNode *MD = MDNode::get(Mod->getContext(), std::nullopt);
967   LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
968   LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
969   ST.makeLIDRangeMetadata(LoadZU);
970 
971   // Extract y component. Upper half of LoadZU should be zero already.
972   Value *Y = Builder.CreateLShr(LoadXY, 16);
973 
974   return std::pair(Y, LoadZU);
975 }
976 
977 Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
978                                               unsigned N) {
979   Function *F = Builder.GetInsertBlock()->getParent();
980   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
981   Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
982   StringRef AttrName;
983 
984   switch (N) {
985   case 0:
986     IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
987                       : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
988     AttrName = "amdgpu-no-workitem-id-x";
989     break;
990   case 1:
991     IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
992                       : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
993     AttrName = "amdgpu-no-workitem-id-y";
994     break;
995 
996   case 2:
997     IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
998                       : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
999     AttrName = "amdgpu-no-workitem-id-z";
1000     break;
1001   default:
1002     llvm_unreachable("invalid dimension");
1003   }
1004 
1005   Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
1006   CallInst *CI = Builder.CreateCall(WorkitemIdFn);
1007   ST.makeLIDRangeMetadata(CI);
1008   F->removeFnAttr(AttrName);
1009 
1010   return CI;
1011 }
1012 
1013 static bool isCallPromotable(CallInst *CI) {
1014   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
1015   if (!II)
1016     return false;
1017 
1018   switch (II->getIntrinsicID()) {
1019   case Intrinsic::memcpy:
1020   case Intrinsic::memmove:
1021   case Intrinsic::memset:
1022   case Intrinsic::lifetime_start:
1023   case Intrinsic::lifetime_end:
1024   case Intrinsic::invariant_start:
1025   case Intrinsic::invariant_end:
1026   case Intrinsic::launder_invariant_group:
1027   case Intrinsic::strip_invariant_group:
1028   case Intrinsic::objectsize:
1029     return true;
1030   default:
1031     return false;
1032   }
1033 }
1034 
1035 bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
1036     Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
1037     int OpIdx1) const {
1038   // Figure out which operand is the one we might not be promoting.
1039   Value *OtherOp = Inst->getOperand(OpIdx0);
1040   if (Val == OtherOp)
1041     OtherOp = Inst->getOperand(OpIdx1);
1042 
1043   if (isa<ConstantPointerNull>(OtherOp))
1044     return true;
1045 
1046   Value *OtherObj = getUnderlyingObject(OtherOp);
1047   if (!isa<AllocaInst>(OtherObj))
1048     return false;
1049 
1050   // TODO: We should be able to replace undefs with the right pointer type.
1051 
1052   // TODO: If we know the other base object is another promotable
1053   // alloca, not necessarily this alloca, we can do this. The
1054   // important part is both must have the same address space at
1055   // the end.
1056   if (OtherObj != BaseAlloca) {
1057     LLVM_DEBUG(
1058         dbgs() << "Found a binary instruction with another alloca object\n");
1059     return false;
1060   }
1061 
1062   return true;
1063 }
1064 
1065 bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
1066     Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
1067 
1068   for (User *User : Val->users()) {
1069     if (is_contained(WorkList, User))
1070       continue;
1071 
1072     if (CallInst *CI = dyn_cast<CallInst>(User)) {
1073       if (!isCallPromotable(CI))
1074         return false;
1075 
1076       WorkList.push_back(User);
1077       continue;
1078     }
1079 
1080     Instruction *UseInst = cast<Instruction>(User);
1081     if (UseInst->getOpcode() == Instruction::PtrToInt)
1082       return false;
1083 
1084     if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
1085       if (LI->isVolatile())
1086         return false;
1087 
1088       continue;
1089     }
1090 
1091     if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
1092       if (SI->isVolatile())
1093         return false;
1094 
1095       // Reject if the stored value is not the pointer operand.
1096       if (SI->getPointerOperand() != Val)
1097         return false;
1098     } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
1099       if (RMW->isVolatile())
1100         return false;
1101     } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
1102       if (CAS->isVolatile())
1103         return false;
1104     }
1105 
1106     // Only promote a comparison if we know that the other operand is
1107     // derived from another pointer that will also be promoted.
1108     if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
1109       if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
1110         return false;
1111 
1112       // May need to rewrite constant operands.
1113       WorkList.push_back(ICmp);
1114     }
1115 
1116     if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
1117       // Give up if the pointer may be captured.
1118       if (PointerMayBeCaptured(UseInst, true, true))
1119         return false;
1120       // Don't collect the users of this.
1121       WorkList.push_back(User);
1122       continue;
1123     }
1124 
1125     // Do not promote vector/aggregate type instructions. It is hard to track
1126     // their users.
1127     if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
1128       return false;
1129 
1130     if (!User->getType()->isPointerTy())
1131       continue;
1132 
1133     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
1134       // Be conservative if an address could be computed outside the bounds of
1135       // the alloca.
1136       if (!GEP->isInBounds())
1137         return false;
1138     }
1139 
1140     // Only promote a select if we know that the other select operand is from
1141     // another pointer that will also be promoted.
1142     if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
1143       if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
1144         return false;
1145     }
1146 
1147     // Repeat for phis.
1148     if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
1149       // TODO: Handle more complex cases. We should be able to replace loops
1150       // over arrays.
1151       switch (Phi->getNumIncomingValues()) {
1152       case 1:
1153         break;
1154       case 2:
1155         if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
1156           return false;
1157         break;
1158       default:
1159         return false;
1160       }
1161     }
1162 
1163     WorkList.push_back(User);
1164     if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
1165       return false;
1166   }
1167 
1168   return true;
1169 }
1170 
1171 bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
1172 
1173   FunctionType *FTy = F.getFunctionType();
1174   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
1175 
1176   // If the function has any arguments in the local address space, then it's
1177   // possible these arguments require the entire local memory space, so
1178   // we cannot use local memory in the pass.
1179   for (Type *ParamTy : FTy->params()) {
1180     PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
1181     if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
1182       LocalMemLimit = 0;
1183       LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
1184                            "local memory disabled.\n");
1185       return false;
1186     }
1187   }
1188 
1189   LocalMemLimit = ST.getAddressableLocalMemorySize();
1190   if (LocalMemLimit == 0)
1191     return false;
1192 
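  // Scan every LDS global transitively used by this function (including uses
  // through ConstantExprs), estimate the LDS they already consume with
  // worst-case alignment padding, and then cap the remaining budget so that
  // promotion does not reduce the occupancy the kernel could otherwise reach.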
1193   SmallVector<const Constant *, 16> Stack;
1194   SmallPtrSet<const Constant *, 8> VisitedConstants;
1195   SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
1196 
1197   auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
1198     for (const User *U : Val->users()) {
1199       if (const Instruction *Use = dyn_cast<Instruction>(U)) {
1200         if (Use->getParent()->getParent() == &F)
1201           return true;
1202       } else {
1203         const Constant *C = cast<Constant>(U);
1204         if (VisitedConstants.insert(C).second)
1205           Stack.push_back(C);
1206       }
1207     }
1208 
1209     return false;
1210   };
1211 
1212   for (GlobalVariable &GV : Mod->globals()) {
1213     if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
1214       continue;
1215 
1216     if (visitUsers(&GV, &GV)) {
1217       UsedLDS.insert(&GV);
1218       Stack.clear();
1219       continue;
1220     }
1221 
1222     // For any ConstantExpr uses, we need to recursively search the users until
1223     // we see a function.
1224     while (!Stack.empty()) {
1225       const Constant *C = Stack.pop_back_val();
1226       if (visitUsers(&GV, C)) {
1227         UsedLDS.insert(&GV);
1228         Stack.clear();
1229         break;
1230       }
1231     }
1232   }
1233 
1234   const DataLayout &DL = Mod->getDataLayout();
1235   SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
1236   AllocatedSizes.reserve(UsedLDS.size());
1237 
1238   for (const GlobalVariable *GV : UsedLDS) {
1239     Align Alignment =
1240         DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
1241     uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
1242 
1243     // HIP uses an extern unsized array in local address space for dynamically
1244     // allocated shared memory.  In that case, we have to disable the promotion.
1245     if (GV->hasExternalLinkage() && AllocSize == 0) {
1246       LocalMemLimit = 0;
1247       LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
1248                            "local memory. Promoting to local memory "
1249                            "disabled.\n");
1250       return false;
1251     }
1252 
1253     AllocatedSizes.emplace_back(AllocSize, Alignment);
1254   }
1255 
1256   // Sort to try to estimate the worst case alignment padding
1257   //
1258   // FIXME: We should really do something to fix the addresses to a more optimal
1259   // value instead
1260   llvm::sort(AllocatedSizes, llvm::less_second());
1261 
1262   // Check how much local memory is being used by global objects
1263   CurrentLocalMemUsage = 0;
1264 
1265   // FIXME: Try to account for padding here. The real padding and address is
1266   // currently determined from the inverse order of uses in the function when
1267   // legalizing, which could also potentially change. We try to estimate the
1268   // worst case here, but we probably should fix the addresses earlier.
1269   for (auto Alloc : AllocatedSizes) {
1270     CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
1271     CurrentLocalMemUsage += Alloc.first;
1272   }
1273 
1274   unsigned MaxOccupancy =
1275       ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage, F);
1276 
1277   // Restrict local memory usage so that we don't drastically reduce occupancy,
1278   // unless it is already significantly reduced.
1279 
1280   // TODO: Have some sort of hint or other heuristics to guess occupancy based
1281   // on other factors.
1282   unsigned OccupancyHint = ST.getWavesPerEU(F).second;
1283   if (OccupancyHint == 0)
1284     OccupancyHint = 7;
1285 
1286   // Clamp to max value.
1287   OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
1288 
1289   // Check the hint but ignore it if it's obviously wrong from the existing LDS
1290   // usage.
1291   MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
1292 
1293   // Round up to the next tier of usage.
1294   unsigned MaxSizeWithWaveCount =
1295       ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
1296 
1297   // Program is possibly broken by using more local mem than available.
1298   if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
1299     return false;
1300 
1301   LocalMemLimit = MaxSizeWithWaveCount;
1302 
1303   LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
1304                     << " bytes of LDS\n"
1305                     << "  Rounding size to " << MaxSizeWithWaveCount
1306                     << " with a maximum occupancy of " << MaxOccupancy << '\n'
1307                     << " and " << (LocalMemLimit - CurrentLocalMemUsage)
1308                     << " available for promotion\n");
1309 
1310   return true;
1311 }
1312 
1313 // FIXME: Should try to pick the most likely to be profitable allocas first.
1314 bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
1315                                                     bool SufficientLDS) {
1316   LLVM_DEBUG(dbgs() << "Trying to promote to LDS: " << I << '\n');
1317 
1318   if (DisablePromoteAllocaToLDS) {
1319     LLVM_DEBUG(dbgs() << "  Promote alloca to LDS is disabled\n");
1320     return false;
1321   }
1322 
1323   const DataLayout &DL = Mod->getDataLayout();
1324   IRBuilder<> Builder(&I);
1325 
1326   const Function &ContainingFunction = *I.getParent()->getParent();
1327   CallingConv::ID CC = ContainingFunction.getCallingConv();
1328 
1329   // Don't promote the alloca to LDS for shader calling conventions as the work
1330   // item ID intrinsics are not supported for these calling conventions.
1331   // Furthermore not all LDS is available for some of the stages.
1332   switch (CC) {
1333   case CallingConv::AMDGPU_KERNEL:
1334   case CallingConv::SPIR_KERNEL:
1335     break;
1336   default:
1337     LLVM_DEBUG(
1338         dbgs()
1339         << " promote alloca to LDS not supported with calling convention.\n");
1340     return false;
1341   }
1342 
1343   // Not likely to have sufficient local memory for promotion.
1344   if (!SufficientLDS)
1345     return false;
1346 
1347   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
1348   unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
1349 
1350   Align Alignment =
1351       DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
1352 
1353   // FIXME: This computed padding is likely wrong since it depends on inverse
1354   // usage order.
1355   //
1356   // FIXME: It is also possible that, if we're allowed to use all of the memory,
1357   // we could end up using more than the maximum due to alignment padding.
1358 
1359   uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
1360   uint32_t AllocSize =
1361       WorkGroupSize * DL.getTypeAllocSize(I.getAllocatedType());
1362   NewSize += AllocSize;
1363 
1364   if (NewSize > LocalMemLimit) {
1365     LLVM_DEBUG(dbgs() << "  " << AllocSize
1366                       << " bytes of local memory not available to promote\n");
1367     return false;
1368   }
1369 
1370   CurrentLocalMemUsage = NewSize;
1371 
1372   std::vector<Value *> WorkList;
1373 
1374   if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
1375     LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
1376     return false;
1377   }
1378 
1379   LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
1380 
1381   Function *F = I.getParent()->getParent();
1382 
1383   Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
1384   GlobalVariable *GV = new GlobalVariable(
1385       *Mod, GVTy, false, GlobalValue::InternalLinkage, PoisonValue::get(GVTy),
1386       Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
1387       GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
1388   GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1389   GV->setAlignment(I.getAlign());
1390 
1391   Value *TCntY, *TCntZ;
1392 
1393   std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
1394   Value *TIdX = getWorkitemID(Builder, 0);
1395   Value *TIdY = getWorkitemID(Builder, 1);
1396   Value *TIdZ = getWorkitemID(Builder, 2);
1397 
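  // Linearize the 3D workitem id within the workgroup:
  //   TID = TIdX * TCntY * TCntZ + TIdY * TCntZ + TIdZ
  // Each workitem then indexes its own slot of the LDS array created above.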
1398   Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
1399   Tmp0 = Builder.CreateMul(Tmp0, TIdX);
1400   Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
1401   Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
1402   TID = Builder.CreateAdd(TID, TIdZ);
1403 
1404   LLVMContext &Context = Mod->getContext();
1405   Value *Indices[] = {Constant::getNullValue(Type::getInt32Ty(Context)), TID};
1406 
1407   Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
1408   I.mutateType(Offset->getType());
1409   I.replaceAllUsesWith(Offset);
1410   I.eraseFromParent();
1411 
1412   SmallVector<IntrinsicInst *> DeferredIntrs;
1413 
1414   for (Value *V : WorkList) {
1415     CallInst *Call = dyn_cast<CallInst>(V);
1416     if (!Call) {
1417       if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
1418         PointerType *NewTy = PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS);
1419 
1420         if (isa<ConstantPointerNull>(CI->getOperand(0)))
1421           CI->setOperand(0, ConstantPointerNull::get(NewTy));
1422 
1423         if (isa<ConstantPointerNull>(CI->getOperand(1)))
1424           CI->setOperand(1, ConstantPointerNull::get(NewTy));
1425 
1426         continue;
1427       }
1428 
1429       // The operand's value should be corrected on its own and we don't want to
1430       // touch the users.
1431       if (isa<AddrSpaceCastInst>(V))
1432         continue;
1433 
1434       PointerType *NewTy = PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS);
1435 
1436       // FIXME: It doesn't really make sense to try to do this for all
1437       // instructions.
1438       V->mutateType(NewTy);
1439 
1440       // Adjust the types of any constant operands.
1441       if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1442         if (isa<ConstantPointerNull>(SI->getOperand(1)))
1443           SI->setOperand(1, ConstantPointerNull::get(NewTy));
1444 
1445         if (isa<ConstantPointerNull>(SI->getOperand(2)))
1446           SI->setOperand(2, ConstantPointerNull::get(NewTy));
1447       } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
1448         for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1449           if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
1450             Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
1451         }
1452       }
1453 
1454       continue;
1455     }
1456 
1457     IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
1458     Builder.SetInsertPoint(Intr);
1459     switch (Intr->getIntrinsicID()) {
1460     case Intrinsic::lifetime_start:
1461     case Intrinsic::lifetime_end:
1462       // These intrinsics are for address space 0 only
1463       Intr->eraseFromParent();
1464       continue;
1465     case Intrinsic::memcpy:
1466     case Intrinsic::memmove:
1467       // These have 2 pointer operands. In case the second pointer also needs
1468       // to be replaced, we defer processing of these intrinsics until all
1469       // other values are processed.
1470       DeferredIntrs.push_back(Intr);
1471       continue;
1472     case Intrinsic::memset: {
1473       MemSetInst *MemSet = cast<MemSetInst>(Intr);
1474       Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
1475                            MemSet->getLength(), MemSet->getDestAlign(),
1476                            MemSet->isVolatile());
1477       Intr->eraseFromParent();
1478       continue;
1479     }
1480     case Intrinsic::invariant_start:
1481     case Intrinsic::invariant_end:
1482     case Intrinsic::launder_invariant_group:
1483     case Intrinsic::strip_invariant_group:
1484       Intr->eraseFromParent();
1485       // FIXME: I think the invariant marker should still theoretically apply,
1486       // but the intrinsics need to be changed to accept pointers with any
1487       // address space.
1488       continue;
1489     case Intrinsic::objectsize: {
1490       Value *Src = Intr->getOperand(0);
1491       Function *ObjectSize = Intrinsic::getDeclaration(
1492           Mod, Intrinsic::objectsize,
1493           {Intr->getType(),
1494            PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS)});
1495 
1496       CallInst *NewCall = Builder.CreateCall(
1497           ObjectSize,
1498           {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
1499       Intr->replaceAllUsesWith(NewCall);
1500       Intr->eraseFromParent();
1501       continue;
1502     }
1503     default:
1504       Intr->print(errs());
1505       llvm_unreachable("Don't know how to promote alloca intrinsic use.");
1506     }
1507   }
1508 
1509   for (IntrinsicInst *Intr : DeferredIntrs) {
1510     Builder.SetInsertPoint(Intr);
1511     Intrinsic::ID ID = Intr->getIntrinsicID();
1512     assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);
1513 
1514     MemTransferInst *MI = cast<MemTransferInst>(Intr);
1515     auto *B = Builder.CreateMemTransferInst(
1516         ID, MI->getRawDest(), MI->getDestAlign(), MI->getRawSource(),
1517         MI->getSourceAlign(), MI->getLength(), MI->isVolatile());
1518 
1519     for (unsigned I = 0; I != 2; ++I) {
1520       if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
1521         B->addDereferenceableParamAttr(I, Bytes);
1522       }
1523     }
1524 
1525     Intr->eraseFromParent();
1526   }
1527 
1528   return true;
1529 }
1530