//===-- AMDGPULowerKernelArguments.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass replaces accesses to kernel arguments with loads from
/// offsets from the kernarg base pointer.
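///
/// For example (an illustrative sketch, assuming a zero explicit kernarg base
/// offset), a use of an i32 kernel argument %x of kernel @f is rewritten to
/// roughly:
///
///   %f.kernarg.segment = call ptr addrspace(4)
///       @llvm.amdgcn.kernarg.segment.ptr()
///   %x.kernarg.offset = getelementptr inbounds i8, ptr addrspace(4)
///       %f.kernarg.segment, i64 0
///   %x.load = load i32, ptr addrspace(4) %x.kernarg.offset, align 16,
///       !invariant.load !0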
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {

class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace
// Skip allocas.
static BasicBlock::iterator getInsertPt(BasicBlock &BB) {
  BasicBlock::iterator InsPt = BB.getFirstInsertionPt();
  for (BasicBlock::iterator E = BB.end(); InsPt != E; ++InsPt) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&*InsPt);

    // If this is a dynamic alloca, the value may depend on the loaded kernargs,
    // so loads will need to be inserted before it.
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  return InsPt;
}

bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  auto &TPC = getAnalysis<TargetPassConfig>();

  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getParent()->getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  BasicBlock &EntryBlock = *F.begin();
  IRBuilder<> Builder(&*getInsertPt(EntryBlock));

  const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();

  Align MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {},
                              nullptr, F.getName() + ".kernarg.segment");

  KernArgSegment->addRetAttr(Attribute::NonNull);
  KernArgSegment->addRetAttr(
      Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));

  uint64_t ExplicitArgOffset = 0;

  for (Argument &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABITypeAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t Size = DL.getTypeSizeInBits(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);

    uint64_t EltOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;
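    // Worked example (illustrative): with BaseOffset 0 and arguments
    // (i8 %a, i32 %b), %a gets EltOffset 0 and ExplicitArgOffset advances
    // to 1; %b is then aligned up to 4, so it gets EltOffset 4 and
    // ExplicitArgOffset advances to 8.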

    if (Arg.use_empty())
      continue;

    // If this is byref, the loads are already explicit in the function. We
    // just need to rewrite the pointer values.
    if (IsByRef) {
      Value *ArgOffsetPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".byval.kernarg.offset");

      Value *CastOffsetPtr =
          Builder.CreateAddrSpaceCast(ArgOffsetPtr, Arg.getType());
      Arg.replaceAllUsesWith(CastOffsetPtr);
      continue;
    }

    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if ((PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
           PT->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) &&
          !ST.hasUsableDSOffset())
        continue;

      // FIXME: We can replace this with equivalent alias.scope/noalias
      // metadata, but this appears to be a lot of work.
      if (Arg.hasNoAliasAttr())
        continue;
    }

    auto *VT = dyn_cast<FixedVectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    Align AdjustedAlign = commonAlignment(
        KernArgBaseAlign, DoShiftOpt ? AlignDownOffset : EltOffset);

    Value *ArgPtr;
    Type *AdjustedArgTy;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
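      //
      // For example (illustrative): an i16 argument at EltOffset 2 gives
      // AlignDownOffset 0 and OffsetDiff 2; the i32 loaded at offset 0 is
      // shifted right by 16 bits and truncated back to i16 below.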
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
          Arg.getName() + ".kernarg.offset.align.down");
      AdjustedArgTy = Builder.getInt32Ty();
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".kernarg.offset");
      AdjustedArgTy = ArgTy;
    }

    if (IsV3 && Size >= 32) {
      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads.
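      //
      // For example (illustrative): a v3i32 argument is loaded as v4i32
      // here, and the shufflevector below extracts elements 0-2 to rebuild
      // the v3 value.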
      AdjustedArgTy = V4Ty;
    }

    LoadInst *Load =
        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable,
          MDNode::get(Ctx,
                      MDB.createConstant(
                        ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable_or_null,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          DerefOrNullBytes))));
      }

      if (MaybeAlign ParamAlign = Arg.getParamAlign()) {
        Load->setMetadata(
            LLVMContext::MD_align,
            MDNode::get(Ctx, MDB.createConstant(ConstantInt::get(
                                 Builder.getInt64Ty(), ParamAlign->value()))));
      }
    }

    // TODO: Convert noalias arg to !noalias

    if (DoShiftOpt) {
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

  KernArgSegment->addRetAttr(
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}