//===- SIMachineFunctionInfo.cpp - SI Machine Function Info ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDGPUSubtarget.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include <cassert>
#include <vector>

#define MAX_LANES 64

using namespace llvm;

SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    KernargSegmentPtr(false),
    DispatchID(false),
    FlatScratchInit(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false),
    ImplicitBufferPtr(false),
    ImplicitArgPtr(false),
    GITPtrHigh(0xffffffff),
    HighBitsOf32BitAddress(0),
    GDSSize(0) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const Function &F = MF.getFunction();
  FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F);
  WavesPerEU = ST.getWavesPerEU(F);

  Occupancy = ST.computeOccupancy(F, getLDSSize());
  CallingConv::ID CC = F.getCallingConv();

  // FIXME: Should have analysis or something rather than attribute to detect
  // calls.
  const bool HasCalls = F.hasFnAttribute("amdgpu-calls");

  // Enable all kernel inputs if we have the fixed ABI. Don't bother if we don't
  // have any calls.
  const bool UseFixedABI = AMDGPUTargetMachine::EnableFixedFunctionABI &&
                           (!isEntryFunction() || HasCalls);

  if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL) {
    if (!F.arg_empty())
      KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  } else if (CC == CallingConv::AMDGPU_PS) {
    PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
  }

  if (!isEntryFunction()) {
    // TODO: Pick a high register, and shift down, similar to a kernel.
    FrameOffsetReg = AMDGPU::SGPR33;
    StackPtrOffsetReg = AMDGPU::SGPR32;

    if (!ST.enableFlatScratch()) {
      // Non-entry functions have no special inputs for now; other registers
      // are required for scratch access.
      ScratchRSrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;

      ArgInfo.PrivateSegmentBuffer =
        ArgDescriptor::createRegister(ScratchRSrcReg);
    }

    if (F.hasFnAttribute("amdgpu-implicitarg-ptr"))
      ImplicitArgPtr = true;
  } else {
    if (F.hasFnAttribute("amdgpu-implicitarg-ptr")) {
      KernargSegmentPtr = true;
      MaxKernArgAlign = std::max(ST.getAlignmentForImplicitArgPtr(),
                                 MaxKernArgAlign);
    }
  }

  if (UseFixedABI) {
    WorkGroupIDX = true;
    WorkGroupIDY = true;
    WorkGroupIDZ = true;
    WorkItemIDX = true;
    WorkItemIDY = true;
    WorkItemIDZ = true;
    ImplicitArgPtr = true;
  } else {
    if (F.hasFnAttribute("amdgpu-work-group-id-x"))
      WorkGroupIDX = true;

    if (F.hasFnAttribute("amdgpu-work-group-id-y"))
      WorkGroupIDY = true;

    if (F.hasFnAttribute("amdgpu-work-group-id-z"))
      WorkGroupIDZ = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-x"))
      WorkItemIDX = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-y"))
      WorkItemIDY = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-z"))
      WorkItemIDZ = true;
  }

  bool HasStackObjects = F.hasFnAttribute("amdgpu-stack-objects");
  if (isEntryFunction()) {
    // X, XY, and XYZ are the only supported combinations, so make sure Y is
    // enabled if Z is.
    if (WorkItemIDZ)
      WorkItemIDY = true;

    PrivateSegmentWaveByteOffset = true;

    // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
        (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
      ArgInfo.PrivateSegmentWaveByteOffset =
          ArgDescriptor::createRegister(AMDGPU::SGPR5);
  }

  bool isAmdHsaOrMesa = ST.isAmdHsaOrMesa(F);
  if (isAmdHsaOrMesa) {
    if (!ST.enableFlatScratch())
      PrivateSegmentBuffer = true;

    if (UseFixedABI) {
      DispatchPtr = true;
      QueuePtr = true;

      // FIXME: We don't need this?
      DispatchID = true;
    } else {
      if (F.hasFnAttribute("amdgpu-dispatch-ptr"))
        DispatchPtr = true;

      if (F.hasFnAttribute("amdgpu-queue-ptr"))
        QueuePtr = true;

      if (F.hasFnAttribute("amdgpu-dispatch-id"))
        DispatchID = true;
    }
  } else if (ST.isMesaGfxShader(F)) {
    ImplicitBufferPtr = true;
  }

  if (UseFixedABI || F.hasFnAttribute("amdgpu-kernarg-segment-ptr"))
    KernargSegmentPtr = true;

  if (ST.hasFlatAddressSpace() && isEntryFunction() &&
      (isAmdHsaOrMesa || ST.enableFlatScratch())) {
    // TODO: This could be refined a lot. The attribute is a poor way of
    // detecting calls or stack objects that may require it before argument
    // lowering.
    if (HasCalls || HasStackObjects || ST.enableFlatScratch())
      FlatScratchInit = true;
  }

  Attribute A = F.getFnAttribute("amdgpu-git-ptr-high");
  StringRef S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, GITPtrHigh);

  A = F.getFnAttribute("amdgpu-32bit-address-high-bits");
  S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, HighBitsOf32BitAddress);

  S = F.getFnAttribute("amdgpu-gds-size").getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, GDSSize);
}

void SIMachineFunctionInfo::limitOccupancy(const MachineFunction &MF) {
  limitOccupancy(getMaxWavesPerEU());
  const GCNSubtarget& ST = MF.getSubtarget<GCNSubtarget>();
  limitOccupancy(ST.getOccupancyWithLocalMemSize(getLDSSize(),
                 MF.getFunction()));
}

Register SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
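  // The private segment (scratch) buffer is a 128-bit resource descriptor, so
  // it occupies four consecutive user SGPRs.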
  ArgInfo.PrivateSegmentBuffer =
    ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
      getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SGPR_128RegClass));
  NumUserSGPRs += 4;
  return ArgInfo.PrivateSegmentBuffer.getRegister();
}

Register SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchPtr.getRegister();
}

Register SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  ArgInfo.QueuePtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.QueuePtr.getRegister();
}

Register SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) {
  ArgInfo.KernargSegmentPtr
    = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
      getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.KernargSegmentPtr.getRegister();
}

Register SIMachineFunctionInfo::addDispatchID(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchID = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchID.getRegister();
}

Register SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  ArgInfo.FlatScratchInit = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.FlatScratchInit.getRegister();
}

Register SIMachineFunctionInfo::addImplicitBufferPtr(const SIRegisterInfo &TRI) {
  ArgInfo.ImplicitBufferPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.ImplicitBufferPtr.getRegister();
}

bool SIMachineFunctionInfo::isCalleeSavedReg(const MCPhysReg *CSRegs,
                                             MCPhysReg Reg) {
  for (unsigned I = 0; CSRegs[I]; ++I) {
    if (CSRegs[I] == Reg)
      return true;
  }

  return false;
}

/// \returns true if \p NumNeed lanes are available in VGPRs already used for
/// SGPR spilling.
//
// FIXME: This only works after processFunctionBeforeFrameFinalized
bool SIMachineFunctionInfo::haveFreeLanesForSGPRSpill(const MachineFunction &MF,
                                                      unsigned NumNeed) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  unsigned WaveSize = ST.getWavefrontSize();
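  // Each VGPR already set aside for SGPR spilling provides WaveSize 32-bit
  // lanes of spill capacity.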
  return NumVGPRSpillLanes + NumNeed <= WaveSize * SpillVGPRs.size();
}

/// Reserve a slice of a VGPR to support spilling for FrameIndex \p FI.
bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
                                                    int FI) {
  std::vector<SpilledReg> &SpillLanes = SGPRToVGPRSpills[FI];

  // This has already been allocated.
  if (!SpillLanes.empty())
    return true;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned WaveSize = ST.getWavefrontSize();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

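  // SGPR spill slots are multiples of 4 bytes; each spilled 32-bit SGPR
  // occupies one lane of a spill VGPR.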
  unsigned Size = FrameInfo.getObjectSize(FI);
  unsigned NumLanes = Size / 4;

  if (NumLanes > WaveSize)
    return false;

  assert(Size >= 4 && "invalid sgpr spill size");
  assert(TRI->spillSGPRToVGPR() && "not spilling SGPRs to VGPRs");

  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();

  // Make sure to handle the case where a wide SGPR spill may span between two
  // VGPRs.
  for (unsigned I = 0; I < NumLanes; ++I, ++NumVGPRSpillLanes) {
    Register LaneVGPR;
    unsigned VGPRIndex = (NumVGPRSpillLanes % WaveSize);

    // Reserve a VGPR (when NumVGPRSpillLanes = 0, WaveSize, 2*WaveSize, ..)
    // when one of the two conditions is true:
    // 1. One reserved VGPR being tracked by VGPRReservedForSGPRSpill is not
    //    yet reserved.
    // 2. All spill lanes of reserved VGPR(s) are full and another spill lane
    //    is required.
    if (FuncInfo->VGPRReservedForSGPRSpill && NumVGPRSpillLanes < WaveSize) {
      assert(FuncInfo->VGPRReservedForSGPRSpill == SpillVGPRs.back().VGPR);
      LaneVGPR = FuncInfo->VGPRReservedForSGPRSpill;
    } else if (VGPRIndex == 0) {
      LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
      if (LaneVGPR == AMDGPU::NoRegister) {
        // We have no VGPRs left for spilling SGPRs. Reset because we will not
        // partially spill the SGPR to VGPRs.
        SGPRToVGPRSpills.erase(FI);
        NumVGPRSpillLanes -= I;
        return false;
      }

      Optional<int> CSRSpillFI;
      if ((FrameInfo.hasCalls() || !isEntryFunction()) && CSRegs &&
          isCalleeSavedReg(CSRegs, LaneVGPR)) {
        CSRSpillFI = FrameInfo.CreateSpillStackObject(4, Align(4));
      }

      SpillVGPRs.push_back(SGPRSpillVGPRCSR(LaneVGPR, CSRSpillFI));

      // Add this register as live-in to all blocks to avoid machine verifier
      // complaining about use of an undefined physical register.
      for (MachineBasicBlock &BB : MF)
        BB.addLiveIn(LaneVGPR);
    } else {
      LaneVGPR = SpillVGPRs.back().VGPR;
    }

    SpillLanes.push_back(SpilledReg(LaneVGPR, VGPRIndex));
  }

  return true;
}

/// Reserve a VGPR for spilling of SGPRs
bool SIMachineFunctionInfo::reserveVGPRforSGPRSpills(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  Register LaneVGPR = TRI->findUnusedRegister(
      MF.getRegInfo(), &AMDGPU::VGPR_32RegClass, MF, true);
  if (LaneVGPR == Register())
    return false;
  SpillVGPRs.push_back(SGPRSpillVGPRCSR(LaneVGPR, None));
  FuncInfo->VGPRReservedForSGPRSpill = LaneVGPR;
  return true;
}

/// Reserve AGPRs or VGPRs to support spilling for FrameIndex \p FI.
/// Either AGPR is spilled to VGPR or vice versa.
/// Returns true if \p FI can be eliminated completely.
bool SIMachineFunctionInfo::allocateVGPRSpillToAGPR(MachineFunction &MF,
                                                    int FI,
                                                    bool isAGPRtoVGPR) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  assert(ST.hasMAIInsts() && FrameInfo.isSpillSlotObjectIndex(FI));

  auto &Spill = VGPRToAGPRSpills[FI];

  // This has already been allocated.
  if (!Spill.Lanes.empty())
    return Spill.FullyAllocated;

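  // The spill is split into 32-bit lanes, one AGPR (or VGPR) per lane.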
  unsigned Size = FrameInfo.getObjectSize(FI);
  unsigned NumLanes = Size / 4;
  Spill.Lanes.resize(NumLanes, AMDGPU::NoRegister);

  const TargetRegisterClass &RC =
      isAGPRtoVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::AGPR_32RegClass;
  auto Regs = RC.getRegisters();

  auto &SpillRegs = isAGPRtoVGPR ? SpillAGPR : SpillVGPR;
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  Spill.FullyAllocated = true;

  // FIXME: Move allocation logic out of MachineFunctionInfo and initialize
  // once.
  BitVector OtherUsedRegs;
  OtherUsedRegs.resize(TRI->getNumRegs());

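  // Exclude registers in the call-preserved (callee-saved) mask for this
  // function's calling convention from the candidate set.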
  const uint32_t *CSRMask =
      TRI->getCallPreservedMask(MF, MF.getFunction().getCallingConv());
  if (CSRMask)
    OtherUsedRegs.setBitsInMask(CSRMask);

  // TODO: Should include register tuples, but doesn't matter with current
  // usage.
  for (MCPhysReg Reg : SpillAGPR)
    OtherUsedRegs.set(Reg);
  for (MCPhysReg Reg : SpillVGPR)
    OtherUsedRegs.set(Reg);

  SmallVectorImpl<MCPhysReg>::const_iterator NextSpillReg = Regs.begin();
  for (unsigned I = 0; I < NumLanes; ++I) {
    NextSpillReg = std::find_if(
        NextSpillReg, Regs.end(), [&MRI, &OtherUsedRegs](MCPhysReg Reg) {
          return MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg) &&
                 !OtherUsedRegs[Reg];
        });

    if (NextSpillReg == Regs.end()) { // Registers exhausted
      Spill.FullyAllocated = false;
      break;
    }

    OtherUsedRegs.set(*NextSpillReg);
    SpillRegs.push_back(*NextSpillReg);
    Spill.Lanes[I] = *NextSpillReg++;
  }

  return Spill.FullyAllocated;
}

void SIMachineFunctionInfo::removeDeadFrameIndices(MachineFrameInfo &MFI) {
  // The FP & BP spills haven't been inserted yet, so keep them around.
  for (auto &R : SGPRToVGPRSpills) {
    if (R.first != FramePointerSaveIndex && R.first != BasePointerSaveIndex)
      MFI.RemoveStackObject(R.first);
  }

  // All other SGPRs must be allocated on the default stack, so reset the
  // stack ID.
  for (int i = MFI.getObjectIndexBegin(), e = MFI.getObjectIndexEnd(); i != e;
       ++i)
    if (i != FramePointerSaveIndex && i != BasePointerSaveIndex)
      MFI.setStackID(i, TargetStackID::Default);

  for (auto &R : VGPRToAGPRSpills) {
    if (R.second.FullyAllocated)
      MFI.RemoveStackObject(R.first);
  }
}

MCPhysReg SIMachineFunctionInfo::getNextUserSGPR() const {
  assert(NumSystemSGPRs == 0 && "System SGPRs must be added after user SGPRs");
  return AMDGPU::SGPR0 + NumUserSGPRs;
}

MCPhysReg SIMachineFunctionInfo::getNextSystemSGPR() const {
  return AMDGPU::SGPR0 + NumUserSGPRs + NumSystemSGPRs;
}

Register
SIMachineFunctionInfo::getGITPtrLoReg(const MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (!ST.isAmdPalOS())
    return Register();
  Register GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in
  if (ST.hasMergedShaders()) {
    switch (MF.getFunction().getCallingConv()) {
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_GS:
      // Low GIT address is passed in s8 rather than s0 for an LS+HS or
      // ES+GS merged shader on gfx9+.
      GitPtrLo = AMDGPU::SGPR8;
      return GitPtrLo;
    default:
      return GitPtrLo;
    }
  }
  return GitPtrLo;
}

static yaml::StringValue regToString(Register Reg,
                                     const TargetRegisterInfo &TRI) {
  yaml::StringValue Dest;
  {
    raw_string_ostream OS(Dest.Value);
    OS << printReg(Reg, &TRI);
  }
  return Dest;
}

static Optional<yaml::SIArgumentInfo>
convertArgumentInfo(const AMDGPUFunctionArgInfo &ArgInfo,
                    const TargetRegisterInfo &TRI) {
  yaml::SIArgumentInfo AI;

  auto convertArg = [&](Optional<yaml::SIArgument> &A,
                        const ArgDescriptor &Arg) {
    if (!Arg)
      return false;

    // Create a register or stack argument.
    yaml::SIArgument SA = yaml::SIArgument::createArgument(Arg.isRegister());
    if (Arg.isRegister()) {
      raw_string_ostream OS(SA.RegisterName.Value);
      OS << printReg(Arg.getRegister(), &TRI);
    } else
      SA.StackOffset = Arg.getStackOffset();
    // Check and update the optional mask.
    if (Arg.isMasked())
      SA.Mask = Arg.getMask();

    A = SA;
    return true;
  };

  bool Any = false;
  Any |= convertArg(AI.PrivateSegmentBuffer, ArgInfo.PrivateSegmentBuffer);
  Any |= convertArg(AI.DispatchPtr, ArgInfo.DispatchPtr);
  Any |= convertArg(AI.QueuePtr, ArgInfo.QueuePtr);
  Any |= convertArg(AI.KernargSegmentPtr, ArgInfo.KernargSegmentPtr);
  Any |= convertArg(AI.DispatchID, ArgInfo.DispatchID);
  Any |= convertArg(AI.FlatScratchInit, ArgInfo.FlatScratchInit);
  Any |= convertArg(AI.PrivateSegmentSize, ArgInfo.PrivateSegmentSize);
  Any |= convertArg(AI.WorkGroupIDX, ArgInfo.WorkGroupIDX);
  Any |= convertArg(AI.WorkGroupIDY, ArgInfo.WorkGroupIDY);
  Any |= convertArg(AI.WorkGroupIDZ, ArgInfo.WorkGroupIDZ);
  Any |= convertArg(AI.WorkGroupInfo, ArgInfo.WorkGroupInfo);
  Any |= convertArg(AI.PrivateSegmentWaveByteOffset,
                    ArgInfo.PrivateSegmentWaveByteOffset);
  Any |= convertArg(AI.ImplicitArgPtr, ArgInfo.ImplicitArgPtr);
  Any |= convertArg(AI.ImplicitBufferPtr, ArgInfo.ImplicitBufferPtr);
  Any |= convertArg(AI.WorkItemIDX, ArgInfo.WorkItemIDX);
  Any |= convertArg(AI.WorkItemIDY, ArgInfo.WorkItemIDY);
  Any |= convertArg(AI.WorkItemIDZ, ArgInfo.WorkItemIDZ);

  if (Any)
    return AI;

  return None;
}

yaml::SIMachineFunctionInfo::SIMachineFunctionInfo(
    const llvm::SIMachineFunctionInfo &MFI, const TargetRegisterInfo &TRI)
    : ExplicitKernArgSize(MFI.getExplicitKernArgSize()),
      MaxKernArgAlign(MFI.getMaxKernArgAlign()), LDSSize(MFI.getLDSSize()),
      DynLDSAlign(MFI.getDynLDSAlign()), IsEntryFunction(MFI.isEntryFunction()),
      NoSignedZerosFPMath(MFI.hasNoSignedZerosFPMath()),
      MemoryBound(MFI.isMemoryBound()), WaveLimiter(MFI.needsWaveLimiter()),
      HasSpilledSGPRs(MFI.hasSpilledSGPRs()),
      HasSpilledVGPRs(MFI.hasSpilledVGPRs()),
      HighBitsOf32BitAddress(MFI.get32BitAddressHighBits()),
      ScratchRSrcReg(regToString(MFI.getScratchRSrcReg(), TRI)),
      FrameOffsetReg(regToString(MFI.getFrameOffsetReg(), TRI)),
      StackPtrOffsetReg(regToString(MFI.getStackPtrOffsetReg(), TRI)),
      ArgInfo(convertArgumentInfo(MFI.getArgInfo(), TRI)), Mode(MFI.getMode()) {
}

void yaml::SIMachineFunctionInfo::mappingImpl(yaml::IO &YamlIO) {
  MappingTraits<SIMachineFunctionInfo>::mapping(YamlIO, *this);
}

bool SIMachineFunctionInfo::initializeBaseYamlFields(
    const yaml::SIMachineFunctionInfo &YamlMFI) {
  ExplicitKernArgSize = YamlMFI.ExplicitKernArgSize;
  MaxKernArgAlign = assumeAligned(YamlMFI.MaxKernArgAlign);
  LDSSize = YamlMFI.LDSSize;
  DynLDSAlign = YamlMFI.DynLDSAlign;
  HighBitsOf32BitAddress = YamlMFI.HighBitsOf32BitAddress;
  IsEntryFunction = YamlMFI.IsEntryFunction;
  NoSignedZerosFPMath = YamlMFI.NoSignedZerosFPMath;
  MemoryBound = YamlMFI.MemoryBound;
  WaveLimiter = YamlMFI.WaveLimiter;
  HasSpilledSGPRs = YamlMFI.HasSpilledSGPRs;
  HasSpilledVGPRs = YamlMFI.HasSpilledVGPRs;
  return false;
}

// Remove the VGPR which was reserved for SGPR spills if there are no spilled
// SGPRs.
bool SIMachineFunctionInfo::removeVGPRForSGPRSpill(Register ReservedVGPR,
                                                   MachineFunction &MF) {
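  // Look for the reserved VGPR among the spill VGPRs; if found, drop it and
  // remove it from every block's live-in list.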
  for (auto *i = SpillVGPRs.begin(); i < SpillVGPRs.end(); i++) {
    if (i->VGPR == ReservedVGPR) {
      SpillVGPRs.erase(i);

      for (MachineBasicBlock &MBB : MF) {
        MBB.removeLiveIn(ReservedVGPR);
        MBB.sortUniqueLiveIns();
      }
      this->VGPRReservedForSGPRSpill = AMDGPU::NoRegister;
      return true;
    }
  }
  return false;
}