//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "AMDGPUAsmUtils.h"
#include "AMDGPUTargetTransformInfo.h"
#include "SIDefines.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
#undef GET_INSTRMAP_INFO
#undef GET_INSTRINFO_NAMED_OPS

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  Dst &= ~(1 << Shift) & ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
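
// A quick sanity check of the helpers above: getBitMask(4, 3) == 0x70,
// packBits(5, 0, 4, 3) == 0x50, and unpackBits(0x50, 4, 3) == 5. Source
// bits outside the field are masked off rather than asserted on.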

/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo() { return 0; }

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
  return (VersionMajor >= 10) ? 6 : 4;
}

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi() { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi() { return 2; }
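
// Taken together, these helpers describe the s_waitcnt layout: vmcnt[3:0]
// in bits 3:0, expcnt in bits 6:4, lgkmcnt starting at bit 8 (four bits
// before gfx10, six from gfx10 on), and vmcnt[5:4] in bits 15:14 on gfx9+.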

} // end anonymous namespace

namespace llvm {

namespace AMDGPU {

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
                                             VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
  const MIMGInfo *Info = getMIMGInfo(Opc);
  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

struct MUBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t dwords;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Dwords) {
  const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndDwords(BaseOpc, Dwords);
  return Info ? Info->Opcode : -1;
}

int getMUBUFDwords(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->dwords : 0;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header file, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

namespace IsaInfo {

void streamIsaVersion(const MCSubtargetInfo *STI, raw_ostream &Stream) {
  auto TargetTriple = STI->getTargetTriple();
  auto Version = getIsaVersion(STI->getCPU());

  Stream << TargetTriple.getArchName() << '-'
         << TargetTriple.getVendorName() << '-'
         << TargetTriple.getOSName() << '-'
         << TargetTriple.getEnvironmentName() << '-'
         << "gfx"
         << Version.Major
         << Version.Minor
         << Version.Stepping;

  if (hasXNACK(*STI))
    Stream << "+xnack";
  if (hasSRAMECC(*STI))
    Stream << "+sram-ecc";

  Stream.flush();
}

bool hasCodeObjectV3(const MCSubtargetInfo *STI) {
  return STI->getTargetTriple().getOS() == Triple::AMDHSA &&
         STI->getFeatureBits().test(FeatureCodeObjectV3);
}

unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
    return 16;
  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
    return 32768;
  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  if (STI->getTargetTriple().getArch() != Triple::amdgcn)
    return 8;
  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}
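
// Worked example: a flat work-group size of 256 on a wave64 target needs
// 256/64 = 4 waves per group, so at most min(40/4, 16) = 10 work-groups
// fit on a CU.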

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI) {
  return getMaxWavesPerEU() * getEUsPerCU(STI);
}

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return getWavesPerWorkGroup(STI, FlatWorkGroupSize);
}

unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxWavesPerEU() {
  // FIXME: Need to take scratch memory into account.
  return 10;
}

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return alignTo(getMaxWavesPerCU(STI, FlatWorkGroupSize),
                 getEUsPerCU(STI)) / getEUsPerCU(STI);
}

unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 2048;
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return alignTo(FlatWorkGroupSize, getWavefrontSize(STI)) /
         getWavefrontSize(STI);
}

unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return getAddressableNumSGPRs(STI);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
  return 8;
}

unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 106;
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 0;

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
}

unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}
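
// Worked example: on gfx8 with the trap handler disabled, 8 waves per EU
// gives alignDown(800/8, 16) = 96 SGPRs, below the 102 addressable ones,
// so 96 is returned.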

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return ExtraSGPRs;

  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed)
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
                          STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
}

unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is the actual number of SGPR blocks minus 1.
  return NumSGPRs / getSGPREncodingGranule(STI) - 1;
}
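
// E.g. with the 8-register encoding granule, 37 SGPRs round up to 40 and
// encode as 40/8 - 1 = 4.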

unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
                             Optional<bool> EnableWavefrontSize32) {
  bool IsWave32 = EnableWavefrontSize32 ?
      *EnableWavefrontSize32 :
      STI->getFeatureBits().test(FeatureWavefrontSize32);
  return IsWave32 ? 8 : 4;
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
                                Optional<bool> EnableWavefrontSize32) {
  return getVGPRAllocGranule(STI, EnableWavefrontSize32);
}

unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
  return 256;
}

unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
  return getTotalNumVGPRs(STI);
}

unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
                getVGPRAllocGranule(STI)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
}

unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
                                   getVGPRAllocGranule(STI));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
                          Optional<bool> EnableWavefrontSize32) {
  NumVGPRs = alignTo(std::max(1u, NumVGPRs),
                     getVGPREncodingGranule(STI, EnableWavefrontSize32));
  // VGPRBlocks is the actual number of VGPR blocks minus 1.
  return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
}
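
// E.g. 17 VGPRs encode as alignTo(17, 8)/8 - 1 = 2 in wave32 mode and as
// alignTo(17, 4)/4 - 1 = 4 in wave64 mode.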

} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 2;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = Version.Major;
  Header.amd_machine_version_minor = Version.Minor;
  Header.amd_machine_version_stepping = Version.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  Header.wavefront_size = 6; // Log2 of the wavefront size, i.e. wave64.

  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;

  if (Version.Major >= 10) {
    if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
      Header.wavefront_size = 5; // Log2 of the wavefront size, i.e. wave32.
      Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
    }
    Header.compute_pgm_resource_registers |=
        S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
        S_00B848_MEM_ORDERED(1);
  }
}

amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
    const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0, sizeof(KD));

  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
                  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
                  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
  if (Version.Major >= 10) {
    AMDHSA_BITS_SET(KD.kernel_code_properties,
                    amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
                    STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
                    STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
  }
  return KD;
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
         GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getOS() != Triple::AMDHSA;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}

unsigned getVmcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
}

unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(),
                                getLgkmcntBitWidth(Version.Major));
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(),
                    getLgkmcntBitWidth(Version.Major));
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
  Waitcnt Decoded;
  Decoded.VmCnt = decodeVmcnt(Version, Encoded);
  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
  Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
  return Decoded;
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(),
                  getLgkmcntBitWidth(Version.Major));
}

unsigned encodeWaitcnt(const IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}
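
// E.g. on gfx9, encoding vmcnt = 20 (0b10100) packs the low nibble 0b0100
// into bits 3:0 and the high bits 0b01 into bits 15:14; counters that are
// not overwritten keep the all-ones "no wait" value from
// getWaitcntBitMask().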

unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
}

//===----------------------------------------------------------------------===//
// hwreg
//===----------------------------------------------------------------------===//

namespace Hwreg {

int64_t getHwregId(const StringRef Name) {
  for (int Id = ID_SYMBOLIC_FIRST_; Id < ID_SYMBOLIC_LAST_; ++Id) {
    if (IdSymbolic[Id] && Name == IdSymbolic[Id])
      return Id;
  }
  return ID_UNKNOWN_;
}

static unsigned getLastSymbolicHwreg(const MCSubtargetInfo &STI) {
  if (isSI(STI) || isCI(STI) || isVI(STI))
    return ID_SYMBOLIC_FIRST_GFX9_;
  else if (isGFX9(STI))
    return ID_SYMBOLIC_FIRST_GFX10_;
  else
    return ID_SYMBOLIC_LAST_;
}

bool isValidHwreg(int64_t Id, const MCSubtargetInfo &STI) {
  return ID_SYMBOLIC_FIRST_ <= Id && Id < getLastSymbolicHwreg(STI) &&
         IdSymbolic[Id];
}

bool isValidHwreg(int64_t Id) {
  return 0 <= Id && isUInt<ID_WIDTH_>(Id);
}

bool isValidHwregOffset(int64_t Offset) {
  return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
}

bool isValidHwregWidth(int64_t Width) {
  return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
}

uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
  return (Id << ID_SHIFT_) |
         (Offset << OFFSET_SHIFT_) |
         ((Width - 1) << WIDTH_M1_SHIFT_);
}
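
// The result is the s_getreg/s_setreg simm16 operand: the hwreg id in the
// low field, then the starting bit offset within the register, then
// width - 1 (so the full 1..32 bit-width range fits in the field). The
// exact shifts and widths come from SIDefines.h.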

StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
  return isValidHwreg(Id, STI) ? IdSymbolic[Id] : "";
}

void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset,
                 unsigned &Width) {
  Id = (Val & ID_MASK_) >> ID_SHIFT_;
  Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
  Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
}

} // namespace Hwreg

//===----------------------------------------------------------------------===//
// SendMsg
//===----------------------------------------------------------------------===//

namespace SendMsg {

int64_t getMsgId(const StringRef Name) {
  for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
    if (IdSymbolic[i] && Name == IdSymbolic[i])
      return i;
  }
  return ID_UNKNOWN_;
}

static bool isValidMsgId(int64_t MsgId) {
  return (ID_GAPS_FIRST_ <= MsgId && MsgId < ID_GAPS_LAST_) &&
         IdSymbolic[MsgId];
}

bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI, bool Strict) {
  if (Strict) {
    if (MsgId == ID_GS_ALLOC_REQ || MsgId == ID_GET_DOORBELL)
      return isGFX9(STI) || isGFX10(STI);
    else
      return isValidMsgId(MsgId);
  } else {
    return 0 <= MsgId && isUInt<ID_WIDTH_>(MsgId);
  }
}

StringRef getMsgName(int64_t MsgId) {
  return isValidMsgId(MsgId) ? IdSymbolic[MsgId] : "";
}

int64_t getMsgOpId(int64_t MsgId, const StringRef Name) {
  const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
  const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
  const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
  for (int i = F; i < L; ++i) {
    if (Name == S[i]) {
      return i;
    }
  }
  return OP_UNKNOWN_;
}

bool isValidMsgOp(int64_t MsgId, int64_t OpId, bool Strict) {
  if (!Strict)
    return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);

  switch (MsgId) {
  case ID_GS:
    return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP;
  case ID_GS_DONE:
    return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_;
  case ID_SYSMSG:
    return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_;
  default:
    return OpId == OP_NONE_;
  }
}

StringRef getMsgOpName(int64_t MsgId, int64_t OpId) {
  assert(msgRequiresOp(MsgId));
  return (MsgId == ID_SYSMSG) ? OpSysSymbolic[OpId] : OpGsSymbolic[OpId];
}

bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
                      bool Strict) {
  if (!Strict)
    return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);

  switch (MsgId) {
  case ID_GS:
    return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
  case ID_GS_DONE:
    return (OpId == OP_GS_NOP) ?
        (StreamId == STREAM_ID_NONE_) :
        (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
  default:
    return StreamId == STREAM_ID_NONE_;
  }
}

bool msgRequiresOp(int64_t MsgId) {
  return MsgId == ID_GS || MsgId == ID_GS_DONE || MsgId == ID_SYSMSG;
}

bool msgSupportsStream(int64_t MsgId, int64_t OpId) {
  return (MsgId == ID_GS || MsgId == ID_GS_DONE) && OpId != OP_GS_NOP;
}

void decodeMsg(unsigned Val,
               uint16_t &MsgId,
               uint16_t &OpId,
               uint16_t &StreamId) {
  MsgId = Val & ID_MASK_;
  OpId = (Val & OP_MASK_) >> OP_SHIFT_;
  StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
}

uint64_t encodeMsg(uint64_t MsgId,
                   uint64_t OpId,
                   uint64_t StreamId) {
  return (MsgId << ID_SHIFT_) |
         (OpId << OP_SHIFT_) |
         (StreamId << STREAM_ID_SHIFT_);
}
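
// The result is the s_sendmsg simm16 operand: the message id occupies the
// low bits (decodeMsg masks it without shifting), followed by the op id
// and the GS stream id at the shifts defined in SIDefines.h.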

} // namespace SendMsg

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool isShader(CallingConv::ID cc) {
  switch (cc) {
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    return true;
  default:
    return false;
  }
}

bool isCompute(CallingConv::ID cc) {
  return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
    return true;
  default:
    return false;
  }
}

bool hasXNACK(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}

bool hasSRAMECC(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
}

bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128];
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem];
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool isGFX10(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
}

bool isSGPR(unsigned Reg, const MCRegisterInfo *TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, 1);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
         Reg == AMDGPU::SCC;
}

bool isRegIntersect(unsigned Reg0, unsigned Reg1, const MCRegisterInfo *TRI) {
  for (MCRegAliasIterator R(Reg0, TRI, true); R.isValid(); ++R) {
    if (*R == Reg1)
      return true;
  }
  return false;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch (Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9_GFX10(TTMP0) \
  CASE_VI_GFX9_GFX10(TTMP1) \
  CASE_VI_GFX9_GFX10(TTMP2) \
  CASE_VI_GFX9_GFX10(TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4) \
  CASE_VI_GFX9_GFX10(TTMP5) \
  CASE_VI_GFX9_GFX10(TTMP6) \
  CASE_VI_GFX9_GFX10(TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8) \
  CASE_VI_GFX9_GFX10(TTMP9) \
  CASE_VI_GFX9_GFX10(TTMP10) \
  CASE_VI_GFX9_GFX10(TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12) \
  CASE_VI_GFX9_GFX10(TTMP13) \
  CASE_VI_GFX9_GFX10(TTMP14) \
  CASE_VI_GFX9_GFX10(TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1) \
  CASE_VI_GFX9_GFX10(TTMP2_TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5) \
  CASE_VI_GFX9_GFX10(TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9) \
  CASE_VI_GFX9_GFX10(TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12_TTMP13) \
  CASE_VI_GFX9_GFX10(TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9_GFX10(node) \
  case node: return (isGFX9(STI) || isGFX10(STI)) ? node##_gfx9_gfx10 : node##_vi;

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  if (STI.getTargetTriple().getArch() == Triple::r600)
    return Reg;
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9_GFX10

#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
#define CASE_VI_GFX9_GFX10(node) case node##_vi: case node##_gfx9_gfx10: return node;

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9_GFX10
#undef MAP_REG2REG

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::AGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::AV_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::AV_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::AReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
    return 64;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
  case AMDGPU::AReg_128RegClassID:
    return 128;
  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
    return 160;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
  case AMDGPU::AReg_512RegClassID:
    return 512;
  case AMDGPU::SReg_1024RegClassID:
  case AMDGPU::VReg_1024RegClassID:
  case AMDGPU::AReg_1024RegClassID:
    return 1024;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi); // 1/(2*pi)
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi); // 1/(2*pi)
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/(2*pi)
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
    int16_t Trunc = static_cast<int16_t>(Literal);
    return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
  }
  if (!(Literal & 0xffff))
    return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}
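
// E.g. 0x40004000 repeats the half 2.0 (0x4000) in both lanes and is
// inlinable, while 0x40003C00 (2.0 and 1.0) mixes two different halves
// and is not.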

bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(),
                                                Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(),
                                                Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
  return isGCN3Encoding(ST) || isGFX10(ST);
}

int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  if (hasSMEMByteOffset(ST))
    return ByteOffset;
  return ByteOffset >> 2;
}

bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
  return hasSMEMByteOffset(ST) ?
      isUInt<20>(EncodedOffset) : isUInt<8>(EncodedOffset);
}
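
// E.g. a 1020-byte offset encodes as dword offset 255 on SI/CI, just
// inside the 8-bit field; GCN3-encoding and gfx10 targets take byte
// offsets directly and allow up to 20 bits.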

// Given Imm, split it into the values to put into the SOffset and ImmOffset
// fields in an MUBUF instruction. Return false if it is not possible (due to
// a hardware bug needing a workaround).
//
// The required alignment ensures that individual address components remain
// aligned if they are aligned to begin with. It also ensures that additional
// offsets within the given alignment can be added to the resulting ImmOffset.
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
                      const GCNSubtarget *Subtarget, uint32_t Align) {
  const uint32_t MaxImm = alignDown(4095, Align);
  uint32_t Overflow = 0;

  if (Imm > MaxImm) {
    if (Imm <= MaxImm + 64) {
      // Use an SOffset inline constant for 4..64.
      Overflow = Imm - MaxImm;
      Imm = MaxImm;
    } else {
      // Try to keep the same value in SOffset for adjacent loads, so that
      // the corresponding register contents can be re-used.
      //
      // Load values with all low bits (except for alignment bits) set into
      // SOffset, so that a larger range of values can be covered using
      // s_movk_i32.
      //
      // Atomic operations fail to work correctly when individual address
      // components are unaligned, even if their sum is aligned.
      uint32_t High = (Imm + Align) & ~4095;
      uint32_t Low = (Imm + Align) & 4095;
      Imm = Low;
      Overflow = High - Align;
    }
  }

  // There is a hardware bug in SI and CI which prevents address clamping in
  // MUBUF instructions from working correctly with SOffsets. The immediate
  // offset is unaffected.
  if (Overflow > 0 &&
      Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  ImmOffset = Imm;
  SOffset = Overflow;
  return true;
}
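
// E.g. with Imm = 4100 and Align = 4, MaxImm is 4092, so the result is
// ImmOffset = 4092 and SOffset = 8 (an inline constant); the two fields
// still sum to the original 4100.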

SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
  *this = getDefaultForCallingConv(F.getCallingConv());

  StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
  if (!IEEEAttr.empty())
    IEEE = IEEEAttr == "true";

  StringRef DX10ClampAttr
    = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
  if (!DX10ClampAttr.empty())
    DX10Clamp = DX10ClampAttr == "true";
}

namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

} // namespace AMDGPU
} // namespace llvm