1 //===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AMDGPUBaseInfo.h"
10 #include "AMDGPU.h"
11 #include "AMDGPUAsmUtils.h"
12 #include "AMDKernelCodeT.h"
13 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
14 #include "llvm/BinaryFormat/ELF.h"
15 #include "llvm/CodeGen/TargetRegisterInfo.h"
16 #include "llvm/IR/Attributes.h"
17 #include "llvm/IR/Constants.h"
18 #include "llvm/IR/Function.h"
19 #include "llvm/IR/GlobalValue.h"
20 #include "llvm/IR/IntrinsicsAMDGPU.h"
21 #include "llvm/IR/IntrinsicsR600.h"
22 #include "llvm/IR/LLVMContext.h"
23 #include "llvm/MC/MCInstrInfo.h"
24 #include "llvm/MC/MCRegisterInfo.h"
25 #include "llvm/MC/MCSubtargetInfo.h"
26 #include "llvm/Support/AMDHSAKernelDescriptor.h"
27 #include "llvm/Support/CommandLine.h"
28 #include "llvm/TargetParser/TargetParser.h"
29 #include <optional>
30 
31 #define GET_INSTRINFO_NAMED_OPS
32 #define GET_INSTRMAP_INFO
33 #include "AMDGPUGenInstrInfo.inc"
34 
35 static llvm::cl::opt<unsigned>
36     AmdhsaCodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden,
37                             llvm::cl::desc("AMDHSA Code Object Version"),
38                             llvm::cl::init(4));
39 
40 namespace {
41 
42 /// \returns Bit mask for given bit \p Shift and bit \p Width.
43 unsigned getBitMask(unsigned Shift, unsigned Width) {
44   return ((1 << Width) - 1) << Shift;
45 }
46 
47 /// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
48 ///
49 /// \returns Packed \p Dst.
50 unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
51   unsigned Mask = getBitMask(Shift, Width);
52   return ((Src << Shift) & Mask) | (Dst & ~Mask);
53 }
54 
55 /// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
56 ///
57 /// \returns Unpacked bits.
58 unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
59   return (Src & getBitMask(Shift, Width)) >> Shift;
60 }
61 
62 /// \returns Vmcnt bit shift (lower bits).
63 unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
64   return VersionMajor >= 11 ? 10 : 0;
65 }
66 
67 /// \returns Vmcnt bit width (lower bits).
68 unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
69   return VersionMajor >= 11 ? 6 : 4;
70 }
71 
72 /// \returns Expcnt bit shift.
73 unsigned getExpcntBitShift(unsigned VersionMajor) {
74   return VersionMajor >= 11 ? 0 : 4;
75 }
76 
77 /// \returns Expcnt bit width.
78 unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }
79 
80 /// \returns Lgkmcnt bit shift.
81 unsigned getLgkmcntBitShift(unsigned VersionMajor) {
82   return VersionMajor >= 11 ? 4 : 8;
83 }
84 
85 /// \returns Lgkmcnt bit width.
86 unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
87   return VersionMajor >= 10 ? 6 : 4;
88 }
89 
90 /// \returns Vmcnt bit shift (higher bits).
91 unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }
92 
93 /// \returns Vmcnt bit width (higher bits).
94 unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
95   return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
96 }
97 
98 /// \returns VmVsrc bit width
99 inline unsigned getVmVsrcBitWidth() { return 3; }
100 
101 /// \returns VmVsrc bit shift
102 inline unsigned getVmVsrcBitShift() { return 2; }
103 
104 /// \returns VaVdst bit width
105 inline unsigned getVaVdstBitWidth() { return 4; }
106 
107 /// \returns VaVdst bit shift
108 inline unsigned getVaVdstBitShift() { return 12; }
109 
110 /// \returns SaSdst bit width
111 inline unsigned getSaSdstBitWidth() { return 1; }
112 
113 /// \returns SaSdst bit shift
114 inline unsigned getSaSdstBitShift() { return 0; }
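
// The helpers above give the depctr immediate layout used by the DepCtr
// encode/decode routines further below: SaSdst occupies bit 0, VmVsrc
// bits [4:2], and VaVdst bits [15:12].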
115 
} // end anonymous namespace
117 
118 namespace llvm {
119 
120 namespace AMDGPU {
121 
122 std::optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
123   if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
124     return std::nullopt;
125 
126   switch (AmdhsaCodeObjectVersion) {
127   case 2:
128     return ELF::ELFABIVERSION_AMDGPU_HSA_V2;
129   case 3:
130     return ELF::ELFABIVERSION_AMDGPU_HSA_V3;
131   case 4:
132     return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
133   case 5:
134     return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
135   default:
136     report_fatal_error(Twine("Unsupported AMDHSA Code Object Version ") +
137                        Twine(AmdhsaCodeObjectVersion));
138   }
139 }
140 
141 bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
142   if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
143     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
144   return false;
145 }
146 
147 bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
148   if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
149     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
150   return false;
151 }
152 
153 bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
154   if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
155     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
156   return false;
157 }
158 
159 bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
160   if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
161     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
162   return false;
163 }
164 
165 bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) {
166   return isHsaAbiVersion3(STI) || isHsaAbiVersion4(STI) ||
167          isHsaAbiVersion5(STI);
168 }
169 
170 unsigned getAmdhsaCodeObjectVersion() {
171   return AmdhsaCodeObjectVersion;
172 }
173 
174 unsigned getCodeObjectVersion(const Module &M) {
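  // The "amdgpu_code_object_version" module flag stores the version scaled
  // by 100 (e.g. 400 for code object v4), hence the division below.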
175   if (auto Ver = mdconst::extract_or_null<ConstantInt>(
176       M.getModuleFlag("amdgpu_code_object_version"))) {
177     return (unsigned)Ver->getZExtValue() / 100;
178   }
179 
180   // Default code object version.
181   return AMDHSA_COV4;
182 }
183 
184 unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion) {
185   switch (CodeObjectVersion) {
186   case AMDHSA_COV2:
187   case AMDHSA_COV3:
188   case AMDHSA_COV4:
189     return 48;
190   case AMDHSA_COV5:
191   default:
192     return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
193   }
194 }
195 
196 
197 // FIXME: All such magic numbers about the ABI should be in a
198 // central TD file.
199 unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion) {
200   switch (CodeObjectVersion) {
201   case AMDHSA_COV2:
202   case AMDHSA_COV3:
203   case AMDHSA_COV4:
204     return 24;
205   case AMDHSA_COV5:
206   default:
207     return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
208   }
209 }
210 
211 unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion) {
212   switch (CodeObjectVersion) {
213   case AMDHSA_COV2:
214   case AMDHSA_COV3:
215   case AMDHSA_COV4:
216     return 32;
217   case AMDHSA_COV5:
218   default:
219     return AMDGPU::ImplicitArg::DEFAULT_QUEUE_OFFSET;
220   }
221 }
222 
223 unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion) {
224   switch (CodeObjectVersion) {
225   case AMDHSA_COV2:
226   case AMDHSA_COV3:
227   case AMDHSA_COV4:
228     return 40;
229   case AMDHSA_COV5:
230   default:
231     return AMDGPU::ImplicitArg::COMPLETION_ACTION_OFFSET;
232   }
233 }
234 
235 #define GET_MIMGBaseOpcodesTable_IMPL
236 #define GET_MIMGDimInfoTable_IMPL
237 #define GET_MIMGInfoTable_IMPL
238 #define GET_MIMGLZMappingTable_IMPL
239 #define GET_MIMGMIPMappingTable_IMPL
240 #define GET_MIMGBiasMappingTable_IMPL
241 #define GET_MIMGOffsetMappingTable_IMPL
242 #define GET_MIMGG16MappingTable_IMPL
243 #define GET_MAIInstInfoTable_IMPL
244 #include "AMDGPUGenSearchableTables.inc"
245 
246 int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
247                   unsigned VDataDwords, unsigned VAddrDwords) {
248   const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
249                                              VDataDwords, VAddrDwords);
250   return Info ? Info->Opcode : -1;
251 }
252 
253 const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
254   const MIMGInfo *Info = getMIMGInfo(Opc);
255   return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
256 }
257 
258 int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
259   const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
260   const MIMGInfo *NewInfo =
261       getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
262                           NewChannels, OrigInfo->VAddrDwords);
263   return NewInfo ? NewInfo->Opcode : -1;
264 }
265 
266 unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
267                            const MIMGDimInfo *Dim, bool IsA16,
268                            bool IsG16Supported) {
269   unsigned AddrWords = BaseOpcode->NumExtraArgs;
270   unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
271                             (BaseOpcode->LodOrClampOrMip ? 1 : 0);
272   if (IsA16)
273     AddrWords += divideCeil(AddrComponents, 2);
274   else
275     AddrWords += AddrComponents;
276 
277   // Note: For subtargets that support A16 but not G16, enabling A16 also
278   // enables 16 bit gradients.
279   // For subtargets that support A16 (operand) and G16 (done with a different
280   // instruction encoding), they are independent.
281 
282   if (BaseOpcode->Gradients) {
283     if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
      // There are two gradients per coordinate; we pack them separately.
      // For the 3d case we get:
      //   (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
287       AddrWords += alignTo<2>(Dim->NumGradients / 2);
288     else
289       AddrWords += Dim->NumGradients;
290   }
291   return AddrWords;
292 }
293 
294 struct MUBUFInfo {
295   uint16_t Opcode;
296   uint16_t BaseOpcode;
297   uint8_t elements;
298   bool has_vaddr;
299   bool has_srsrc;
300   bool has_soffset;
301   bool IsBufferInv;
302 };
303 
304 struct MTBUFInfo {
305   uint16_t Opcode;
306   uint16_t BaseOpcode;
307   uint8_t elements;
308   bool has_vaddr;
309   bool has_srsrc;
310   bool has_soffset;
311 };
312 
313 struct SMInfo {
314   uint16_t Opcode;
315   bool IsBuffer;
316 };
317 
318 struct VOPInfo {
319   uint16_t Opcode;
320   bool IsSingle;
321 };
322 
323 struct VOPC64DPPInfo {
324   uint16_t Opcode;
325 };
326 
327 struct VOPDComponentInfo {
328   uint16_t BaseVOP;
329   uint16_t VOPDOp;
330   bool CanBeVOPDX;
331 };
332 
333 struct VOPDInfo {
334   uint16_t Opcode;
335   uint16_t OpX;
336   uint16_t OpY;
337 };
338 
339 struct VOPTrue16Info {
340   uint16_t Opcode;
341   bool IsTrue16;
342 };
343 
344 #define GET_MTBUFInfoTable_DECL
345 #define GET_MTBUFInfoTable_IMPL
346 #define GET_MUBUFInfoTable_DECL
347 #define GET_MUBUFInfoTable_IMPL
348 #define GET_SMInfoTable_DECL
349 #define GET_SMInfoTable_IMPL
350 #define GET_VOP1InfoTable_DECL
351 #define GET_VOP1InfoTable_IMPL
352 #define GET_VOP2InfoTable_DECL
353 #define GET_VOP2InfoTable_IMPL
354 #define GET_VOP3InfoTable_DECL
355 #define GET_VOP3InfoTable_IMPL
356 #define GET_VOPC64DPPTable_DECL
357 #define GET_VOPC64DPPTable_IMPL
358 #define GET_VOPC64DPP8Table_DECL
359 #define GET_VOPC64DPP8Table_IMPL
360 #define GET_VOPDComponentTable_DECL
361 #define GET_VOPDComponentTable_IMPL
362 #define GET_VOPDPairs_DECL
363 #define GET_VOPDPairs_IMPL
364 #define GET_VOPTrue16Table_DECL
365 #define GET_VOPTrue16Table_IMPL
366 #define GET_WMMAOpcode2AddrMappingTable_DECL
367 #define GET_WMMAOpcode2AddrMappingTable_IMPL
368 #define GET_WMMAOpcode3AddrMappingTable_DECL
369 #define GET_WMMAOpcode3AddrMappingTable_IMPL
370 #include "AMDGPUGenSearchableTables.inc"
371 
372 int getMTBUFBaseOpcode(unsigned Opc) {
373   const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
374   return Info ? Info->BaseOpcode : -1;
375 }
376 
377 int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MTBUFInfo *Info =
      getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
379   return Info ? Info->Opcode : -1;
380 }
381 
382 int getMTBUFElements(unsigned Opc) {
383   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
384   return Info ? Info->elements : 0;
385 }
386 
387 bool getMTBUFHasVAddr(unsigned Opc) {
388   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
389   return Info ? Info->has_vaddr : false;
390 }
391 
392 bool getMTBUFHasSrsrc(unsigned Opc) {
393   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
394   return Info ? Info->has_srsrc : false;
395 }
396 
397 bool getMTBUFHasSoffset(unsigned Opc) {
398   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
399   return Info ? Info->has_soffset : false;
400 }
401 
402 int getMUBUFBaseOpcode(unsigned Opc) {
403   const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
404   return Info ? Info->BaseOpcode : -1;
405 }
406 
407 int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MUBUFInfo *Info =
      getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
409   return Info ? Info->Opcode : -1;
410 }
411 
412 int getMUBUFElements(unsigned Opc) {
413   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
414   return Info ? Info->elements : 0;
415 }
416 
417 bool getMUBUFHasVAddr(unsigned Opc) {
418   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
419   return Info ? Info->has_vaddr : false;
420 }
421 
422 bool getMUBUFHasSrsrc(unsigned Opc) {
423   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
424   return Info ? Info->has_srsrc : false;
425 }
426 
427 bool getMUBUFHasSoffset(unsigned Opc) {
428   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
429   return Info ? Info->has_soffset : false;
430 }
431 
432 bool getMUBUFIsBufferInv(unsigned Opc) {
433   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
434   return Info ? Info->IsBufferInv : false;
435 }
436 
437 bool getSMEMIsBuffer(unsigned Opc) {
438   const SMInfo *Info = getSMEMOpcodeHelper(Opc);
439   return Info ? Info->IsBuffer : false;
440 }
441 
442 bool getVOP1IsSingle(unsigned Opc) {
443   const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
444   return Info ? Info->IsSingle : false;
445 }
446 
447 bool getVOP2IsSingle(unsigned Opc) {
448   const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
449   return Info ? Info->IsSingle : false;
450 }
451 
452 bool getVOP3IsSingle(unsigned Opc) {
453   const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
454   return Info ? Info->IsSingle : false;
455 }
456 
457 bool isVOPC64DPP(unsigned Opc) {
458   return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
459 }
460 
461 bool getMAIIsDGEMM(unsigned Opc) {
462   const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
463   return Info ? Info->is_dgemm : false;
464 }
465 
466 bool getMAIIsGFX940XDL(unsigned Opc) {
467   const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
468   return Info ? Info->is_gfx940_xdl : false;
469 }
470 
471 CanBeVOPD getCanBeVOPD(unsigned Opc) {
472   const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
473   if (Info)
474     return {Info->CanBeVOPDX, true};
475   else
476     return {false, false};
477 }
478 
479 unsigned getVOPDOpcode(unsigned Opc) {
480   const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
481   return Info ? Info->VOPDOp : ~0u;
482 }
483 
484 bool isVOPD(unsigned Opc) {
485   return AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0X);
486 }
487 
488 bool isMAC(unsigned Opc) {
489   return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
490          Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
491          Opc == AMDGPU::V_MAC_F32_e64_vi ||
492          Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
493          Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
494          Opc == AMDGPU::V_MAC_F16_e64_vi ||
495          Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
496          Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
497          Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
498          Opc == AMDGPU::V_FMAC_F32_e64_vi ||
499          Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
500          Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
501          Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
502          Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
503          Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
504          Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
505          Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
506          Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;
507 }
508 
509 bool isPermlane16(unsigned Opc) {
510   return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
511          Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
512          Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
513          Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11;
514 }
515 
516 bool isTrue16Inst(unsigned Opc) {
517   const VOPTrue16Info *Info = getTrue16OpcodeHelper(Opc);
518   return Info ? Info->IsTrue16 : false;
519 }
520 
521 unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
522   const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
523   return Info ? Info->Opcode3Addr : ~0u;
524 }
525 
526 unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc) {
527   const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom3AddrOpcode(Opc);
528   return Info ? Info->Opcode2Addr : ~0u;
529 }
530 
// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header file, so we need to wrap it in a function that takes unsigned
// instead.
534 int getMCOpcode(uint16_t Opcode, unsigned Gen) {
535   return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
536 }
537 
538 int getVOPDFull(unsigned OpX, unsigned OpY) {
539   const VOPDInfo *Info = getVOPDInfoFromComponentOpcodes(OpX, OpY);
540   return Info ? Info->Opcode : -1;
541 }
542 
543 std::pair<unsigned, unsigned> getVOPDComponents(unsigned VOPDOpcode) {
544   const VOPDInfo *Info = getVOPDOpcodeHelper(VOPDOpcode);
545   assert(Info);
546   auto OpX = getVOPDBaseFromComponent(Info->OpX);
547   auto OpY = getVOPDBaseFromComponent(Info->OpY);
548   assert(OpX && OpY);
549   return {OpX->BaseVOP, OpY->BaseVOP};
550 }
551 
552 namespace VOPD {
553 
554 ComponentProps::ComponentProps(const MCInstrDesc &OpDesc) {
555   assert(OpDesc.getNumDefs() == Component::DST_NUM);
556 
557   assert(OpDesc.getOperandConstraint(Component::SRC0, MCOI::TIED_TO) == -1);
558   assert(OpDesc.getOperandConstraint(Component::SRC1, MCOI::TIED_TO) == -1);
559   auto TiedIdx = OpDesc.getOperandConstraint(Component::SRC2, MCOI::TIED_TO);
560   assert(TiedIdx == -1 || TiedIdx == Component::DST);
561   HasSrc2Acc = TiedIdx != -1;
562 
563   SrcOperandsNum = OpDesc.getNumOperands() - OpDesc.getNumDefs();
564   assert(SrcOperandsNum <= Component::MAX_SRC_NUM);
565 
566   auto OperandsNum = OpDesc.getNumOperands();
567   unsigned CompOprIdx;
568   for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
569     if (OpDesc.operands()[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
570       MandatoryLiteralIdx = CompOprIdx;
571       break;
572     }
573   }
574 }
575 
576 unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
577   assert(CompOprIdx < Component::MAX_OPR_NUM);
578 
579   if (CompOprIdx == Component::DST)
580     return getIndexOfDstInParsedOperands();
581 
582   auto CompSrcIdx = CompOprIdx - Component::DST_NUM;
583   if (CompSrcIdx < getCompParsedSrcOperandsNum())
584     return getIndexOfSrcInParsedOperands(CompSrcIdx);
585 
586   // The specified operand does not exist.
587   return 0;
588 }
589 
590 std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
591     std::function<unsigned(unsigned, unsigned)> GetRegIdx) const {
592 
593   auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx);
594   auto OpYRegs = getRegIndices(ComponentIndex::Y, GetRegIdx);
595 
596   unsigned CompOprIdx;
597   for (CompOprIdx = 0; CompOprIdx < Component::MAX_OPR_NUM; ++CompOprIdx) {
598     unsigned BanksMasks = VOPD_VGPR_BANK_MASKS[CompOprIdx];
599     if (OpXRegs[CompOprIdx] && OpYRegs[CompOprIdx] &&
600         ((OpXRegs[CompOprIdx] & BanksMasks) ==
601          (OpYRegs[CompOprIdx] & BanksMasks)))
602       return CompOprIdx;
603   }
604 
605   return {};
606 }
607 
608 // Return an array of VGPR registers [DST,SRC0,SRC1,SRC2] used
609 // by the specified component. If an operand is unused
610 // or is not a VGPR, the corresponding value is 0.
611 //
612 // GetRegIdx(Component, MCOperandIdx) must return a VGPR register index
613 // for the specified component and MC operand. The callback must return 0
614 // if the operand is not a register or not a VGPR.
615 InstInfo::RegIndices InstInfo::getRegIndices(
616     unsigned CompIdx,
617     std::function<unsigned(unsigned, unsigned)> GetRegIdx) const {
618   assert(CompIdx < COMPONENTS_NUM);
619 
620   const auto &Comp = CompInfo[CompIdx];
621   InstInfo::RegIndices RegIndices;
622 
623   RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());
624 
625   for (unsigned CompOprIdx : {SRC0, SRC1, SRC2}) {
626     unsigned CompSrcIdx = CompOprIdx - DST_NUM;
627     RegIndices[CompOprIdx] =
628         Comp.hasRegSrcOperand(CompSrcIdx)
629             ? GetRegIdx(CompIdx, Comp.getIndexOfSrcInMCOperands(CompSrcIdx))
630             : 0;
631   }
632   return RegIndices;
633 }
634 
635 } // namespace VOPD
636 
637 VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY) {
638   return VOPD::InstInfo(OpX, OpY);
639 }
640 
641 VOPD::InstInfo getVOPDInstInfo(unsigned VOPDOpcode,
642                                const MCInstrInfo *InstrInfo) {
643   auto [OpX, OpY] = getVOPDComponents(VOPDOpcode);
644   const auto &OpXDesc = InstrInfo->get(OpX);
645   const auto &OpYDesc = InstrInfo->get(OpY);
646   VOPD::ComponentInfo OpXInfo(OpXDesc, VOPD::ComponentKind::COMPONENT_X);
647   VOPD::ComponentInfo OpYInfo(OpYDesc, OpXInfo);
648   return VOPD::InstInfo(OpXInfo, OpYInfo);
649 }
650 
651 namespace IsaInfo {
652 
653 AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
654     : STI(STI), XnackSetting(TargetIDSetting::Any),
655       SramEccSetting(TargetIDSetting::Any), CodeObjectVersion(0) {
656   if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
657     XnackSetting = TargetIDSetting::Unsupported;
658   if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
659     SramEccSetting = TargetIDSetting::Unsupported;
660 }
661 
662 void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
  // Check if xnack or sramecc is explicitly enabled or disabled. In the
  // absence of the target features we assume we must generate code that can
  // run in any environment.
666   SubtargetFeatures Features(FS);
667   std::optional<bool> XnackRequested;
668   std::optional<bool> SramEccRequested;
669 
670   for (const std::string &Feature : Features.getFeatures()) {
671     if (Feature == "+xnack")
672       XnackRequested = true;
673     else if (Feature == "-xnack")
674       XnackRequested = false;
675     else if (Feature == "+sramecc")
676       SramEccRequested = true;
677     else if (Feature == "-sramecc")
678       SramEccRequested = false;
679   }
680 
681   bool XnackSupported = isXnackSupported();
682   bool SramEccSupported = isSramEccSupported();
683 
684   if (XnackRequested) {
685     if (XnackSupported) {
686       XnackSetting =
687           *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
688     } else {
      // If a specific xnack setting was requested and this GPU does not
      // support xnack, emit a warning. The setting will remain "Unsupported".
691       if (*XnackRequested) {
692         errs() << "warning: xnack 'On' was requested for a processor that does "
693                   "not support it!\n";
694       } else {
695         errs() << "warning: xnack 'Off' was requested for a processor that "
696                   "does not support it!\n";
697       }
698     }
699   }
700 
701   if (SramEccRequested) {
702     if (SramEccSupported) {
703       SramEccSetting =
704           *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
705     } else {
      // If a specific sramecc setting was requested and this GPU does not
      // support sramecc, emit a warning. The setting will remain
      // "Unsupported".
709       if (*SramEccRequested) {
710         errs() << "warning: sramecc 'On' was requested for a processor that "
711                   "does not support it!\n";
712       } else {
713         errs() << "warning: sramecc 'Off' was requested for a processor that "
714                   "does not support it!\n";
715       }
716     }
717   }
718 }
719 
720 static TargetIDSetting
721 getTargetIDSettingFromFeatureString(StringRef FeatureString) {
722   if (FeatureString.endswith("-"))
723     return TargetIDSetting::Off;
724   if (FeatureString.endswith("+"))
725     return TargetIDSetting::On;
726 
727   llvm_unreachable("Malformed feature string");
728 }
729 
730 void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
731   SmallVector<StringRef, 3> TargetIDSplit;
732   TargetID.split(TargetIDSplit, ':');
733 
734   for (const auto &FeatureString : TargetIDSplit) {
735     if (FeatureString.startswith("xnack"))
736       XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
737     if (FeatureString.startswith("sramecc"))
738       SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
739   }
740 }
741 
742 std::string AMDGPUTargetID::toString() const {
743   std::string StringRep;
744   raw_string_ostream StreamRep(StringRep);
745 
746   auto TargetTriple = STI.getTargetTriple();
747   auto Version = getIsaVersion(STI.getCPU());
748 
749   StreamRep << TargetTriple.getArchName() << '-'
750             << TargetTriple.getVendorName() << '-'
751             << TargetTriple.getOSName() << '-'
752             << TargetTriple.getEnvironmentName() << '-';
753 
754   std::string Processor;
  // TODO: The following else branch is only needed because various alias
  // names were used for GPUs up until GFX9 (e.g. 'fiji' is the same as
  // 'gfx803'). Remove it once all aliases are removed from GCNProcessors.td.
758   if (Version.Major >= 9)
759     Processor = STI.getCPU().str();
760   else
761     Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
762                  Twine(Version.Stepping))
763                     .str();
764 
765   std::string Features;
766   if (STI.getTargetTriple().getOS() == Triple::AMDHSA) {
767     switch (CodeObjectVersion) {
768     case AMDGPU::AMDHSA_COV2:
      // Code object V2 only supported specific processors and had fixed
      // settings for XNACK.
771       if (Processor == "gfx600") {
772       } else if (Processor == "gfx601") {
773       } else if (Processor == "gfx602") {
774       } else if (Processor == "gfx700") {
775       } else if (Processor == "gfx701") {
776       } else if (Processor == "gfx702") {
777       } else if (Processor == "gfx703") {
778       } else if (Processor == "gfx704") {
779       } else if (Processor == "gfx705") {
780       } else if (Processor == "gfx801") {
781         if (!isXnackOnOrAny())
782           report_fatal_error(
783               "AMD GPU code object V2 does not support processor " +
784               Twine(Processor) + " without XNACK");
785       } else if (Processor == "gfx802") {
786       } else if (Processor == "gfx803") {
787       } else if (Processor == "gfx805") {
788       } else if (Processor == "gfx810") {
789         if (!isXnackOnOrAny())
790           report_fatal_error(
791               "AMD GPU code object V2 does not support processor " +
792               Twine(Processor) + " without XNACK");
793       } else if (Processor == "gfx900") {
794         if (isXnackOnOrAny())
795           Processor = "gfx901";
796       } else if (Processor == "gfx902") {
797         if (isXnackOnOrAny())
798           Processor = "gfx903";
799       } else if (Processor == "gfx904") {
800         if (isXnackOnOrAny())
801           Processor = "gfx905";
802       } else if (Processor == "gfx906") {
803         if (isXnackOnOrAny())
804           Processor = "gfx907";
805       } else if (Processor == "gfx90c") {
806         if (isXnackOnOrAny())
807           report_fatal_error(
808               "AMD GPU code object V2 does not support processor " +
809               Twine(Processor) + " with XNACK being ON or ANY");
810       } else {
811         report_fatal_error(
812             "AMD GPU code object V2 does not support processor " +
813             Twine(Processor));
814       }
815       break;
816     case AMDGPU::AMDHSA_COV3:
817       // xnack.
818       if (isXnackOnOrAny())
819         Features += "+xnack";
      // In code object v2 and v3, the "sramecc" feature was spelled with a
      // hyphen ("sram-ecc").
822       if (isSramEccOnOrAny())
823         Features += "+sram-ecc";
824       break;
825     case AMDGPU::AMDHSA_COV4:
826     case AMDGPU::AMDHSA_COV5:
827       // sramecc.
828       if (getSramEccSetting() == TargetIDSetting::Off)
829         Features += ":sramecc-";
830       else if (getSramEccSetting() == TargetIDSetting::On)
831         Features += ":sramecc+";
832       // xnack.
833       if (getXnackSetting() == TargetIDSetting::Off)
834         Features += ":xnack-";
835       else if (getXnackSetting() == TargetIDSetting::On)
836         Features += ":xnack+";
837       break;
838     default:
839       break;
840     }
841   }
842 
843   StreamRep << Processor << Features;
844 
845   StreamRep.flush();
846   return StringRep;
847 }
848 
849 unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
850   if (STI->getFeatureBits().test(FeatureWavefrontSize16))
851     return 16;
852   if (STI->getFeatureBits().test(FeatureWavefrontSize32))
853     return 32;
854 
855   return 64;
856 }
857 
858 unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
859   unsigned BytesPerCU = 0;
860   if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
861     BytesPerCU = 32768;
862   if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
863     BytesPerCU = 65536;
864 
865   // "Per CU" really means "per whatever functional block the waves of a
866   // workgroup must share". So the effective local memory size is doubled in
867   // WGP mode on gfx10.
868   if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
869     BytesPerCU *= 2;
870 
871   return BytesPerCU;
872 }
873 
874 unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI) {
875   if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
876     return 32768;
877   if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
878     return 65536;
879   return 0;
880 }
881 
882 unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
883   // "Per CU" really means "per whatever functional block the waves of a
884   // workgroup must share". For gfx10 in CU mode this is the CU, which contains
885   // two SIMDs.
886   if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
887     return 2;
888   // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP contains
889   // two CUs, so a total of four SIMDs.
890   return 4;
891 }
892 
893 unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
894                                unsigned FlatWorkGroupSize) {
895   assert(FlatWorkGroupSize != 0);
896   if (STI->getTargetTriple().getArch() != Triple::amdgcn)
897     return 8;
898   unsigned MaxWaves = getMaxWavesPerEU(STI) * getEUsPerCU(STI);
899   unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
900   if (N == 1) {
901     // Single-wave workgroups don't consume barrier resources.
902     return MaxWaves;
903   }
904 
905   unsigned MaxBarriers = 16;
906   if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
907     MaxBarriers = 32;
908 
909   return std::min(MaxWaves / N, MaxBarriers);
910 }
911 
912 unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
913   return 1;
914 }
915 
916 unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
917   // FIXME: Need to take scratch memory into account.
918   if (isGFX90A(*STI))
919     return 8;
920   if (!isGFX10Plus(*STI))
921     return 10;
922   return hasGFX10_3Insts(*STI) ? 16 : 20;
923 }
924 
925 unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
926                                    unsigned FlatWorkGroupSize) {
927   return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
928                     getEUsPerCU(STI));
929 }
930 
931 unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
932   return 1;
933 }
934 
935 unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
936   // Some subtargets allow encoding 2048, but this isn't tested or supported.
937   return 1024;
938 }
939 
940 unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
941                               unsigned FlatWorkGroupSize) {
942   return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
943 }
944 
945 unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
946   IsaVersion Version = getIsaVersion(STI->getCPU());
947   if (Version.Major >= 10)
948     return getAddressableNumSGPRs(STI);
949   if (Version.Major >= 8)
950     return 16;
951   return 8;
952 }
953 
954 unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
955   return 8;
956 }
957 
958 unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
959   IsaVersion Version = getIsaVersion(STI->getCPU());
960   if (Version.Major >= 8)
961     return 800;
962   return 512;
963 }
964 
965 unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
966   if (STI->getFeatureBits().test(FeatureSGPRInitBug))
967     return FIXED_NUM_SGPRS_FOR_INIT_BUG;
968 
969   IsaVersion Version = getIsaVersion(STI->getCPU());
970   if (Version.Major >= 10)
971     return 106;
972   if (Version.Major >= 8)
973     return 102;
974   return 104;
975 }
976 
977 unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
978   assert(WavesPerEU != 0);
979 
980   IsaVersion Version = getIsaVersion(STI->getCPU());
981   if (Version.Major >= 10)
982     return 0;
983 
984   if (WavesPerEU >= getMaxWavesPerEU(STI))
985     return 0;
986 
987   unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
988   if (STI->getFeatureBits().test(FeatureTrapHandler))
989     MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
990   MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
991   return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
992 }
993 
994 unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
995                         bool Addressable) {
996   assert(WavesPerEU != 0);
997 
998   unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
999   IsaVersion Version = getIsaVersion(STI->getCPU());
1000   if (Version.Major >= 10)
1001     return Addressable ? AddressableNumSGPRs : 108;
1002   if (Version.Major >= 8 && !Addressable)
1003     AddressableNumSGPRs = 112;
1004   unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
1005   if (STI->getFeatureBits().test(FeatureTrapHandler))
1006     MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
1007   MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
1008   return std::min(MaxNumSGPRs, AddressableNumSGPRs);
1009 }
1010 
1011 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
1012                           bool FlatScrUsed, bool XNACKUsed) {
1013   unsigned ExtraSGPRs = 0;
1014   if (VCCUsed)
1015     ExtraSGPRs = 2;
1016 
1017   IsaVersion Version = getIsaVersion(STI->getCPU());
1018   if (Version.Major >= 10)
1019     return ExtraSGPRs;
1020 
1021   if (Version.Major < 8) {
1022     if (FlatScrUsed)
1023       ExtraSGPRs = 4;
1024   } else {
1025     if (XNACKUsed)
1026       ExtraSGPRs = 4;
1027 
1028     if (FlatScrUsed ||
1029         STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
1030       ExtraSGPRs = 6;
1031   }
1032 
1033   return ExtraSGPRs;
1034 }
1035 
1036 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
1037                           bool FlatScrUsed) {
1038   return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
1039                           STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
1040 }
1041 
1042 unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
1043   NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is the actual number of SGPR blocks minus 1.
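  // For example, with an encoding granule of 8, NumSGPRs = 20 is rounded up
  // to 24 and encoded as 2 (three 8-SGPR blocks minus one).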
1045   return NumSGPRs / getSGPREncodingGranule(STI) - 1;
1046 }
1047 
1048 unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
1049                              std::optional<bool> EnableWavefrontSize32) {
1050   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1051     return 8;
1052 
1053   bool IsWave32 = EnableWavefrontSize32 ?
1054       *EnableWavefrontSize32 :
1055       STI->getFeatureBits().test(FeatureWavefrontSize32);
1056 
1057   if (STI->getFeatureBits().test(FeatureGFX11FullVGPRs))
1058     return IsWave32 ? 24 : 12;
1059 
1060   if (hasGFX10_3Insts(*STI))
1061     return IsWave32 ? 16 : 8;
1062 
1063   return IsWave32 ? 8 : 4;
1064 }
1065 
1066 unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
1067                                 std::optional<bool> EnableWavefrontSize32) {
1068   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1069     return 8;
1070 
1071   bool IsWave32 = EnableWavefrontSize32 ?
1072       *EnableWavefrontSize32 :
1073       STI->getFeatureBits().test(FeatureWavefrontSize32);
1074 
1075   return IsWave32 ? 8 : 4;
1076 }
1077 
1078 unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
1079   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1080     return 512;
1081   if (!isGFX10Plus(*STI))
1082     return 256;
1083   bool IsWave32 = STI->getFeatureBits().test(FeatureWavefrontSize32);
1084   if (STI->getFeatureBits().test(FeatureGFX11FullVGPRs))
1085     return IsWave32 ? 1536 : 768;
1086   return IsWave32 ? 1024 : 512;
1087 }
1088 
1089 unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
1090   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
1091     return 512;
1092   return 256;
1093 }
1094 
1095 unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
1096                                       unsigned NumVGPRs) {
1097   unsigned MaxWaves = getMaxWavesPerEU(STI);
1098   unsigned Granule = getVGPRAllocGranule(STI);
1099   if (NumVGPRs < Granule)
1100     return MaxWaves;
1101   unsigned RoundedRegs = alignTo(NumVGPRs, Granule);
1102   return std::min(std::max(getTotalNumVGPRs(STI) / RoundedRegs, 1u), MaxWaves);
1103 }
1104 
1105 unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
1106   assert(WavesPerEU != 0);
1107 
1108   unsigned MaxWavesPerEU = getMaxWavesPerEU(STI);
1109   if (WavesPerEU >= MaxWavesPerEU)
1110     return 0;
1111 
1112   unsigned TotNumVGPRs = getTotalNumVGPRs(STI);
1113   unsigned AddrsableNumVGPRs = getAddressableNumVGPRs(STI);
1114   unsigned Granule = getVGPRAllocGranule(STI);
1115   unsigned MaxNumVGPRs = alignDown(TotNumVGPRs / WavesPerEU, Granule);
1116 
1117   if (MaxNumVGPRs == alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))
1118     return 0;
1119 
1120   unsigned MinWavesPerEU = getNumWavesPerEUWithNumVGPRs(STI, AddrsableNumVGPRs);
1121   if (WavesPerEU < MinWavesPerEU)
1122     return getMinNumVGPRs(STI, MinWavesPerEU);
1123 
1124   unsigned MaxNumVGPRsNext = alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
1125   unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
1126   return std::min(MinNumVGPRs, AddrsableNumVGPRs);
1127 }
1128 
1129 unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
1130   assert(WavesPerEU != 0);
1131 
1132   unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
1133                                    getVGPRAllocGranule(STI));
1134   unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
1135   return std::min(MaxNumVGPRs, AddressableNumVGPRs);
1136 }
1137 
1138 unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
1139                           std::optional<bool> EnableWavefrontSize32) {
1140   NumVGPRs = alignTo(std::max(1u, NumVGPRs),
1141                      getVGPREncodingGranule(STI, EnableWavefrontSize32));
  // VGPRBlocks is the actual number of VGPR blocks minus 1.
1143   return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
1144 }
1145 
1146 } // end namespace IsaInfo
1147 
1148 void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
1149                                const MCSubtargetInfo *STI) {
1150   IsaVersion Version = getIsaVersion(STI->getCPU());
1151 
1152   memset(&Header, 0, sizeof(Header));
1153 
1154   Header.amd_kernel_code_version_major = 1;
1155   Header.amd_kernel_code_version_minor = 2;
1156   Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
1157   Header.amd_machine_version_major = Version.Major;
1158   Header.amd_machine_version_minor = Version.Minor;
1159   Header.amd_machine_version_stepping = Version.Stepping;
1160   Header.kernel_code_entry_byte_offset = sizeof(Header);
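  // wavefront_size is encoded as a log2 value: 2^6 == 64 lanes by default
  // (overridden to 2^5 == 32 for wave32 targets below).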
1161   Header.wavefront_size = 6;
1162 
  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
1165   Header.call_convention = -1;
1166 
1167   // These alignment values are specified in powers of two, so alignment =
1168   // 2^n.  The minimum alignment is 2^4 = 16.
1169   Header.kernarg_segment_alignment = 4;
1170   Header.group_segment_alignment = 4;
1171   Header.private_segment_alignment = 4;
1172 
1173   if (Version.Major >= 10) {
1174     if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
1175       Header.wavefront_size = 5;
1176       Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
1177     }
1178     Header.compute_pgm_resource_registers |=
1179       S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
1180       S_00B848_MEM_ORDERED(1);
1181   }
1182 }
1183 
1184 amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
1185     const MCSubtargetInfo *STI) {
1186   IsaVersion Version = getIsaVersion(STI->getCPU());
1187 
1188   amdhsa::kernel_descriptor_t KD;
1189   memset(&KD, 0, sizeof(KD));
1190 
1191   AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
1192                   amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
1193                   amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
1194   AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
1195                   amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
1196   AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
1197                   amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
1198   AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
1199                   amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
1200   if (Version.Major >= 10) {
1201     AMDHSA_BITS_SET(KD.kernel_code_properties,
1202                     amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
1203                     STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
1204     AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
1205                     amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
1206                     STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
1207     AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
1208                     amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
1209   }
1210   if (AMDGPU::isGFX90A(*STI)) {
1211     AMDHSA_BITS_SET(KD.compute_pgm_rsrc3,
1212                     amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
1213                     STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0);
1214   }
1215   return KD;
1216 }
1217 
1218 bool isGroupSegment(const GlobalValue *GV) {
1219   return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
1220 }
1221 
1222 bool isGlobalSegment(const GlobalValue *GV) {
1223   return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
1224 }
1225 
1226 bool isReadOnlySegment(const GlobalValue *GV) {
1227   unsigned AS = GV->getAddressSpace();
1228   return AS == AMDGPUAS::CONSTANT_ADDRESS ||
1229          AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
1230 }
1231 
1232 bool shouldEmitConstantsToTextSection(const Triple &TT) {
1233   return TT.getArch() == Triple::r600;
1234 }
1235 
1236 std::pair<unsigned, unsigned>
1237 getIntegerPairAttribute(const Function &F, StringRef Name,
1238                         std::pair<unsigned, unsigned> Default,
1239                         bool OnlyFirstRequired) {
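  // The attribute value is a string of the form "N" or "N,M" (e.g. "1,256").
  // With OnlyFirstRequired set, a missing second value leaves Default.second
  // in place.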
1240   Attribute A = F.getFnAttribute(Name);
1241   if (!A.isStringAttribute())
1242     return Default;
1243 
1244   LLVMContext &Ctx = F.getContext();
1245   std::pair<unsigned, unsigned> Ints = Default;
1246   std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
1247   if (Strs.first.trim().getAsInteger(0, Ints.first)) {
1248     Ctx.emitError("can't parse first integer attribute " + Name);
1249     return Default;
1250   }
1251   if (Strs.second.trim().getAsInteger(0, Ints.second)) {
1252     if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
1253       Ctx.emitError("can't parse second integer attribute " + Name);
1254       return Default;
1255     }
1256   }
1257 
1258   return Ints;
1259 }
1260 
1261 unsigned getVmcntBitMask(const IsaVersion &Version) {
1262   return (1 << (getVmcntBitWidthLo(Version.Major) +
1263                 getVmcntBitWidthHi(Version.Major))) -
1264          1;
1265 }
1266 
1267 unsigned getExpcntBitMask(const IsaVersion &Version) {
1268   return (1 << getExpcntBitWidth(Version.Major)) - 1;
1269 }
1270 
1271 unsigned getLgkmcntBitMask(const IsaVersion &Version) {
1272   return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
1273 }
1274 
1275 unsigned getWaitcntBitMask(const IsaVersion &Version) {
1276   unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
1277                                 getVmcntBitWidthLo(Version.Major));
1278   unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
1279                                getExpcntBitWidth(Version.Major));
1280   unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
1281                                 getLgkmcntBitWidth(Version.Major));
1282   unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
1283                                 getVmcntBitWidthHi(Version.Major));
1284   return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
1285 }
1286 
1287 unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1288   unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
1289                                 getVmcntBitWidthLo(Version.Major));
1290   unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
1291                                 getVmcntBitWidthHi(Version.Major));
1292   return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
1293 }
1294 
1295 unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
1296   return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
1297                     getExpcntBitWidth(Version.Major));
1298 }
1299 
1300 unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1301   return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
1302                     getLgkmcntBitWidth(Version.Major));
1303 }
1304 
1305 void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
1306                    unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
1307   Vmcnt = decodeVmcnt(Version, Waitcnt);
1308   Expcnt = decodeExpcnt(Version, Waitcnt);
1309   Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
1310 }
1311 
1312 Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
1313   Waitcnt Decoded;
1314   Decoded.VmCnt = decodeVmcnt(Version, Encoded);
1315   Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
1316   Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
1317   return Decoded;
1318 }
1319 
1320 unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
1321                      unsigned Vmcnt) {
1322   Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
1323                      getVmcntBitWidthLo(Version.Major));
1324   return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
1325                   getVmcntBitShiftHi(Version.Major),
1326                   getVmcntBitWidthHi(Version.Major));
1327 }
1328 
1329 unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
1330                       unsigned Expcnt) {
1331   return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
1332                   getExpcntBitWidth(Version.Major));
1333 }
1334 
1335 unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
1336                        unsigned Lgkmcnt) {
1337   return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
1338                   getLgkmcntBitWidth(Version.Major));
1339 }
1340 
1341 unsigned encodeWaitcnt(const IsaVersion &Version,
1342                        unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
1343   unsigned Waitcnt = getWaitcntBitMask(Version);
1344   Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
1345   Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
1346   Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
1347   return Waitcnt;
1348 }
1349 
1350 unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1351   return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
1352 }
1353 
1354 //===----------------------------------------------------------------------===//
1355 // Custom Operands.
1356 //
// A table of custom operands shall describe "primary" operand names first,
// followed by aliases, if any. It is not required but recommended to arrange
// operands so that the operand encoding matches the operand position in the
// table. This will make disassembly a bit more efficient.
// Unused slots in the table shall have an empty name.
1362 //
1363 //===----------------------------------------------------------------------===//
1364 
1365 template <class T>
1366 static bool isValidOpr(int Idx, const CustomOperand<T> OpInfo[], int OpInfoSize,
1367                        T Context) {
1368   return 0 <= Idx && Idx < OpInfoSize && !OpInfo[Idx].Name.empty() &&
1369          (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context));
1370 }
1371 
1372 template <class T>
1373 static int getOprIdx(std::function<bool(const CustomOperand<T> &)> Test,
1374                      const CustomOperand<T> OpInfo[], int OpInfoSize,
1375                      T Context) {
1376   int InvalidIdx = OPR_ID_UNKNOWN;
1377   for (int Idx = 0; Idx < OpInfoSize; ++Idx) {
1378     if (Test(OpInfo[Idx])) {
1379       if (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context))
1380         return Idx;
1381       InvalidIdx = OPR_ID_UNSUPPORTED;
1382     }
1383   }
1384   return InvalidIdx;
1385 }
1386 
1387 template <class T>
1388 static int getOprIdx(const StringRef Name, const CustomOperand<T> OpInfo[],
1389                      int OpInfoSize, T Context) {
1390   auto Test = [=](const CustomOperand<T> &Op) { return Op.Name == Name; };
1391   return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
1392 }
1393 
1394 template <class T>
1395 static int getOprIdx(int Id, const CustomOperand<T> OpInfo[], int OpInfoSize,
1396                      T Context, bool QuickCheck = true) {
1397   auto Test = [=](const CustomOperand<T> &Op) {
1398     return Op.Encoding == Id && !Op.Name.empty();
1399   };
1400   // This is an optimization that should work in most cases.
1401   // As a side effect, it may cause selection of an alias
1402   // instead of a primary operand name in case of sparse tables.
1403   if (QuickCheck && isValidOpr<T>(Id, OpInfo, OpInfoSize, Context) &&
1404       OpInfo[Id].Encoding == Id) {
1405     return Id;
1406   }
1407   return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
1408 }
1409 
1410 //===----------------------------------------------------------------------===//
1411 // Custom Operand Values
1412 //===----------------------------------------------------------------------===//
1413 
1414 static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
1415                                                 int Size,
1416                                                 const MCSubtargetInfo &STI) {
1417   unsigned Enc = 0;
1418   for (int Idx = 0; Idx < Size; ++Idx) {
1419     const auto &Op = Opr[Idx];
1420     if (Op.isSupported(STI))
1421       Enc |= Op.encode(Op.Default);
1422   }
1423   return Enc;
1424 }
1425 
1426 static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
1427                                             int Size, unsigned Code,
1428                                             bool &HasNonDefaultVal,
1429                                             const MCSubtargetInfo &STI) {
1430   unsigned UsedOprMask = 0;
1431   HasNonDefaultVal = false;
1432   for (int Idx = 0; Idx < Size; ++Idx) {
1433     const auto &Op = Opr[Idx];
1434     if (!Op.isSupported(STI))
1435       continue;
1436     UsedOprMask |= Op.getMask();
1437     unsigned Val = Op.decode(Code);
1438     if (!Op.isValid(Val))
1439       return false;
1440     HasNonDefaultVal |= (Val != Op.Default);
1441   }
1442   return (Code & ~UsedOprMask) == 0;
1443 }
1444 
1445 static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
1446                                 unsigned Code, int &Idx, StringRef &Name,
1447                                 unsigned &Val, bool &IsDefault,
1448                                 const MCSubtargetInfo &STI) {
1449   while (Idx < Size) {
1450     const auto &Op = Opr[Idx++];
1451     if (Op.isSupported(STI)) {
1452       Name = Op.Name;
1453       Val = Op.decode(Code);
1454       IsDefault = (Val == Op.Default);
1455       return true;
1456     }
1457   }
1458 
1459   return false;
1460 }
1461 
1462 static int encodeCustomOperandVal(const CustomOperandVal &Op,
1463                                   int64_t InputVal) {
1464   if (InputVal < 0 || InputVal > Op.Max)
1465     return OPR_VAL_INVALID;
1466   return Op.encode(InputVal);
1467 }
1468 
1469 static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
1470                                const StringRef Name, int64_t InputVal,
1471                                unsigned &UsedOprMask,
1472                                const MCSubtargetInfo &STI) {
1473   int InvalidId = OPR_ID_UNKNOWN;
1474   for (int Idx = 0; Idx < Size; ++Idx) {
1475     const auto &Op = Opr[Idx];
1476     if (Op.Name == Name) {
1477       if (!Op.isSupported(STI)) {
1478         InvalidId = OPR_ID_UNSUPPORTED;
1479         continue;
1480       }
1481       auto OprMask = Op.getMask();
1482       if (OprMask & UsedOprMask)
1483         return OPR_ID_DUPLICATE;
1484       UsedOprMask |= OprMask;
1485       return encodeCustomOperandVal(Op, InputVal);
1486     }
1487   }
1488   return InvalidId;
1489 }
1490 
1491 //===----------------------------------------------------------------------===//
1492 // DepCtr
1493 //===----------------------------------------------------------------------===//
1494 
1495 namespace DepCtr {
1496 
1497 int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
1498   static int Default = -1;
1499   if (Default == -1)
1500     Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
1501   return Default;
1502 }
1503 
1504 bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
1505                               const MCSubtargetInfo &STI) {
1506   return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
1507                                          HasNonDefaultVal, STI);
1508 }
1509 
1510 bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
1511                   bool &IsDefault, const MCSubtargetInfo &STI) {
1512   return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
1513                              IsDefault, STI);
1514 }
1515 
1516 int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
1517                  const MCSubtargetInfo &STI) {
1518   return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
1519                              STI);
1520 }
1521 
1522 unsigned decodeFieldVmVsrc(unsigned Encoded) {
1523   return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
1524 }
1525 
1526 unsigned decodeFieldVaVdst(unsigned Encoded) {
1527   return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
1528 }
1529 
1530 unsigned decodeFieldSaSdst(unsigned Encoded) {
1531   return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
1532 }
1533 
1534 unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc) {
1535   return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
1536 }
1537 
1538 unsigned encodeFieldVmVsrc(unsigned VmVsrc) {
1539   return encodeFieldVmVsrc(0xffff, VmVsrc);
1540 }
1541 
1542 unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst) {
1543   return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
1544 }
1545 
1546 unsigned encodeFieldVaVdst(unsigned VaVdst) {
1547   return encodeFieldVaVdst(0xffff, VaVdst);
1548 }
1549 
1550 unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst) {
1551   return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
1552 }
1553 
1554 unsigned encodeFieldSaSdst(unsigned SaSdst) {
1555   return encodeFieldSaSdst(0xffff, SaSdst);
1556 }
1557 
1558 } // namespace DepCtr
1559 
1560 //===----------------------------------------------------------------------===//
1561 // hwreg
1562 //===----------------------------------------------------------------------===//
1563 
1564 namespace Hwreg {
1565 
1566 int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) {
1567   int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Opr, OPR_SIZE, STI);
1568   return (Idx < 0) ? Idx : Opr[Idx].Encoding;
1569 }
1570 
1571 bool isValidHwreg(int64_t Id) {
1572   return 0 <= Id && isUInt<ID_WIDTH_>(Id);
1573 }
1574 
1575 bool isValidHwregOffset(int64_t Offset) {
1576   return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
1577 }
1578 
1579 bool isValidHwregWidth(int64_t Width) {
1580   return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
1581 }
1582 
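// Packs a hwreg operand from its register id, bit offset and bit width; the
// width is stored biased by one (WIDTH_M1). For example,
// encodeHwreg(Id, 0, 32) selects all 32 bits of register Id.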
1583 uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
1584   return (Id << ID_SHIFT_) |
1585          (Offset << OFFSET_SHIFT_) |
1586          ((Width - 1) << WIDTH_M1_SHIFT_);
1587 }
1588 
1589 StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
1590   int Idx = getOprIdx<const MCSubtargetInfo &>(Id, Opr, OPR_SIZE, STI);
1591   return (Idx < 0) ? "" : Opr[Idx].Name;
1592 }
1593 
void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset,
                 unsigned &Width) {
1595   Id = (Val & ID_MASK_) >> ID_SHIFT_;
1596   Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
1597   Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
1598 }
1599 
1600 } // namespace Hwreg
1601 
1602 //===----------------------------------------------------------------------===//
1603 // exp tgt
1604 //===----------------------------------------------------------------------===//
1605 
1606 namespace Exp {
1607 
1608 struct ExpTgt {
1609   StringLiteral Name;
1610   unsigned Tgt;
1611   unsigned MaxIndex;
1612 };
1613 
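// Symbolic export targets. Entries with MaxIndex > 0 describe a contiguous
// range of targets addressed by a numeric suffix (e.g. "mrt", "pos", "param");
// entries with MaxIndex == 0 are single targets.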
1614 static constexpr ExpTgt ExpTgtInfo[] = {
1615   {{"null"},           ET_NULL,            ET_NULL_MAX_IDX},
1616   {{"mrtz"},           ET_MRTZ,            ET_MRTZ_MAX_IDX},
1617   {{"prim"},           ET_PRIM,            ET_PRIM_MAX_IDX},
1618   {{"mrt"},            ET_MRT0,            ET_MRT_MAX_IDX},
1619   {{"pos"},            ET_POS0,            ET_POS_MAX_IDX},
1620   {{"dual_src_blend"}, ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
1621   {{"param"},          ET_PARAM0,          ET_PARAM_MAX_IDX},
1622 };
1623 
1624 bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
1625   for (const ExpTgt &Val : ExpTgtInfo) {
1626     if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
1627       Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
1628       Name = Val.Name;
1629       return true;
1630     }
1631   }
1632   return false;
1633 }
1634 
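// Parses a symbolic export target name, e.g. "pos3" -> ET_POS0 + 3. Indices
// above the target's MaxIndex and suffixes with leading zeroes are rejected.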
1635 unsigned getTgtId(const StringRef Name) {
1637   for (const ExpTgt &Val : ExpTgtInfo) {
1638     if (Val.MaxIndex == 0 && Name == Val.Name)
1639       return Val.Tgt;
1640 
1641     if (Val.MaxIndex > 0 && Name.startswith(Val.Name)) {
1642       StringRef Suffix = Name.drop_front(Val.Name.size());
1643 
1644       unsigned Id;
1645       if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
1646         return ET_INVALID;
1647 
      // Disallow leading zeroes.
1649       if (Suffix.size() > 1 && Suffix[0] == '0')
1650         return ET_INVALID;
1651 
1652       return Val.Tgt + Id;
1653     }
1654   }
1655   return ET_INVALID;
1656 }
1657 
1658 bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
1659   switch (Id) {
1660   case ET_NULL:
1661     return !isGFX11Plus(STI);
1662   case ET_POS4:
1663   case ET_PRIM:
1664     return isGFX10Plus(STI);
1665   case ET_DUAL_SRC_BLEND0:
1666   case ET_DUAL_SRC_BLEND1:
1667     return isGFX11Plus(STI);
1668   default:
1669     if (Id >= ET_PARAM0 && Id <= ET_PARAM31)
1670       return !isGFX11Plus(STI);
1671     return true;
1672   }
1673 }
1674 
1675 } // namespace Exp
1676 
1677 //===----------------------------------------------------------------------===//
1678 // MTBUF Format
1679 //===----------------------------------------------------------------------===//
1680 
1681 namespace MTBUFFormat {
1682 
1683 int64_t getDfmt(const StringRef Name) {
1684   for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
1685     if (Name == DfmtSymbolic[Id])
1686       return Id;
1687   }
1688   return DFMT_UNDEF;
1689 }
1690 
1691 StringRef getDfmtName(unsigned Id) {
1692   assert(Id <= DFMT_MAX);
1693   return DfmtSymbolic[Id];
1694 }
1695 
1696 static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
1697   if (isSI(STI) || isCI(STI))
1698     return NfmtSymbolicSICI;
1699   if (isVI(STI) || isGFX9(STI))
1700     return NfmtSymbolicVI;
1701   return NfmtSymbolicGFX10;
1702 }
1703 
1704 int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
1705   auto lookupTable = getNfmtLookupTable(STI);
1706   for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
1707     if (Name == lookupTable[Id])
1708       return Id;
1709   }
1710   return NFMT_UNDEF;
1711 }
1712 
1713 StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
1714   assert(Id <= NFMT_MAX);
1715   return getNfmtLookupTable(STI)[Id];
1716 }
1717 
1718 bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
1719   unsigned Dfmt;
1720   unsigned Nfmt;
1721   decodeDfmtNfmt(Id, Dfmt, Nfmt);
1722   return isValidNfmt(Nfmt, STI);
1723 }
1724 
1725 bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
1726   return !getNfmtName(Id, STI).empty();
1727 }
1728 
1729 int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
1730   return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
1731 }
1732 
1733 void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
1734   Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
1735   Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
1736 }
1737 
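// GFX10+ replaces the separate dfmt/nfmt pair with a single unified format
// encoding; the helpers below translate symbolic names and legacy dfmt/nfmt
// values using the per-generation UfmtSymbolic* and DfmtNfmt2UFmt* tables.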
1738 int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
1739   if (isGFX11Plus(STI)) {
1740     for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
1741       if (Name == UfmtSymbolicGFX11[Id])
1742         return Id;
1743     }
1744   } else {
1745     for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
1746       if (Name == UfmtSymbolicGFX10[Id])
1747         return Id;
1748     }
1749   }
1750   return UFMT_UNDEF;
1751 }
1752 
1753 StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
  if (isValidUnifiedFormat(Id, STI))
1755     return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
1756   return "";
1757 }
1758 
1759 bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
1760   return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
1761 }
1762 
1763 int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
1764                              const MCSubtargetInfo &STI) {
1765   int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
1766   if (isGFX11Plus(STI)) {
1767     for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
1768       if (Fmt == DfmtNfmt2UFmtGFX11[Id])
1769         return Id;
1770     }
1771   } else {
1772     for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
1773       if (Fmt == DfmtNfmt2UFmtGFX10[Id])
1774         return Id;
1775     }
1776   }
1777   return UFMT_UNDEF;
1778 }
1779 
1780 bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
1781   return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
1782 }
1783 
1784 unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
1785   if (isGFX10Plus(STI))
1786     return UFMT_DEFAULT;
1787   return DFMT_NFMT_DEFAULT;
1788 }
1789 
1790 } // namespace MTBUFFormat
1791 
1792 //===----------------------------------------------------------------------===//
1793 // SendMsg
1794 //===----------------------------------------------------------------------===//
1795 
1796 namespace SendMsg {
1797 
1798 static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
1799   return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
1800 }
1801 
1802 int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI) {
1803   int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Msg, MSG_SIZE, STI);
1804   return (Idx < 0) ? Idx : Msg[Idx].Encoding;
1805 }
1806 
1807 bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
1808   return (MsgId & ~(getMsgIdMask(STI))) == 0;
1809 }
1810 
1811 StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI) {
1812   int Idx = getOprIdx<const MCSubtargetInfo &>(MsgId, Msg, MSG_SIZE, STI);
1813   return (Idx < 0) ? "" : Msg[Idx].Name;
1814 }
1815 
1816 int64_t getMsgOpId(int64_t MsgId, const StringRef Name) {
1817   const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
1818   const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
1819   const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
1820   for (int i = F; i < L; ++i) {
1821     if (Name == S[i]) {
1822       return i;
1823     }
1824   }
1825   return OP_UNKNOWN_;
1826 }
1827 
1828 bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
1829                   bool Strict) {
1830   assert(isValidMsgId(MsgId, STI));
1831 
1832   if (!Strict)
1833     return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
1834 
1835   if (MsgId == ID_SYSMSG)
1836     return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_;
1837   if (!isGFX11Plus(STI)) {
1838     switch (MsgId) {
1839     case ID_GS_PreGFX11:
1840       return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP;
1841     case ID_GS_DONE_PreGFX11:
1842       return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_;
1843     }
1844   }
1845   return OpId == OP_NONE_;
1846 }
1847 
1848 StringRef getMsgOpName(int64_t MsgId, int64_t OpId,
1849                        const MCSubtargetInfo &STI) {
1850   assert(msgRequiresOp(MsgId, STI));
  return (MsgId == ID_SYSMSG) ? OpSysSymbolic[OpId] : OpGsSymbolic[OpId];
1852 }
1853 
1854 bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
1855                       const MCSubtargetInfo &STI, bool Strict) {
1856   assert(isValidMsgOp(MsgId, OpId, STI, Strict));
1857 
1858   if (!Strict)
1859     return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);
1860 
1861   if (!isGFX11Plus(STI)) {
1862     switch (MsgId) {
1863     case ID_GS_PreGFX11:
1864       return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
1865     case ID_GS_DONE_PreGFX11:
1866       return (OpId == OP_GS_NOP) ?
1867           (StreamId == STREAM_ID_NONE_) :
1868           (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
1869     }
1870   }
1871   return StreamId == STREAM_ID_NONE_;
1872 }
1873 
1874 bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
1875   return MsgId == ID_SYSMSG ||
1876       (!isGFX11Plus(STI) &&
1877        (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
1878 }
1879 
1880 bool msgSupportsStream(int64_t MsgId, int64_t OpId,
1881                        const MCSubtargetInfo &STI) {
1882   return !isGFX11Plus(STI) &&
1883       (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
1884       OpId != OP_GS_NOP;
1885 }
1886 
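// A sendmsg immediate carries the message id in its low bits (the mask is
// generation dependent); before GFX11 it also carries an operation id and a
// stream id at OP_SHIFT_ and STREAM_ID_SHIFT_.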
1887 void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
1888                uint16_t &StreamId, const MCSubtargetInfo &STI) {
1889   MsgId = Val & getMsgIdMask(STI);
1890   if (isGFX11Plus(STI)) {
1891     OpId = 0;
1892     StreamId = 0;
1893   } else {
1894     OpId = (Val & OP_MASK_) >> OP_SHIFT_;
1895     StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
1896   }
1897 }
1898 
1899 uint64_t encodeMsg(uint64_t MsgId,
1900                    uint64_t OpId,
1901                    uint64_t StreamId) {
1902   return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
1903 }
1904 
1905 } // namespace SendMsg
1906 
1907 //===----------------------------------------------------------------------===//
1908 //
1909 //===----------------------------------------------------------------------===//
1910 
1911 unsigned getInitialPSInputAddr(const Function &F) {
1912   return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);
1913 }
1914 
1915 bool getHasColorExport(const Function &F) {
  // As a safe default, always respond as if PS has color exports.
1917   return F.getFnAttributeAsParsedInteger(
1918              "amdgpu-color-export",
1919              F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
1920 }
1921 
1922 bool getHasDepthExport(const Function &F) {
1923   return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;
1924 }
1925 
1926 bool isShader(CallingConv::ID cc) {
  switch (cc) {
1928     case CallingConv::AMDGPU_VS:
1929     case CallingConv::AMDGPU_LS:
1930     case CallingConv::AMDGPU_HS:
1931     case CallingConv::AMDGPU_ES:
1932     case CallingConv::AMDGPU_GS:
1933     case CallingConv::AMDGPU_PS:
1934     case CallingConv::AMDGPU_CS:
1935       return true;
1936     default:
1937       return false;
1938   }
1939 }
1940 
1941 bool isGraphics(CallingConv::ID cc) {
1942   return isShader(cc) || cc == CallingConv::AMDGPU_Gfx;
1943 }
1944 
1945 bool isCompute(CallingConv::ID cc) {
1946   return !isGraphics(cc) || cc == CallingConv::AMDGPU_CS;
1947 }
1948 
1949 bool isEntryFunctionCC(CallingConv::ID CC) {
1950   switch (CC) {
1951   case CallingConv::AMDGPU_KERNEL:
1952   case CallingConv::SPIR_KERNEL:
1953   case CallingConv::AMDGPU_VS:
1954   case CallingConv::AMDGPU_GS:
1955   case CallingConv::AMDGPU_PS:
1956   case CallingConv::AMDGPU_CS:
1957   case CallingConv::AMDGPU_ES:
1958   case CallingConv::AMDGPU_HS:
1959   case CallingConv::AMDGPU_LS:
1960     return true;
1961   default:
1962     return false;
1963   }
1964 }
1965 
1966 bool isModuleEntryFunctionCC(CallingConv::ID CC) {
1967   switch (CC) {
1968   case CallingConv::AMDGPU_Gfx:
1969     return true;
1970   default:
1971     return isEntryFunctionCC(CC);
1972   }
1973 }
1974 
1975 bool isKernelCC(const Function *Func) {
1976   return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv());
1977 }
1978 
1979 bool hasXNACK(const MCSubtargetInfo &STI) {
1980   return STI.hasFeature(AMDGPU::FeatureXNACK);
1981 }
1982 
1983 bool hasSRAMECC(const MCSubtargetInfo &STI) {
1984   return STI.hasFeature(AMDGPU::FeatureSRAMECC);
1985 }
1986 
1987 bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&
         !STI.hasFeature(AMDGPU::FeatureR128A16);
1989 }
1990 
1991 bool hasA16(const MCSubtargetInfo &STI) {
1992   return STI.hasFeature(AMDGPU::FeatureA16);
1993 }
1994 
1995 bool hasG16(const MCSubtargetInfo &STI) {
1996   return STI.hasFeature(AMDGPU::FeatureG16);
1997 }
1998 
1999 bool hasPackedD16(const MCSubtargetInfo &STI) {
2000   return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&
2001          !isSI(STI);
2002 }
2003 
2004 unsigned getNSAMaxSize(const MCSubtargetInfo &STI) {
2005   auto Version = getIsaVersion(STI.getCPU());
2006   if (Version.Major == 10)
2007     return Version.Minor >= 3 ? 13 : 5;
2008   if (Version.Major == 11)
2009     return 5;
2010   return 0;
2011 }
2012 
2013 bool isSI(const MCSubtargetInfo &STI) {
2014   return STI.hasFeature(AMDGPU::FeatureSouthernIslands);
2015 }
2016 
2017 bool isCI(const MCSubtargetInfo &STI) {
2018   return STI.hasFeature(AMDGPU::FeatureSeaIslands);
2019 }
2020 
2021 bool isVI(const MCSubtargetInfo &STI) {
2022   return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
2023 }
2024 
2025 bool isGFX9(const MCSubtargetInfo &STI) {
2026   return STI.hasFeature(AMDGPU::FeatureGFX9);
2027 }
2028 
2029 bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
2030   return isGFX9(STI) || isGFX10(STI);
2031 }
2032 
2033 bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
2034   return isVI(STI) || isGFX9(STI) || isGFX10(STI);
2035 }
2036 
2037 bool isGFX8Plus(const MCSubtargetInfo &STI) {
2038   return isVI(STI) || isGFX9Plus(STI);
2039 }
2040 
2041 bool isGFX9Plus(const MCSubtargetInfo &STI) {
2042   return isGFX9(STI) || isGFX10Plus(STI);
2043 }
2044 
2045 bool isGFX10(const MCSubtargetInfo &STI) {
2046   return STI.hasFeature(AMDGPU::FeatureGFX10);
2047 }
2048 
2049 bool isGFX10Plus(const MCSubtargetInfo &STI) {
2050   return isGFX10(STI) || isGFX11Plus(STI);
2051 }
2052 
2053 bool isGFX11(const MCSubtargetInfo &STI) {
2054   return STI.hasFeature(AMDGPU::FeatureGFX11);
2055 }
2056 
2057 bool isGFX11Plus(const MCSubtargetInfo &STI) {
2058   return isGFX11(STI);
2059 }
2060 
2061 bool isNotGFX11Plus(const MCSubtargetInfo &STI) {
2062   return !isGFX11Plus(STI);
2063 }
2064 
2065 bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
2066   return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
2067 }
2068 
2069 bool isGFX10Before1030(const MCSubtargetInfo &STI) {
2070   return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
2071 }
2072 
2073 bool isGCN3Encoding(const MCSubtargetInfo &STI) {
2074   return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);
2075 }
2076 
2077 bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
2078   return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);
2079 }
2080 
2081 bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
2082   return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);
2083 }
2084 
2085 bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
2086   return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);
2087 }
2088 
2089 bool isGFX90A(const MCSubtargetInfo &STI) {
2090   return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
2091 }
2092 
2093 bool isGFX940(const MCSubtargetInfo &STI) {
2094   return STI.hasFeature(AMDGPU::FeatureGFX940Insts);
2095 }
2096 
2097 bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
2098   return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
2099 }
2100 
2101 bool hasMAIInsts(const MCSubtargetInfo &STI) {
2102   return STI.hasFeature(AMDGPU::FeatureMAIInsts);
2103 }
2104 
2105 bool hasVOPD(const MCSubtargetInfo &STI) {
2106   return STI.hasFeature(AMDGPU::FeatureVOPD);
2107 }
2108 
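// With gfx90a instructions, AGPRs are allocated after the VGPRs at a
// 4-register aligned offset, so both counts contribute to the total;
// otherwise the larger of the two counts is the total.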
2109 int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
2110                          int32_t ArgNumVGPR) {
2111   if (has90AInsts && ArgNumAGPR)
2112     return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
2113   return std::max(ArgNumVGPR, ArgNumAGPR);
2114 }
2115 
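// Reg is an SGPR if its first 32-bit sub-register (or, for 32-bit registers,
// the register itself) belongs to SReg_32, or if it is SCC.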
2116 bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
2117   const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
2118   const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
2119   return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
2120     Reg == AMDGPU::SCC;
2121 }
2122 
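// MAP_REG2REG expands to a switch over Reg. The CASE_* macros are defined
// twice below: first to map pseudo registers to the subtarget-specific MC
// registers (getMCReg), then inverted to map MC registers back to their
// pseudos (mc2PseudoReg).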
2123 #define MAP_REG2REG \
2124   using namespace AMDGPU; \
2125   switch(Reg) { \
2126   default: return Reg; \
2127   CASE_CI_VI(FLAT_SCR) \
2128   CASE_CI_VI(FLAT_SCR_LO) \
2129   CASE_CI_VI(FLAT_SCR_HI) \
2130   CASE_VI_GFX9PLUS(TTMP0) \
2131   CASE_VI_GFX9PLUS(TTMP1) \
2132   CASE_VI_GFX9PLUS(TTMP2) \
2133   CASE_VI_GFX9PLUS(TTMP3) \
2134   CASE_VI_GFX9PLUS(TTMP4) \
2135   CASE_VI_GFX9PLUS(TTMP5) \
2136   CASE_VI_GFX9PLUS(TTMP6) \
2137   CASE_VI_GFX9PLUS(TTMP7) \
2138   CASE_VI_GFX9PLUS(TTMP8) \
2139   CASE_VI_GFX9PLUS(TTMP9) \
2140   CASE_VI_GFX9PLUS(TTMP10) \
2141   CASE_VI_GFX9PLUS(TTMP11) \
2142   CASE_VI_GFX9PLUS(TTMP12) \
2143   CASE_VI_GFX9PLUS(TTMP13) \
2144   CASE_VI_GFX9PLUS(TTMP14) \
2145   CASE_VI_GFX9PLUS(TTMP15) \
2146   CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
2147   CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
2148   CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
2149   CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
2150   CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
2151   CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
2152   CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
2153   CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
2154   CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
2155   CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
2156   CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
2157   CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
2158   CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
2159   CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
2160   CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2161   CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
2162   CASE_GFXPRE11_GFX11PLUS(M0) \
2163   CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
2164   CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
2165   }
2166 
2167 #define CASE_CI_VI(node) \
2168   assert(!isSI(STI)); \
2169   case node: return isCI(STI) ? node##_ci : node##_vi;
2170 
2171 #define CASE_VI_GFX9PLUS(node) \
2172   case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
2173 
2174 #define CASE_GFXPRE11_GFX11PLUS(node) \
2175   case node: return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
2176 
2177 #define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
2178   case node: return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
2179 
2180 unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
2181   if (STI.getTargetTriple().getArch() == Triple::r600)
2182     return Reg;
2183   MAP_REG2REG
2184 }
2185 
2186 #undef CASE_CI_VI
2187 #undef CASE_VI_GFX9PLUS
2188 #undef CASE_GFXPRE11_GFX11PLUS
2189 #undef CASE_GFXPRE11_GFX11PLUS_TO
2190 
2191 #define CASE_CI_VI(node)   case node##_ci: case node##_vi:   return node;
2192 #define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node;
2193 #define CASE_GFXPRE11_GFX11PLUS(node) case node##_gfx11plus: case node##_gfxpre11: return node;
2194 #define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
2195 
2196 unsigned mc2PseudoReg(unsigned Reg) {
2197   MAP_REG2REG
2198 }
2199 
2200 bool isInlineValue(unsigned Reg) {
2201   switch (Reg) {
2202   case AMDGPU::SRC_SHARED_BASE_LO:
2203   case AMDGPU::SRC_SHARED_BASE:
2204   case AMDGPU::SRC_SHARED_LIMIT_LO:
2205   case AMDGPU::SRC_SHARED_LIMIT:
2206   case AMDGPU::SRC_PRIVATE_BASE_LO:
2207   case AMDGPU::SRC_PRIVATE_BASE:
2208   case AMDGPU::SRC_PRIVATE_LIMIT_LO:
2209   case AMDGPU::SRC_PRIVATE_LIMIT:
2210   case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
2211     return true;
2212   case AMDGPU::SRC_VCCZ:
2213   case AMDGPU::SRC_EXECZ:
2214   case AMDGPU::SRC_SCC:
2215     return true;
2216   case AMDGPU::SGPR_NULL:
2217     return true;
2218   default:
2219     return false;
2220   }
2221 }
2222 
2223 #undef CASE_CI_VI
2224 #undef CASE_VI_GFX9PLUS
2225 #undef CASE_GFXPRE11_GFX11PLUS
2226 #undef CASE_GFXPRE11_GFX11PLUS_TO
2227 #undef MAP_REG2REG
2228 
2229 bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2230   assert(OpNo < Desc.NumOperands);
2231   unsigned OpType = Desc.operands()[OpNo].OperandType;
2232   return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
2233          OpType <= AMDGPU::OPERAND_SRC_LAST;
2234 }
2235 
2236 bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2237   assert(OpNo < Desc.NumOperands);
2238   unsigned OpType = Desc.operands()[OpNo].OperandType;
2239   return OpType >= AMDGPU::OPERAND_KIMM_FIRST &&
2240          OpType <= AMDGPU::OPERAND_KIMM_LAST;
2241 }
2242 
2243 bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2244   assert(OpNo < Desc.NumOperands);
2245   unsigned OpType = Desc.operands()[OpNo].OperandType;
2246   switch (OpType) {
2247   case AMDGPU::OPERAND_REG_IMM_FP32:
2248   case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
2249   case AMDGPU::OPERAND_REG_IMM_FP64:
2250   case AMDGPU::OPERAND_REG_IMM_FP16:
2251   case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
2252   case AMDGPU::OPERAND_REG_IMM_V2FP16:
2253   case AMDGPU::OPERAND_REG_IMM_V2INT16:
2254   case AMDGPU::OPERAND_REG_INLINE_C_FP32:
2255   case AMDGPU::OPERAND_REG_INLINE_C_FP64:
2256   case AMDGPU::OPERAND_REG_INLINE_C_FP16:
2257   case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
2258   case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
2259   case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
2260   case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
2261   case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
2262   case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
2263   case AMDGPU::OPERAND_REG_IMM_V2FP32:
2264   case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
2265   case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
2266     return true;
2267   default:
2268     return false;
2269   }
2270 }
2271 
2272 bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
2273   assert(OpNo < Desc.NumOperands);
2274   unsigned OpType = Desc.operands()[OpNo].OperandType;
2275   return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
2276          OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
2277 }
2278 
2279 // Avoid using MCRegisterClass::getSize, since that function will go away
2280 // (move from MC* level to Target* level). Return size in bits.
2281 unsigned getRegBitWidth(unsigned RCID) {
2282   switch (RCID) {
2283   case AMDGPU::VGPR_LO16RegClassID:
2284   case AMDGPU::VGPR_HI16RegClassID:
2285   case AMDGPU::SGPR_LO16RegClassID:
2286   case AMDGPU::AGPR_LO16RegClassID:
2287     return 16;
2288   case AMDGPU::SGPR_32RegClassID:
2289   case AMDGPU::VGPR_32RegClassID:
2290   case AMDGPU::VRegOrLds_32RegClassID:
2291   case AMDGPU::AGPR_32RegClassID:
2292   case AMDGPU::VS_32RegClassID:
2293   case AMDGPU::AV_32RegClassID:
2294   case AMDGPU::SReg_32RegClassID:
2295   case AMDGPU::SReg_32_XM0RegClassID:
2296   case AMDGPU::SRegOrLds_32RegClassID:
2297     return 32;
2298   case AMDGPU::SGPR_64RegClassID:
2299   case AMDGPU::VS_64RegClassID:
2300   case AMDGPU::SReg_64RegClassID:
2301   case AMDGPU::VReg_64RegClassID:
2302   case AMDGPU::AReg_64RegClassID:
2303   case AMDGPU::SReg_64_XEXECRegClassID:
2304   case AMDGPU::VReg_64_Align2RegClassID:
2305   case AMDGPU::AReg_64_Align2RegClassID:
2306   case AMDGPU::AV_64RegClassID:
2307   case AMDGPU::AV_64_Align2RegClassID:
2308     return 64;
2309   case AMDGPU::SGPR_96RegClassID:
2310   case AMDGPU::SReg_96RegClassID:
2311   case AMDGPU::VReg_96RegClassID:
2312   case AMDGPU::AReg_96RegClassID:
2313   case AMDGPU::VReg_96_Align2RegClassID:
2314   case AMDGPU::AReg_96_Align2RegClassID:
2315   case AMDGPU::AV_96RegClassID:
2316   case AMDGPU::AV_96_Align2RegClassID:
2317     return 96;
2318   case AMDGPU::SGPR_128RegClassID:
2319   case AMDGPU::SReg_128RegClassID:
2320   case AMDGPU::VReg_128RegClassID:
2321   case AMDGPU::AReg_128RegClassID:
2322   case AMDGPU::VReg_128_Align2RegClassID:
2323   case AMDGPU::AReg_128_Align2RegClassID:
2324   case AMDGPU::AV_128RegClassID:
2325   case AMDGPU::AV_128_Align2RegClassID:
2326     return 128;
2327   case AMDGPU::SGPR_160RegClassID:
2328   case AMDGPU::SReg_160RegClassID:
2329   case AMDGPU::VReg_160RegClassID:
2330   case AMDGPU::AReg_160RegClassID:
2331   case AMDGPU::VReg_160_Align2RegClassID:
2332   case AMDGPU::AReg_160_Align2RegClassID:
2333   case AMDGPU::AV_160RegClassID:
2334   case AMDGPU::AV_160_Align2RegClassID:
2335     return 160;
2336   case AMDGPU::SGPR_192RegClassID:
2337   case AMDGPU::SReg_192RegClassID:
2338   case AMDGPU::VReg_192RegClassID:
2339   case AMDGPU::AReg_192RegClassID:
2340   case AMDGPU::VReg_192_Align2RegClassID:
2341   case AMDGPU::AReg_192_Align2RegClassID:
2342   case AMDGPU::AV_192RegClassID:
2343   case AMDGPU::AV_192_Align2RegClassID:
2344     return 192;
2345   case AMDGPU::SGPR_224RegClassID:
2346   case AMDGPU::SReg_224RegClassID:
2347   case AMDGPU::VReg_224RegClassID:
2348   case AMDGPU::AReg_224RegClassID:
2349   case AMDGPU::VReg_224_Align2RegClassID:
2350   case AMDGPU::AReg_224_Align2RegClassID:
2351   case AMDGPU::AV_224RegClassID:
2352   case AMDGPU::AV_224_Align2RegClassID:
2353     return 224;
2354   case AMDGPU::SGPR_256RegClassID:
2355   case AMDGPU::SReg_256RegClassID:
2356   case AMDGPU::VReg_256RegClassID:
2357   case AMDGPU::AReg_256RegClassID:
2358   case AMDGPU::VReg_256_Align2RegClassID:
2359   case AMDGPU::AReg_256_Align2RegClassID:
2360   case AMDGPU::AV_256RegClassID:
2361   case AMDGPU::AV_256_Align2RegClassID:
2362     return 256;
2363   case AMDGPU::SGPR_288RegClassID:
2364   case AMDGPU::SReg_288RegClassID:
2365   case AMDGPU::VReg_288RegClassID:
2366   case AMDGPU::AReg_288RegClassID:
2367   case AMDGPU::VReg_288_Align2RegClassID:
2368   case AMDGPU::AReg_288_Align2RegClassID:
2369   case AMDGPU::AV_288RegClassID:
2370   case AMDGPU::AV_288_Align2RegClassID:
2371     return 288;
2372   case AMDGPU::SGPR_320RegClassID:
2373   case AMDGPU::SReg_320RegClassID:
2374   case AMDGPU::VReg_320RegClassID:
2375   case AMDGPU::AReg_320RegClassID:
2376   case AMDGPU::VReg_320_Align2RegClassID:
2377   case AMDGPU::AReg_320_Align2RegClassID:
2378   case AMDGPU::AV_320RegClassID:
2379   case AMDGPU::AV_320_Align2RegClassID:
2380     return 320;
2381   case AMDGPU::SGPR_352RegClassID:
2382   case AMDGPU::SReg_352RegClassID:
2383   case AMDGPU::VReg_352RegClassID:
2384   case AMDGPU::AReg_352RegClassID:
2385   case AMDGPU::VReg_352_Align2RegClassID:
2386   case AMDGPU::AReg_352_Align2RegClassID:
2387   case AMDGPU::AV_352RegClassID:
2388   case AMDGPU::AV_352_Align2RegClassID:
2389     return 352;
2390   case AMDGPU::SGPR_384RegClassID:
2391   case AMDGPU::SReg_384RegClassID:
2392   case AMDGPU::VReg_384RegClassID:
2393   case AMDGPU::AReg_384RegClassID:
2394   case AMDGPU::VReg_384_Align2RegClassID:
2395   case AMDGPU::AReg_384_Align2RegClassID:
2396   case AMDGPU::AV_384RegClassID:
2397   case AMDGPU::AV_384_Align2RegClassID:
2398     return 384;
2399   case AMDGPU::SGPR_512RegClassID:
2400   case AMDGPU::SReg_512RegClassID:
2401   case AMDGPU::VReg_512RegClassID:
2402   case AMDGPU::AReg_512RegClassID:
2403   case AMDGPU::VReg_512_Align2RegClassID:
2404   case AMDGPU::AReg_512_Align2RegClassID:
2405   case AMDGPU::AV_512RegClassID:
2406   case AMDGPU::AV_512_Align2RegClassID:
2407     return 512;
2408   case AMDGPU::SGPR_1024RegClassID:
2409   case AMDGPU::SReg_1024RegClassID:
2410   case AMDGPU::VReg_1024RegClassID:
2411   case AMDGPU::AReg_1024RegClassID:
2412   case AMDGPU::VReg_1024_Align2RegClassID:
2413   case AMDGPU::AReg_1024_Align2RegClassID:
2414   case AMDGPU::AV_1024RegClassID:
2415   case AMDGPU::AV_1024_Align2RegClassID:
2416     return 1024;
2417   default:
2418     llvm_unreachable("Unexpected register class");
2419   }
2420 }
2421 
2422 unsigned getRegBitWidth(const MCRegisterClass &RC) {
2423   return getRegBitWidth(RC.getID());
2424 }
2425 
2426 unsigned getRegBitWidth(const TargetRegisterClass &RC) {
2427   return getRegBitWidth(RC.getID());
2428 }
2429 
2430 unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
2431                            unsigned OpNo) {
2432   assert(OpNo < Desc.NumOperands);
2433   unsigned RCID = Desc.operands()[OpNo].RegClass;
2434   return getRegBitWidth(RCID) / 8;
2435 }
2436 
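// Inline FP constants: besides the small integers handled by
// isInlinableIntLiteral, hardware can encode 0.0, +/-0.5, +/-1.0, +/-2.0,
// +/-4.0 and, when the subtarget supports it (HasInv2Pi), 1/(2*pi) without
// consuming a literal slot.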
2437 bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
2438   if (isInlinableIntLiteral(Literal))
2439     return true;
2440 
2441   uint64_t Val = static_cast<uint64_t>(Literal);
2442   return (Val == llvm::bit_cast<uint64_t>(0.0)) ||
2443          (Val == llvm::bit_cast<uint64_t>(1.0)) ||
2444          (Val == llvm::bit_cast<uint64_t>(-1.0)) ||
2445          (Val == llvm::bit_cast<uint64_t>(0.5)) ||
2446          (Val == llvm::bit_cast<uint64_t>(-0.5)) ||
2447          (Val == llvm::bit_cast<uint64_t>(2.0)) ||
2448          (Val == llvm::bit_cast<uint64_t>(-2.0)) ||
2449          (Val == llvm::bit_cast<uint64_t>(4.0)) ||
2450          (Val == llvm::bit_cast<uint64_t>(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi); // 1/(2*pi)
2452 }
2453 
2454 bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
2455   if (isInlinableIntLiteral(Literal))
2456     return true;
2457 
2458   // The actual type of the operand does not seem to matter as long
2459   // as the bits match one of the inline immediate values.  For example:
2460   //
2461   // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
2462   // so it is a legal inline immediate.
2463   //
2464   // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
2465   // floating-point, so it is a legal inline immediate.
2466 
2467   uint32_t Val = static_cast<uint32_t>(Literal);
2468   return (Val == llvm::bit_cast<uint32_t>(0.0f)) ||
2469          (Val == llvm::bit_cast<uint32_t>(1.0f)) ||
2470          (Val == llvm::bit_cast<uint32_t>(-1.0f)) ||
2471          (Val == llvm::bit_cast<uint32_t>(0.5f)) ||
2472          (Val == llvm::bit_cast<uint32_t>(-0.5f)) ||
2473          (Val == llvm::bit_cast<uint32_t>(2.0f)) ||
2474          (Val == llvm::bit_cast<uint32_t>(-2.0f)) ||
2475          (Val == llvm::bit_cast<uint32_t>(4.0f)) ||
2476          (Val == llvm::bit_cast<uint32_t>(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi); // 1/(2*pi)
2478 }
2479 
2480 bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
2481   if (!HasInv2Pi)
2482     return false;
2483 
2484   if (isInlinableIntLiteral(Literal))
2485     return true;
2486 
2487   uint16_t Val = static_cast<uint16_t>(Literal);
2488   return Val == 0x3C00 || // 1.0
2489          Val == 0xBC00 || // -1.0
2490          Val == 0x3800 || // 0.5
2491          Val == 0xB800 || // -0.5
2492          Val == 0x4000 || // 2.0
2493          Val == 0xC000 || // -2.0
2494          Val == 0x4400 || // 4.0
2495          Val == 0xC400 || // -4.0
2496          Val == 0x3118;   // 1/2pi
2497 }
2498 
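// A packed 16-bit pair literal is inlinable if it reduces to a single
// inlinable 16-bit half (the other half being zero or a sign extension), or
// if both halves are identical and themselves inlinable.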
2499 bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
2500   assert(HasInv2Pi);
2501 
2502   if (isInt<16>(Literal) || isUInt<16>(Literal)) {
2503     int16_t Trunc = static_cast<int16_t>(Literal);
2504     return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
2505   }
2506   if (!(Literal & 0xffff))
2507     return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);
2508 
2509   int16_t Lo16 = static_cast<int16_t>(Literal);
2510   int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2511   return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
2512 }
2513 
2514 bool isInlinableIntLiteralV216(int32_t Literal) {
2515   int16_t Lo16 = static_cast<int16_t>(Literal);
2516   if (isInt<16>(Literal) || isUInt<16>(Literal))
2517     return isInlinableIntLiteral(Lo16);
2518 
2519   int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2520   if (!(Literal & 0xffff))
2521     return isInlinableIntLiteral(Hi16);
2522   return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
2523 }
2524 
2525 bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
2526   assert(HasInv2Pi);
2527 
2528   int16_t Lo16 = static_cast<int16_t>(Literal);
2529   if (isInt<16>(Literal) || isUInt<16>(Literal))
2530     return true;
2531 
2532   int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2533   if (!(Literal & 0xffff))
2534     return true;
2535   return Lo16 == Hi16;
2536 }
2537 
2538 bool isArgPassedInSGPR(const Argument *A) {
2539   const Function *F = A->getParent();
2540 
2541   // Arguments to compute shaders are never a source of divergence.
2542   CallingConv::ID CC = F->getCallingConv();
2543   switch (CC) {
2544   case CallingConv::AMDGPU_KERNEL:
2545   case CallingConv::SPIR_KERNEL:
2546     return true;
2547   case CallingConv::AMDGPU_VS:
2548   case CallingConv::AMDGPU_LS:
2549   case CallingConv::AMDGPU_HS:
2550   case CallingConv::AMDGPU_ES:
2551   case CallingConv::AMDGPU_GS:
2552   case CallingConv::AMDGPU_PS:
2553   case CallingConv::AMDGPU_CS:
2554   case CallingConv::AMDGPU_Gfx:
2555     // For non-compute shaders, SGPR inputs are marked with either inreg or
2556     // byval. Everything else is in VGPRs.
2557     return A->hasAttribute(Attribute::InReg) ||
2558            A->hasAttribute(Attribute::ByVal);
2559   default:
2560     // TODO: Should calls support inreg for SGPR inputs?
2561     return false;
2562   }
2563 }
2564 
2565 bool isArgPassedInSGPR(const CallBase *CB, unsigned ArgNo) {
2566   // Arguments to compute shaders are never a source of divergence.
2567   CallingConv::ID CC = CB->getCallingConv();
2568   switch (CC) {
2569   case CallingConv::AMDGPU_KERNEL:
2570   case CallingConv::SPIR_KERNEL:
2571     return true;
2572   case CallingConv::AMDGPU_VS:
2573   case CallingConv::AMDGPU_LS:
2574   case CallingConv::AMDGPU_HS:
2575   case CallingConv::AMDGPU_ES:
2576   case CallingConv::AMDGPU_GS:
2577   case CallingConv::AMDGPU_PS:
2578   case CallingConv::AMDGPU_CS:
2579   case CallingConv::AMDGPU_Gfx:
2580     // For non-compute shaders, SGPR inputs are marked with either inreg or
2581     // byval. Everything else is in VGPRs.
2582     return CB->paramHasAttr(ArgNo, Attribute::InReg) ||
2583            CB->paramHasAttr(ArgNo, Attribute::ByVal);
2584   default:
2585     // TODO: Should calls support inreg for SGPR inputs?
2586     return false;
2587   }
2588 }
2589 
2590 static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
2591   return isGCN3Encoding(ST) || isGFX10Plus(ST);
2592 }
2593 
2594 static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
2595   return isGFX9Plus(ST);
2596 }
2597 
2598 bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
2599                                       int64_t EncodedOffset) {
2600   return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
2601                                : isUInt<8>(EncodedOffset);
2602 }
2603 
2604 bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
2605                                     int64_t EncodedOffset,
2606                                     bool IsBuffer) {
2607   return !IsBuffer &&
2608          hasSMRDSignedImmOffset(ST) &&
2609          isInt<21>(EncodedOffset);
2610 }
2611 
2612 static bool isDwordAligned(uint64_t ByteOffset) {
2613   return (ByteOffset & 3) == 0;
2614 }
2615 
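// SMRD/SMEM immediate offsets are in bytes on subtargets with byte-addressed
// SMEM (GCN3 encoding and GFX10+) and in dwords otherwise, so dword-offset
// subtargets require a dword-aligned byte offset.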
2616 uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
2617                                 uint64_t ByteOffset) {
2618   if (hasSMEMByteOffset(ST))
2619     return ByteOffset;
2620 
2621   assert(isDwordAligned(ByteOffset));
2622   return ByteOffset >> 2;
2623 }
2624 
2625 std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
2626                                             int64_t ByteOffset, bool IsBuffer) {
2627   // The signed version is always a byte offset.
2628   if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
2629     assert(hasSMEMByteOffset(ST));
2630     return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
2631                                  : std::nullopt;
2632   }
2633 
2634   if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
2635     return std::nullopt;
2636 
2637   int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
2638   return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
2639              ? std::optional<int64_t>(EncodedOffset)
2640              : std::nullopt;
2641 }
2642 
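// The 32-bit literal SMRD offset encoding only exists on CI; for any other
// subtarget this returns std::nullopt.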
2643 std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
2644                                                      int64_t ByteOffset) {
2645   if (!isCI(ST) || !isDwordAligned(ByteOffset))
2646     return std::nullopt;
2647 
2648   int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
2649   return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
2650                                    : std::nullopt;
2651 }
2652 
2653 unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST) {
2654   // Address offset is 12-bit signed for GFX10, 13-bit for GFX9 and GFX11+.
2655   if (AMDGPU::isGFX10(ST))
2656     return 12;
2657 
2658   return 13;
2659 }
2660 
2661 namespace {
2662 
2663 struct SourceOfDivergence {
2664   unsigned Intr;
2665 };
2666 const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
2667 
2668 struct AlwaysUniform {
2669   unsigned Intr;
2670 };
2671 const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);
2672 
2673 #define GET_SourcesOfDivergence_IMPL
2674 #define GET_UniformIntrinsics_IMPL
2675 #define GET_Gfx9BufferFormat_IMPL
2676 #define GET_Gfx10BufferFormat_IMPL
2677 #define GET_Gfx11PlusBufferFormat_IMPL
2678 #include "AMDGPUGenSearchableTables.inc"
2679 
2680 } // end anonymous namespace
2681 
2682 bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
2683   return lookupSourceOfDivergence(IntrID);
2684 }
2685 
2686 bool isIntrinsicAlwaysUniform(unsigned IntrID) {
2687   return lookupAlwaysUniform(IntrID);
2688 }
2689 
2690 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
2691                                                   uint8_t NumComponents,
2692                                                   uint8_t NumFormat,
2693                                                   const MCSubtargetInfo &STI) {
2694   return isGFX11Plus(STI)
2695              ? getGfx11PlusBufferFormatInfo(BitsPerComp, NumComponents,
2696                                             NumFormat)
2697              : isGFX10(STI) ? getGfx10BufferFormatInfo(BitsPerComp,
2698                                                        NumComponents, NumFormat)
2699                             : getGfx9BufferFormatInfo(BitsPerComp,
2700                                                       NumComponents, NumFormat);
2701 }
2702 
2703 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
2704                                                   const MCSubtargetInfo &STI) {
2705   return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
2706                           : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
2707                                          : getGfx9BufferFormatInfo(Format);
2708 }
2709 
2710 } // namespace AMDGPU
2711 
2712 raw_ostream &operator<<(raw_ostream &OS,
2713                         const AMDGPU::IsaInfo::TargetIDSetting S) {
2714   switch (S) {
2715   case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported):
2716     OS << "Unsupported";
2717     break;
2718   case (AMDGPU::IsaInfo::TargetIDSetting::Any):
2719     OS << "Any";
2720     break;
2721   case (AMDGPU::IsaInfo::TargetIDSetting::Off):
2722     OS << "Off";
2723     break;
2724   case (AMDGPU::IsaInfo::TargetIDSetting::On):
2725     OS << "On";
2726     break;
2727   }
2728   return OS;
2729 }
2730 
2731 } // namespace llvm
2732