1 //===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AMDGPUBaseInfo.h"
10 #include "AMDGPU.h"
11 #include "AMDGPUAsmUtils.h"
12 #include "AMDKernelCodeT.h"
13 #include "GCNSubtarget.h"
14 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
15 #include "llvm/BinaryFormat/ELF.h"
16 #include "llvm/IR/Attributes.h"
17 #include "llvm/IR/Function.h"
18 #include "llvm/IR/GlobalValue.h"
19 #include "llvm/IR/IntrinsicsAMDGPU.h"
20 #include "llvm/IR/IntrinsicsR600.h"
21 #include "llvm/IR/LLVMContext.h"
22 #include "llvm/MC/MCSubtargetInfo.h"
23 #include "llvm/Support/AMDHSAKernelDescriptor.h"
24 #include "llvm/Support/CommandLine.h"
25 #include "llvm/Support/TargetParser.h"
26 
27 #define GET_INSTRINFO_NAMED_OPS
28 #define GET_INSTRMAP_INFO
29 #include "AMDGPUGenInstrInfo.inc"
30 
31 static llvm::cl::opt<unsigned>
32     AmdhsaCodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden,
33                             llvm::cl::desc("AMDHSA Code Object Version"),
34                             llvm::cl::init(4));
35 
36 // TODO-GFX11: Remove this when full 16-bit codegen is implemented.
37 static llvm::cl::opt<bool>
38     LimitTo128VGPRs("amdgpu-limit-to-128-vgprs", llvm::cl::Hidden,
39                     llvm::cl::desc("Never use more than 128 VGPRs"));
40 
41 namespace {
42 
43 /// \returns Bit mask for given bit \p Shift and bit \p Width.
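/// For example, getBitMask(4, 3) returns 0x70 (three bits starting at bit 4).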
44 unsigned getBitMask(unsigned Shift, unsigned Width) {
45   return ((1 << Width) - 1) << Shift;
46 }
47 
48 /// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
49 ///
50 /// \returns Packed \p Dst.
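/// For example, packBits(0x5, 0xFF, 4, 3) replaces bits [6:4] of 0xFF with
/// 0b101 and returns 0xDF.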
51 unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
52   unsigned Mask = getBitMask(Shift, Width);
53   return ((Src << Shift) & Mask) | (Dst & ~Mask);
54 }
55 
56 /// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
57 ///
58 /// \returns Unpacked bits.
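/// For example, unpackBits(0xDF, 4, 3) extracts bits [6:4] and returns 0x5.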
59 unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
60   return (Src & getBitMask(Shift, Width)) >> Shift;
61 }
62 
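// The helpers below describe the bit layout of the s_waitcnt immediate.
// Pre-GFX11: vmcnt occupies bits [3:0] (plus [15:14] on GFX9/GFX10),
// expcnt bits [6:4], and lgkmcnt bits [11:8] ([13:8] on GFX10).
// GFX11+: expcnt [2:0], lgkmcnt [9:4], vmcnt [15:10].
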
63 /// \returns Vmcnt bit shift (lower bits).
64 unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
65   return VersionMajor >= 11 ? 10 : 0;
66 }
67 
68 /// \returns Vmcnt bit width (lower bits).
69 unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
70   return VersionMajor >= 11 ? 6 : 4;
71 }
72 
73 /// \returns Expcnt bit shift.
74 unsigned getExpcntBitShift(unsigned VersionMajor) {
75   return VersionMajor >= 11 ? 0 : 4;
76 }
77 
78 /// \returns Expcnt bit width.
79 unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }
80 
81 /// \returns Lgkmcnt bit shift.
82 unsigned getLgkmcntBitShift(unsigned VersionMajor) {
83   return VersionMajor >= 11 ? 4 : 8;
84 }
85 
86 /// \returns Lgkmcnt bit width.
87 unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
88   return VersionMajor >= 10 ? 6 : 4;
89 }
90 
91 /// \returns Vmcnt bit shift (higher bits).
92 unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }
93 
94 /// \returns Vmcnt bit width (higher bits).
95 unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
96   return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
97 }
98 
} // end anonymous namespace
100 
101 namespace llvm {
102 
103 namespace AMDGPU {
104 
105 Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
106   if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
107     return None;
108 
109   switch (AmdhsaCodeObjectVersion) {
110   case 2:
111     return ELF::ELFABIVERSION_AMDGPU_HSA_V2;
112   case 3:
113     return ELF::ELFABIVERSION_AMDGPU_HSA_V3;
114   case 4:
115     return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
116   case 5:
117     return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
118   default:
119     report_fatal_error(Twine("Unsupported AMDHSA Code Object Version ") +
120                        Twine(AmdhsaCodeObjectVersion));
121   }
122 }
123 
124 bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
125   if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
126     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
127   return false;
128 }
129 
130 bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
131   if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
132     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
133   return false;
134 }
135 
136 bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
137   if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
138     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
139   return false;
140 }
141 
142 bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
143   if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
144     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
145   return false;
146 }
147 
148 bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) {
149   return isHsaAbiVersion3(STI) || isHsaAbiVersion4(STI) ||
150          isHsaAbiVersion5(STI);
151 }
152 
153 unsigned getAmdhsaCodeObjectVersion() {
154   return AmdhsaCodeObjectVersion;
155 }
156 
157 unsigned getMultigridSyncArgImplicitArgPosition() {
158   switch (AmdhsaCodeObjectVersion) {
159   case 2:
160   case 3:
161   case 4:
162     return 48;
163   case 5:
164     return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
165   default:
166     llvm_unreachable("Unexpected code object version");
167     return 0;
168   }
169 }
170 
171 
172 // FIXME: All such magic numbers about the ABI should be in a
173 // central TD file.
174 unsigned getHostcallImplicitArgPosition() {
175   switch (AmdhsaCodeObjectVersion) {
176   case 2:
177   case 3:
178   case 4:
179     return 24;
180   case 5:
181     return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
182   default:
183     llvm_unreachable("Unexpected code object version");
184     return 0;
185   }
186 }
187 
188 #define GET_MIMGBaseOpcodesTable_IMPL
189 #define GET_MIMGDimInfoTable_IMPL
190 #define GET_MIMGInfoTable_IMPL
191 #define GET_MIMGLZMappingTable_IMPL
192 #define GET_MIMGMIPMappingTable_IMPL
193 #define GET_MIMGBiasMappingTable_IMPL
194 #define GET_MIMGOffsetMappingTable_IMPL
195 #define GET_MIMGG16MappingTable_IMPL
196 #define GET_MAIInstInfoTable_IMPL
197 #include "AMDGPUGenSearchableTables.inc"
198 
199 int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
200                   unsigned VDataDwords, unsigned VAddrDwords) {
201   const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
202                                              VDataDwords, VAddrDwords);
203   return Info ? Info->Opcode : -1;
204 }
205 
206 const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
207   const MIMGInfo *Info = getMIMGInfo(Opc);
208   return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
209 }
210 
211 int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
212   const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
213   const MIMGInfo *NewInfo =
214       getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
215                           NewChannels, OrigInfo->VAddrDwords);
216   return NewInfo ? NewInfo->Opcode : -1;
217 }
218 
219 unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
220                            const MIMGDimInfo *Dim, bool IsA16,
221                            bool IsG16Supported) {
222   unsigned AddrWords = BaseOpcode->NumExtraArgs;
223   unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
224                             (BaseOpcode->LodOrClampOrMip ? 1 : 0);
225   if (IsA16)
226     AddrWords += divideCeil(AddrComponents, 2);
227   else
228     AddrWords += AddrComponents;
229 
230   // Note: For subtargets that support A16 but not G16, enabling A16 also
  // enables 16-bit gradients.
232   // For subtargets that support A16 (operand) and G16 (done with a different
233   // instruction encoding), they are independent.
234 
235   if (BaseOpcode->Gradients) {
236     if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
      // There are two gradients per coordinate; we pack them separately.
      // For the 3D case we get:
      // (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
240       AddrWords += alignTo<2>(Dim->NumGradients / 2);
241     else
242       AddrWords += Dim->NumGradients;
243   }
244   return AddrWords;
245 }
246 
247 struct MUBUFInfo {
248   uint16_t Opcode;
249   uint16_t BaseOpcode;
250   uint8_t elements;
251   bool has_vaddr;
252   bool has_srsrc;
253   bool has_soffset;
254   bool IsBufferInv;
255 };
256 
257 struct MTBUFInfo {
258   uint16_t Opcode;
259   uint16_t BaseOpcode;
260   uint8_t elements;
261   bool has_vaddr;
262   bool has_srsrc;
263   bool has_soffset;
264 };
265 
266 struct SMInfo {
267   uint16_t Opcode;
268   bool IsBuffer;
269 };
270 
271 struct VOPInfo {
272   uint16_t Opcode;
273   bool IsSingle;
274 };
275 
276 struct VOPC64DPPInfo {
277   uint16_t Opcode;
278 };
279 
280 #define GET_MTBUFInfoTable_DECL
281 #define GET_MTBUFInfoTable_IMPL
282 #define GET_MUBUFInfoTable_DECL
283 #define GET_MUBUFInfoTable_IMPL
284 #define GET_SMInfoTable_DECL
285 #define GET_SMInfoTable_IMPL
286 #define GET_VOP1InfoTable_DECL
287 #define GET_VOP1InfoTable_IMPL
288 #define GET_VOP2InfoTable_DECL
289 #define GET_VOP2InfoTable_IMPL
290 #define GET_VOP3InfoTable_DECL
291 #define GET_VOP3InfoTable_IMPL
292 #define GET_VOPC64DPPTable_DECL
293 #define GET_VOPC64DPPTable_IMPL
294 #define GET_VOPC64DPP8Table_DECL
295 #define GET_VOPC64DPP8Table_IMPL
296 #define GET_WMMAOpcode2AddrMappingTable_DECL
297 #define GET_WMMAOpcode2AddrMappingTable_IMPL
298 #define GET_WMMAOpcode3AddrMappingTable_DECL
299 #define GET_WMMAOpcode3AddrMappingTable_IMPL
300 #include "AMDGPUGenSearchableTables.inc"
301 
302 int getMTBUFBaseOpcode(unsigned Opc) {
303   const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
304   return Info ? Info->BaseOpcode : -1;
305 }
306 
307 int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
308   const MTBUFInfo *Info = getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
309   return Info ? Info->Opcode : -1;
310 }
311 
312 int getMTBUFElements(unsigned Opc) {
313   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
314   return Info ? Info->elements : 0;
315 }
316 
317 bool getMTBUFHasVAddr(unsigned Opc) {
318   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
319   return Info ? Info->has_vaddr : false;
320 }
321 
322 bool getMTBUFHasSrsrc(unsigned Opc) {
323   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
324   return Info ? Info->has_srsrc : false;
325 }
326 
327 bool getMTBUFHasSoffset(unsigned Opc) {
328   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
329   return Info ? Info->has_soffset : false;
330 }
331 
332 int getMUBUFBaseOpcode(unsigned Opc) {
333   const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
334   return Info ? Info->BaseOpcode : -1;
335 }
336 
337 int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
338   const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
339   return Info ? Info->Opcode : -1;
340 }
341 
342 int getMUBUFElements(unsigned Opc) {
343   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
344   return Info ? Info->elements : 0;
345 }
346 
347 bool getMUBUFHasVAddr(unsigned Opc) {
348   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
349   return Info ? Info->has_vaddr : false;
350 }
351 
352 bool getMUBUFHasSrsrc(unsigned Opc) {
353   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
354   return Info ? Info->has_srsrc : false;
355 }
356 
357 bool getMUBUFHasSoffset(unsigned Opc) {
358   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
359   return Info ? Info->has_soffset : false;
360 }
361 
362 bool getMUBUFIsBufferInv(unsigned Opc) {
363   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
364   return Info ? Info->IsBufferInv : false;
365 }
366 
367 bool getSMEMIsBuffer(unsigned Opc) {
368   const SMInfo *Info = getSMEMOpcodeHelper(Opc);
369   return Info ? Info->IsBuffer : false;
370 }
371 
372 bool getVOP1IsSingle(unsigned Opc) {
373   const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
374   return Info ? Info->IsSingle : false;
375 }
376 
377 bool getVOP2IsSingle(unsigned Opc) {
378   const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
379   return Info ? Info->IsSingle : false;
380 }
381 
382 bool getVOP3IsSingle(unsigned Opc) {
383   const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
384   return Info ? Info->IsSingle : false;
385 }
386 
387 bool isVOPC64DPP(unsigned Opc) {
388   return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
389 }
390 
391 bool getMAIIsDGEMM(unsigned Opc) {
392   const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
393   return Info ? Info->is_dgemm : false;
394 }
395 
396 bool getMAIIsGFX940XDL(unsigned Opc) {
397   const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
398   return Info ? Info->is_gfx940_xdl : false;
399 }
400 
401 unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
402   const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
403   return Info ? Info->Opcode3Addr : ~0u;
404 }
405 
406 unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc) {
407   const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom3AddrOpcode(Opc);
408   return Info ? Info->Opcode2Addr : ~0u;
409 }
410 
411 // Wrapper for Tablegen'd function.  enum Subtarget is not defined in any
412 // header files, so we need to wrap it in a function that takes unsigned
413 // instead.
414 int getMCOpcode(uint16_t Opcode, unsigned Gen) {
415   return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
416 }
417 
418 namespace IsaInfo {
419 
420 AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
421     : STI(STI), XnackSetting(TargetIDSetting::Any),
422       SramEccSetting(TargetIDSetting::Any) {
423   if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
424     XnackSetting = TargetIDSetting::Unsupported;
425   if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
426     SramEccSetting = TargetIDSetting::Unsupported;
427 }
428 
429 void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
  // Check if xnack or sramecc is explicitly enabled or disabled.  In the
  // absence of the target features we assume we must generate code that can
  // run in any environment.
433   SubtargetFeatures Features(FS);
434   Optional<bool> XnackRequested;
435   Optional<bool> SramEccRequested;
436 
437   for (const std::string &Feature : Features.getFeatures()) {
438     if (Feature == "+xnack")
439       XnackRequested = true;
440     else if (Feature == "-xnack")
441       XnackRequested = false;
442     else if (Feature == "+sramecc")
443       SramEccRequested = true;
444     else if (Feature == "-sramecc")
445       SramEccRequested = false;
446   }
447 
448   bool XnackSupported = isXnackSupported();
449   bool SramEccSupported = isSramEccSupported();
450 
451   if (XnackRequested) {
452     if (XnackSupported) {
453       XnackSetting =
454           *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
455     } else {
      // If a specific xnack setting was requested and this GPU does not
      // support xnack, emit a warning. The setting will remain "Unsupported".
458       if (*XnackRequested) {
459         errs() << "warning: xnack 'On' was requested for a processor that does "
460                   "not support it!\n";
461       } else {
462         errs() << "warning: xnack 'Off' was requested for a processor that "
463                   "does not support it!\n";
464       }
465     }
466   }
467 
468   if (SramEccRequested) {
469     if (SramEccSupported) {
470       SramEccSetting =
471           *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
472     } else {
      // If a specific sramecc setting was requested and this GPU does not
      // support sramecc, emit a warning. The setting will remain
      // "Unsupported".
476       if (*SramEccRequested) {
477         errs() << "warning: sramecc 'On' was requested for a processor that "
478                   "does not support it!\n";
479       } else {
480         errs() << "warning: sramecc 'Off' was requested for a processor that "
481                   "does not support it!\n";
482       }
483     }
484   }
485 }
486 
487 static TargetIDSetting
488 getTargetIDSettingFromFeatureString(StringRef FeatureString) {
489   if (FeatureString.endswith("-"))
490     return TargetIDSetting::Off;
491   if (FeatureString.endswith("+"))
492     return TargetIDSetting::On;
493 
494   llvm_unreachable("Malformed feature string");
495 }
496 
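// Parse a target ID string, e.g. "gfx90a:sramecc-:xnack+", and update the
// xnack and sramecc settings from its feature suffixes.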
497 void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
498   SmallVector<StringRef, 3> TargetIDSplit;
499   TargetID.split(TargetIDSplit, ':');
500 
501   for (const auto &FeatureString : TargetIDSplit) {
502     if (FeatureString.startswith("xnack"))
503       XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
504     if (FeatureString.startswith("sramecc"))
505       SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
506   }
507 }
508 
509 std::string AMDGPUTargetID::toString() const {
510   std::string StringRep;
511   raw_string_ostream StreamRep(StringRep);
512 
513   auto TargetTriple = STI.getTargetTriple();
514   auto Version = getIsaVersion(STI.getCPU());
515 
516   StreamRep << TargetTriple.getArchName() << '-'
517             << TargetTriple.getVendorName() << '-'
518             << TargetTriple.getOSName() << '-'
519             << TargetTriple.getEnvironmentName() << '-';
520 
521   std::string Processor;
  // TODO: The following else branch exists because we used various alias
  // names for GPUs up until GFX9 (e.g. 'fiji' is the same as 'gfx803').
  // Remove it once all aliases are removed from GCNProcessors.td.
525   if (Version.Major >= 9)
526     Processor = STI.getCPU().str();
527   else
528     Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
529                  Twine(Version.Stepping))
530                     .str();
531 
532   std::string Features;
533   if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
534     switch (*HsaAbiVersion) {
535     case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
      // Code object V2 only supported specific processors and had fixed
      // settings for XNACK.
538       if (Processor == "gfx600") {
539       } else if (Processor == "gfx601") {
540       } else if (Processor == "gfx602") {
541       } else if (Processor == "gfx700") {
542       } else if (Processor == "gfx701") {
543       } else if (Processor == "gfx702") {
544       } else if (Processor == "gfx703") {
545       } else if (Processor == "gfx704") {
546       } else if (Processor == "gfx705") {
547       } else if (Processor == "gfx801") {
548         if (!isXnackOnOrAny())
549           report_fatal_error(
550               "AMD GPU code object V2 does not support processor " +
551               Twine(Processor) + " without XNACK");
552       } else if (Processor == "gfx802") {
553       } else if (Processor == "gfx803") {
554       } else if (Processor == "gfx805") {
555       } else if (Processor == "gfx810") {
556         if (!isXnackOnOrAny())
557           report_fatal_error(
558               "AMD GPU code object V2 does not support processor " +
559               Twine(Processor) + " without XNACK");
560       } else if (Processor == "gfx900") {
561         if (isXnackOnOrAny())
562           Processor = "gfx901";
563       } else if (Processor == "gfx902") {
564         if (isXnackOnOrAny())
565           Processor = "gfx903";
566       } else if (Processor == "gfx904") {
567         if (isXnackOnOrAny())
568           Processor = "gfx905";
569       } else if (Processor == "gfx906") {
570         if (isXnackOnOrAny())
571           Processor = "gfx907";
572       } else if (Processor == "gfx90c") {
573         if (isXnackOnOrAny())
574           report_fatal_error(
575               "AMD GPU code object V2 does not support processor " +
576               Twine(Processor) + " with XNACK being ON or ANY");
577       } else {
578         report_fatal_error(
579             "AMD GPU code object V2 does not support processor " +
580             Twine(Processor));
581       }
582       break;
583     case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
584       // xnack.
585       if (isXnackOnOrAny())
586         Features += "+xnack";
      // In code object v2 and v3, the "sramecc" feature was spelled with a
      // hyphen ("sram-ecc").
589       if (isSramEccOnOrAny())
590         Features += "+sram-ecc";
591       break;
592     case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
593     case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
594       // sramecc.
595       if (getSramEccSetting() == TargetIDSetting::Off)
596         Features += ":sramecc-";
597       else if (getSramEccSetting() == TargetIDSetting::On)
598         Features += ":sramecc+";
599       // xnack.
600       if (getXnackSetting() == TargetIDSetting::Off)
601         Features += ":xnack-";
602       else if (getXnackSetting() == TargetIDSetting::On)
603         Features += ":xnack+";
604       break;
605     default:
606       break;
607     }
608   }
609 
610   StreamRep << Processor << Features;
611 
612   StreamRep.flush();
613   return StringRep;
614 }
615 
616 unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
617   if (STI->getFeatureBits().test(FeatureWavefrontSize16))
618     return 16;
619   if (STI->getFeatureBits().test(FeatureWavefrontSize32))
620     return 32;
621 
622   return 64;
623 }
624 
625 unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
626   if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
627     return 32768;
628   if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
629     return 65536;
630 
631   return 0;
632 }
633 
634 unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
635   // "Per CU" really means "per whatever functional block the waves of a
636   // workgroup must share". For gfx10 in CU mode this is the CU, which contains
637   // two SIMDs.
638   if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
639     return 2;
640   // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP contains
641   // two CUs, so a total of four SIMDs.
642   return 4;
643 }
644 
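// The computation below assumes 40 wave slots per CU; for example, a
// 256-thread workgroup on a wave64 target needs 4 waves, so at most
// min(40 / 4, 16) = 10 such workgroups can be resident per CU.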
645 unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
646                                unsigned FlatWorkGroupSize) {
647   assert(FlatWorkGroupSize != 0);
648   if (STI->getTargetTriple().getArch() != Triple::amdgcn)
649     return 8;
650   unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
651   if (N == 1)
652     return 40;
653   N = 40 / N;
654   return std::min(N, 16u);
655 }
656 
657 unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
658   return 1;
659 }
660 
661 unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
662   // FIXME: Need to take scratch memory into account.
663   if (isGFX90A(*STI))
664     return 8;
665   if (!isGFX10Plus(*STI))
666     return 10;
667   return hasGFX10_3Insts(*STI) ? 16 : 20;
668 }
669 
670 unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
671                                    unsigned FlatWorkGroupSize) {
672   return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
673                     getEUsPerCU(STI));
674 }
675 
676 unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
677   return 1;
678 }
679 
680 unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
681   // Some subtargets allow encoding 2048, but this isn't tested or supported.
682   return 1024;
683 }
684 
685 unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
686                               unsigned FlatWorkGroupSize) {
687   return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
688 }
689 
690 unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
691   IsaVersion Version = getIsaVersion(STI->getCPU());
692   if (Version.Major >= 10)
693     return getAddressableNumSGPRs(STI);
694   if (Version.Major >= 8)
695     return 16;
696   return 8;
697 }
698 
699 unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
700   return 8;
701 }
702 
703 unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
704   IsaVersion Version = getIsaVersion(STI->getCPU());
705   if (Version.Major >= 8)
706     return 800;
707   return 512;
708 }
709 
710 unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
711   if (STI->getFeatureBits().test(FeatureSGPRInitBug))
712     return FIXED_NUM_SGPRS_FOR_INIT_BUG;
713 
714   IsaVersion Version = getIsaVersion(STI->getCPU());
715   if (Version.Major >= 10)
716     return 106;
717   if (Version.Major >= 8)
718     return 102;
719   return 104;
720 }
721 
722 unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
723   assert(WavesPerEU != 0);
724 
725   IsaVersion Version = getIsaVersion(STI->getCPU());
726   if (Version.Major >= 10)
727     return 0;
728 
729   if (WavesPerEU >= getMaxWavesPerEU(STI))
730     return 0;
731 
732   unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
733   if (STI->getFeatureBits().test(FeatureTrapHandler))
734     MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
735   MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
736   return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
737 }
738 
739 unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
740                         bool Addressable) {
741   assert(WavesPerEU != 0);
742 
743   unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
744   IsaVersion Version = getIsaVersion(STI->getCPU());
745   if (Version.Major >= 10)
746     return Addressable ? AddressableNumSGPRs : 108;
747   if (Version.Major >= 8 && !Addressable)
748     AddressableNumSGPRs = 112;
749   unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
750   if (STI->getFeatureBits().test(FeatureTrapHandler))
751     MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
752   MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
753   return std::min(MaxNumSGPRs, AddressableNumSGPRs);
754 }
755 
756 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
757                           bool FlatScrUsed, bool XNACKUsed) {
758   unsigned ExtraSGPRs = 0;
759   if (VCCUsed)
760     ExtraSGPRs = 2;
761 
762   IsaVersion Version = getIsaVersion(STI->getCPU());
763   if (Version.Major >= 10)
764     return ExtraSGPRs;
765 
766   if (Version.Major < 8) {
767     if (FlatScrUsed)
768       ExtraSGPRs = 4;
769   } else {
770     if (XNACKUsed)
771       ExtraSGPRs = 4;
772 
773     if (FlatScrUsed ||
774         STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
775       ExtraSGPRs = 6;
776   }
777 
778   return ExtraSGPRs;
779 }
780 
781 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
782                           bool FlatScrUsed) {
783   return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
784                           STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
785 }
786 
787 unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
788   NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
789   // SGPRBlocks is actual number of SGPR blocks minus 1.
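  // For example, with an encoding granule of 8, NumSGPRs = 37 is rounded up
  // to 40 and encoded as 40 / 8 - 1 = 4.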
790   return NumSGPRs / getSGPREncodingGranule(STI) - 1;
791 }
792 
793 unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
794                              Optional<bool> EnableWavefrontSize32) {
795   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
796     return 8;
797 
798   bool IsWave32 = EnableWavefrontSize32 ?
799       *EnableWavefrontSize32 :
800       STI->getFeatureBits().test(FeatureWavefrontSize32);
801 
802   if (hasGFX10_3Insts(*STI))
803     return IsWave32 ? 16 : 8;
804 
805   return IsWave32 ? 8 : 4;
806 }
807 
808 unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
809                                 Optional<bool> EnableWavefrontSize32) {
810   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
811     return 8;
812 
813   bool IsWave32 = EnableWavefrontSize32 ?
814       *EnableWavefrontSize32 :
815       STI->getFeatureBits().test(FeatureWavefrontSize32);
816 
817   return IsWave32 ? 8 : 4;
818 }
819 
820 unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
821   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
822     return 512;
823   if (!isGFX10Plus(*STI))
824     return 256;
825   return STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1024 : 512;
826 }
827 
828 unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
829   if (LimitTo128VGPRs.getNumOccurrences() ? LimitTo128VGPRs
830                                           : isGFX11Plus(*STI)) {
831     // GFX11 changes the encoding of 16-bit operands in VOP1/2/C instructions
832     // such that values 128..255 no longer mean v128..v255, they mean
833     // v0.hi..v127.hi instead. Until the compiler understands this, it is not
834     // safe to use v128..v255.
835     // TODO-GFX11: Remove this when full 16-bit codegen is implemented.
836     return 128;
837   }
838   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
839     return 512;
840   return 256;
841 }
842 
843 unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
844   assert(WavesPerEU != 0);
845 
846   if (WavesPerEU >= getMaxWavesPerEU(STI))
847     return 0;
848   unsigned MinNumVGPRs =
849       alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
850                 getVGPRAllocGranule(STI)) + 1;
851   return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
852 }
853 
854 unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
855   assert(WavesPerEU != 0);
856 
857   unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
858                                    getVGPRAllocGranule(STI));
859   unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
860   return std::min(MaxNumVGPRs, AddressableNumVGPRs);
861 }
862 
863 unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
864                           Optional<bool> EnableWavefrontSize32) {
865   NumVGPRs = alignTo(std::max(1u, NumVGPRs),
866                      getVGPREncodingGranule(STI, EnableWavefrontSize32));
867   // VGPRBlocks is actual number of VGPR blocks minus 1.
868   return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
869 }
870 
871 } // end namespace IsaInfo
872 
873 void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
874                                const MCSubtargetInfo *STI) {
875   IsaVersion Version = getIsaVersion(STI->getCPU());
876 
877   memset(&Header, 0, sizeof(Header));
878 
879   Header.amd_kernel_code_version_major = 1;
880   Header.amd_kernel_code_version_minor = 2;
881   Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
882   Header.amd_machine_version_major = Version.Major;
883   Header.amd_machine_version_minor = Version.Minor;
884   Header.amd_machine_version_stepping = Version.Stepping;
885   Header.kernel_code_entry_byte_offset = sizeof(Header);
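  // wavefront_size is specified as a power of 2: 6 means a wave size of 64.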
886   Header.wavefront_size = 6;
887 
  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
890   Header.call_convention = -1;
891 
892   // These alignment values are specified in powers of two, so alignment =
893   // 2^n.  The minimum alignment is 2^4 = 16.
894   Header.kernarg_segment_alignment = 4;
895   Header.group_segment_alignment = 4;
896   Header.private_segment_alignment = 4;
897 
898   if (Version.Major >= 10) {
899     if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
900       Header.wavefront_size = 5;
901       Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
902     }
903     Header.compute_pgm_resource_registers |=
904       S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
905       S_00B848_MEM_ORDERED(1);
906   }
907 }
908 
909 amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
910     const MCSubtargetInfo *STI) {
911   IsaVersion Version = getIsaVersion(STI->getCPU());
912 
913   amdhsa::kernel_descriptor_t KD;
914   memset(&KD, 0, sizeof(KD));
915 
916   AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
917                   amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
918                   amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
919   AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
920                   amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
921   AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
922                   amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
923   AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
924                   amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
925   if (Version.Major >= 10) {
926     AMDHSA_BITS_SET(KD.kernel_code_properties,
927                     amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
928                     STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
929     AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
930                     amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
931                     STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
932     AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
933                     amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
934   }
935   if (AMDGPU::isGFX90A(*STI)) {
936     AMDHSA_BITS_SET(KD.compute_pgm_rsrc3,
937                     amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
938                     STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0);
939   }
940   return KD;
941 }
942 
943 bool isGroupSegment(const GlobalValue *GV) {
944   return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
945 }
946 
947 bool isGlobalSegment(const GlobalValue *GV) {
948   return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
949 }
950 
951 bool isReadOnlySegment(const GlobalValue *GV) {
952   unsigned AS = GV->getAddressSpace();
953   return AS == AMDGPUAS::CONSTANT_ADDRESS ||
954          AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
955 }
956 
957 bool shouldEmitConstantsToTextSection(const Triple &TT) {
958   return TT.getArch() == Triple::r600;
959 }
960 
961 int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
962   Attribute A = F.getFnAttribute(Name);
963   int Result = Default;
964 
965   if (A.isStringAttribute()) {
966     StringRef Str = A.getValueAsString();
967     if (Str.getAsInteger(0, Result)) {
968       LLVMContext &Ctx = F.getContext();
969       Ctx.emitError("can't parse integer attribute " + Name);
970     }
971   }
972 
973   return Result;
974 }
975 
976 std::pair<int, int> getIntegerPairAttribute(const Function &F,
977                                             StringRef Name,
978                                             std::pair<int, int> Default,
979                                             bool OnlyFirstRequired) {
980   Attribute A = F.getFnAttribute(Name);
981   if (!A.isStringAttribute())
982     return Default;
983 
984   LLVMContext &Ctx = F.getContext();
985   std::pair<int, int> Ints = Default;
986   std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
987   if (Strs.first.trim().getAsInteger(0, Ints.first)) {
988     Ctx.emitError("can't parse first integer attribute " + Name);
989     return Default;
990   }
991   if (Strs.second.trim().getAsInteger(0, Ints.second)) {
992     if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
993       Ctx.emitError("can't parse second integer attribute " + Name);
994       return Default;
995     }
996   }
997 
998   return Ints;
999 }
1000 
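// The vmcnt mask covers both the low and (where present) high vmcnt fields,
// e.g. 15 before GFX9 and 63 on GFX9 and later.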
1001 unsigned getVmcntBitMask(const IsaVersion &Version) {
1002   return (1 << (getVmcntBitWidthLo(Version.Major) +
1003                 getVmcntBitWidthHi(Version.Major))) -
1004          1;
1005 }
1006 
1007 unsigned getExpcntBitMask(const IsaVersion &Version) {
1008   return (1 << getExpcntBitWidth(Version.Major)) - 1;
1009 }
1010 
1011 unsigned getLgkmcntBitMask(const IsaVersion &Version) {
1012   return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
1013 }
1014 
1015 unsigned getWaitcntBitMask(const IsaVersion &Version) {
1016   unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
1017                                 getVmcntBitWidthLo(Version.Major));
1018   unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
1019                                getExpcntBitWidth(Version.Major));
1020   unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
1021                                 getLgkmcntBitWidth(Version.Major));
1022   unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
1023                                 getVmcntBitWidthHi(Version.Major));
1024   return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
1025 }
1026 
1027 unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1028   unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
1029                                 getVmcntBitWidthLo(Version.Major));
1030   unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
1031                                 getVmcntBitWidthHi(Version.Major));
1032   return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
1033 }
1034 
1035 unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
1036   return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
1037                     getExpcntBitWidth(Version.Major));
1038 }
1039 
1040 unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
1041   return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
1042                     getLgkmcntBitWidth(Version.Major));
1043 }
1044 
1045 void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
1046                    unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
1047   Vmcnt = decodeVmcnt(Version, Waitcnt);
1048   Expcnt = decodeExpcnt(Version, Waitcnt);
1049   Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
1050 }
1051 
1052 Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
1053   Waitcnt Decoded;
1054   Decoded.VmCnt = decodeVmcnt(Version, Encoded);
1055   Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
1056   Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
1057   return Decoded;
1058 }
1059 
1060 unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
1061                      unsigned Vmcnt) {
1062   Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
1063                      getVmcntBitWidthLo(Version.Major));
1064   return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
1065                   getVmcntBitShiftHi(Version.Major),
1066                   getVmcntBitWidthHi(Version.Major));
1067 }
1068 
1069 unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
1070                       unsigned Expcnt) {
1071   return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
1072                   getExpcntBitWidth(Version.Major));
1073 }
1074 
1075 unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
1076                        unsigned Lgkmcnt) {
1077   return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
1078                   getLgkmcntBitWidth(Version.Major));
1079 }
1080 
1081 unsigned encodeWaitcnt(const IsaVersion &Version,
1082                        unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
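  // Start from an encoding in which every counter holds its maximum value,
  // then pack in the requested counts.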
1083   unsigned Waitcnt = getWaitcntBitMask(Version);
1084   Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
1085   Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
1086   Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
1087   return Waitcnt;
1088 }
1089 
1090 unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1091   return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
1092 }
1093 
1094 //===----------------------------------------------------------------------===//
1095 // Custom Operands.
1096 //
// A table of custom operands shall describe "primary" operand names first,
// followed by aliases if any. It is not required but recommended to arrange
// operands so that operand encodings match operand positions in the table;
// this makes disassembly a bit more efficient. Unused slots in the table
// shall have an empty name.
1102 //
1103 //===----------------------------------------------------------------------===//
1104 
1105 template <class T>
1106 static bool isValidOpr(int Idx, const CustomOperand<T> OpInfo[], int OpInfoSize,
1107                        T Context) {
1108   return 0 <= Idx && Idx < OpInfoSize && !OpInfo[Idx].Name.empty() &&
1109          (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context));
1110 }
1111 
1112 template <class T>
1113 static int getOprIdx(std::function<bool(const CustomOperand<T> &)> Test,
1114                      const CustomOperand<T> OpInfo[], int OpInfoSize,
1115                      T Context) {
1116   int InvalidIdx = OPR_ID_UNKNOWN;
1117   for (int Idx = 0; Idx < OpInfoSize; ++Idx) {
1118     if (Test(OpInfo[Idx])) {
1119       if (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context))
1120         return Idx;
1121       InvalidIdx = OPR_ID_UNSUPPORTED;
1122     }
1123   }
1124   return InvalidIdx;
1125 }
1126 
1127 template <class T>
1128 static int getOprIdx(const StringRef Name, const CustomOperand<T> OpInfo[],
1129                      int OpInfoSize, T Context) {
1130   auto Test = [=](const CustomOperand<T> &Op) { return Op.Name == Name; };
1131   return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
1132 }
1133 
1134 template <class T>
1135 static int getOprIdx(int Id, const CustomOperand<T> OpInfo[], int OpInfoSize,
1136                      T Context, bool QuickCheck = true) {
1137   auto Test = [=](const CustomOperand<T> &Op) {
1138     return Op.Encoding == Id && !Op.Name.empty();
1139   };
1140   // This is an optimization that should work in most cases.
1141   // As a side effect, it may cause selection of an alias
1142   // instead of a primary operand name in case of sparse tables.
1143   if (QuickCheck && isValidOpr<T>(Id, OpInfo, OpInfoSize, Context) &&
1144       OpInfo[Id].Encoding == Id) {
1145     return Id;
1146   }
1147   return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
1148 }
1149 
1150 //===----------------------------------------------------------------------===//
1151 // Custom Operand Values
1152 //===----------------------------------------------------------------------===//
1153 
1154 static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
1155                                                 int Size,
1156                                                 const MCSubtargetInfo &STI) {
1157   unsigned Enc = 0;
1158   for (int Idx = 0; Idx < Size; ++Idx) {
1159     const auto &Op = Opr[Idx];
1160     if (Op.isSupported(STI))
1161       Enc |= Op.encode(Op.Default);
1162   }
1163   return Enc;
1164 }
1165 
1166 static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
1167                                             int Size, unsigned Code,
1168                                             bool &HasNonDefaultVal,
1169                                             const MCSubtargetInfo &STI) {
1170   unsigned UsedOprMask = 0;
1171   HasNonDefaultVal = false;
1172   for (int Idx = 0; Idx < Size; ++Idx) {
1173     const auto &Op = Opr[Idx];
1174     if (!Op.isSupported(STI))
1175       continue;
1176     UsedOprMask |= Op.getMask();
1177     unsigned Val = Op.decode(Code);
1178     if (!Op.isValid(Val))
1179       return false;
1180     HasNonDefaultVal |= (Val != Op.Default);
1181   }
1182   return (Code & ~UsedOprMask) == 0;
1183 }
1184 
1185 static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
1186                                 unsigned Code, int &Idx, StringRef &Name,
1187                                 unsigned &Val, bool &IsDefault,
1188                                 const MCSubtargetInfo &STI) {
1189   while (Idx < Size) {
1190     const auto &Op = Opr[Idx++];
1191     if (Op.isSupported(STI)) {
1192       Name = Op.Name;
1193       Val = Op.decode(Code);
1194       IsDefault = (Val == Op.Default);
1195       return true;
1196     }
1197   }
1198 
1199   return false;
1200 }
1201 
1202 static int encodeCustomOperandVal(const CustomOperandVal &Op,
1203                                   int64_t InputVal) {
1204   if (InputVal < 0 || InputVal > Op.Max)
1205     return OPR_VAL_INVALID;
1206   return Op.encode(InputVal);
1207 }
1208 
1209 static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
1210                                const StringRef Name, int64_t InputVal,
1211                                unsigned &UsedOprMask,
1212                                const MCSubtargetInfo &STI) {
1213   int InvalidId = OPR_ID_UNKNOWN;
1214   for (int Idx = 0; Idx < Size; ++Idx) {
1215     const auto &Op = Opr[Idx];
1216     if (Op.Name == Name) {
1217       if (!Op.isSupported(STI)) {
1218         InvalidId = OPR_ID_UNSUPPORTED;
1219         continue;
1220       }
1221       auto OprMask = Op.getMask();
1222       if (OprMask & UsedOprMask)
1223         return OPR_ID_DUPLICATE;
1224       UsedOprMask |= OprMask;
1225       return encodeCustomOperandVal(Op, InputVal);
1226     }
1227   }
1228   return InvalidId;
1229 }
1230 
1231 //===----------------------------------------------------------------------===//
1232 // DepCtr
1233 //===----------------------------------------------------------------------===//
1234 
1235 namespace DepCtr {
1236 
1237 int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
1238   static int Default = -1;
1239   if (Default == -1)
1240     Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
1241   return Default;
1242 }
1243 
1244 bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
1245                               const MCSubtargetInfo &STI) {
1246   return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
1247                                          HasNonDefaultVal, STI);
1248 }
1249 
1250 bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
1251                   bool &IsDefault, const MCSubtargetInfo &STI) {
1252   return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
1253                              IsDefault, STI);
1254 }
1255 
1256 int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
1257                  const MCSubtargetInfo &STI) {
1258   return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
1259                              STI);
1260 }
1261 
1262 } // namespace DepCtr
1263 
1264 //===----------------------------------------------------------------------===//
1265 // hwreg
1266 //===----------------------------------------------------------------------===//
1267 
1268 namespace Hwreg {
1269 
1270 int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) {
1271   int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Opr, OPR_SIZE, STI);
1272   return (Idx < 0) ? Idx : Opr[Idx].Encoding;
1273 }
1274 
1275 bool isValidHwreg(int64_t Id) {
1276   return 0 <= Id && isUInt<ID_WIDTH_>(Id);
1277 }
1278 
1279 bool isValidHwregOffset(int64_t Offset) {
1280   return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
1281 }
1282 
1283 bool isValidHwregWidth(int64_t Width) {
1284   return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
1285 }
1286 
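// Pack {Id, Offset, Width} into a hwreg immediate; the width is stored
// biased by one. decodeHwreg below performs the inverse operation.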
1287 uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
1288   return (Id << ID_SHIFT_) |
1289          (Offset << OFFSET_SHIFT_) |
1290          ((Width - 1) << WIDTH_M1_SHIFT_);
1291 }
1292 
1293 StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
1294   int Idx = getOprIdx<const MCSubtargetInfo &>(Id, Opr, OPR_SIZE, STI);
1295   return (Idx < 0) ? "" : Opr[Idx].Name;
1296 }
1297 
1298 void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) {
1299   Id = (Val & ID_MASK_) >> ID_SHIFT_;
1300   Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
1301   Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
1302 }
1303 
1304 } // namespace Hwreg
1305 
1306 //===----------------------------------------------------------------------===//
1307 // exp tgt
1308 //===----------------------------------------------------------------------===//
1309 
1310 namespace Exp {
1311 
1312 struct ExpTgt {
1313   StringLiteral Name;
1314   unsigned Tgt;
1315   unsigned MaxIndex;
1316 };
1317 
1318 static constexpr ExpTgt ExpTgtInfo[] = {
1319   {{"null"},           ET_NULL,            ET_NULL_MAX_IDX},
1320   {{"mrtz"},           ET_MRTZ,            ET_MRTZ_MAX_IDX},
1321   {{"prim"},           ET_PRIM,            ET_PRIM_MAX_IDX},
1322   {{"mrt"},            ET_MRT0,            ET_MRT_MAX_IDX},
1323   {{"pos"},            ET_POS0,            ET_POS_MAX_IDX},
1324   {{"dual_src_blend"}, ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
1325   {{"param"},          ET_PARAM0,          ET_PARAM_MAX_IDX},
1326 };
1327 
1328 bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
1329   for (const ExpTgt &Val : ExpTgtInfo) {
1330     if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
1331       Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
1332       Name = Val.Name;
1333       return true;
1334     }
1335   }
1336   return false;
1337 }
1338 
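// Map an exp target name to its encoding. Indexed targets such as "pos3"
// resolve to the base encoding plus the index (ET_POS0 + 3).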
unsigned getTgtId(const StringRef Name) {
1341   for (const ExpTgt &Val : ExpTgtInfo) {
1342     if (Val.MaxIndex == 0 && Name == Val.Name)
1343       return Val.Tgt;
1344 
1345     if (Val.MaxIndex > 0 && Name.startswith(Val.Name)) {
1346       StringRef Suffix = Name.drop_front(Val.Name.size());
1347 
1348       unsigned Id;
1349       if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
1350         return ET_INVALID;
1351 
      // Disallow leading zeroes.
1353       if (Suffix.size() > 1 && Suffix[0] == '0')
1354         return ET_INVALID;
1355 
1356       return Val.Tgt + Id;
1357     }
1358   }
1359   return ET_INVALID;
1360 }
1361 
1362 bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
1363   switch (Id) {
1364   case ET_NULL:
1365     return !isGFX11Plus(STI);
1366   case ET_POS4:
1367   case ET_PRIM:
1368     return isGFX10Plus(STI);
1369   case ET_DUAL_SRC_BLEND0:
1370   case ET_DUAL_SRC_BLEND1:
1371     return isGFX11Plus(STI);
1372   default:
1373     if (Id >= ET_PARAM0 && Id <= ET_PARAM31)
1374       return !isGFX11Plus(STI);
1375     return true;
1376   }
1377 }
1378 
1379 } // namespace Exp
1380 
1381 //===----------------------------------------------------------------------===//
1382 // MTBUF Format
1383 //===----------------------------------------------------------------------===//
1384 
1385 namespace MTBUFFormat {
1386 
1387 int64_t getDfmt(const StringRef Name) {
1388   for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
1389     if (Name == DfmtSymbolic[Id])
1390       return Id;
1391   }
1392   return DFMT_UNDEF;
1393 }
1394 
1395 StringRef getDfmtName(unsigned Id) {
1396   assert(Id <= DFMT_MAX);
1397   return DfmtSymbolic[Id];
1398 }
1399 
1400 static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
1401   if (isSI(STI) || isCI(STI))
1402     return NfmtSymbolicSICI;
1403   if (isVI(STI) || isGFX9(STI))
1404     return NfmtSymbolicVI;
1405   return NfmtSymbolicGFX10;
1406 }
1407 
1408 int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
1409   auto lookupTable = getNfmtLookupTable(STI);
1410   for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
1411     if (Name == lookupTable[Id])
1412       return Id;
1413   }
1414   return NFMT_UNDEF;
1415 }
1416 
1417 StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
1418   assert(Id <= NFMT_MAX);
1419   return getNfmtLookupTable(STI)[Id];
1420 }
1421 
1422 bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
1423   unsigned Dfmt;
1424   unsigned Nfmt;
1425   decodeDfmtNfmt(Id, Dfmt, Nfmt);
1426   return isValidNfmt(Nfmt, STI);
1427 }
1428 
1429 bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
1430   return !getNfmtName(Id, STI).empty();
1431 }
1432 
1433 int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
1434   return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
1435 }
1436 
1437 void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
1438   Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
1439   Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
1440 }
1441 
1442 int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
1443   if (isGFX11Plus(STI)) {
1444     for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
1445       if (Name == UfmtSymbolicGFX11[Id])
1446         return Id;
1447     }
1448   } else {
1449     for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
1450       if (Name == UfmtSymbolicGFX10[Id])
1451         return Id;
1452     }
1453   }
1454   return UFMT_UNDEF;
1455 }
1456 
1457 StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
  if (isValidUnifiedFormat(Id, STI))
1459     return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
1460   return "";
1461 }
1462 
1463 bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
1464   return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
1465 }
1466 
1467 int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
1468                              const MCSubtargetInfo &STI) {
1469   int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
1470   if (isGFX11Plus(STI)) {
1471     for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
1472       if (Fmt == DfmtNfmt2UFmtGFX11[Id])
1473         return Id;
1474     }
1475   } else {
1476     for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
1477       if (Fmt == DfmtNfmt2UFmtGFX10[Id])
1478         return Id;
1479     }
1480   }
1481   return UFMT_UNDEF;
1482 }
1483 
1484 bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
1485   return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
1486 }
1487 
1488 unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
1489   if (isGFX10Plus(STI))
1490     return UFMT_DEFAULT;
1491   return DFMT_NFMT_DEFAULT;
1492 }
1493 
1494 } // namespace MTBUFFormat
1495 
1496 //===----------------------------------------------------------------------===//
1497 // SendMsg
1498 //===----------------------------------------------------------------------===//
1499 
1500 namespace SendMsg {
1501 
1502 static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
1503   return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
1504 }
1505 
1506 int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI) {
1507   int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Msg, MSG_SIZE, STI);
1508   return (Idx < 0) ? Idx : Msg[Idx].Encoding;
1509 }
1510 
1511 bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
1512   return (MsgId & ~(getMsgIdMask(STI))) == 0;
1513 }
1514 
1515 StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI) {
1516   int Idx = getOprIdx<const MCSubtargetInfo &>(MsgId, Msg, MSG_SIZE, STI);
1517   return (Idx < 0) ? "" : Msg[Idx].Name;
1518 }
1519 
1520 int64_t getMsgOpId(int64_t MsgId, const StringRef Name) {
1521   const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
1522   const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
1523   const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
1524   for (int i = F; i < L; ++i) {
1525     if (Name == S[i]) {
1526       return i;
1527     }
1528   }
1529   return OP_UNKNOWN_;
1530 }
1531 
1532 bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
1533                   bool Strict) {
1534   assert(isValidMsgId(MsgId, STI));
1535 
1536   if (!Strict)
1537     return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
1538 
1539   if (MsgId == ID_SYSMSG)
1540     return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_;
1541   if (!isGFX11Plus(STI)) {
1542     switch (MsgId) {
1543     case ID_GS_PreGFX11:
1544       return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP;
1545     case ID_GS_DONE_PreGFX11:
1546       return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_;
1547     }
1548   }
1549   return OpId == OP_NONE_;
1550 }
1551 
1552 StringRef getMsgOpName(int64_t MsgId, int64_t OpId,
1553                        const MCSubtargetInfo &STI) {
1554   assert(msgRequiresOp(MsgId, STI));
1555   return (MsgId == ID_SYSMSG)? OpSysSymbolic[OpId] : OpGsSymbolic[OpId];
1556 }
1557 
1558 bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
1559                       const MCSubtargetInfo &STI, bool Strict) {
1560   assert(isValidMsgOp(MsgId, OpId, STI, Strict));
1561 
1562   if (!Strict)
1563     return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);
1564 
1565   if (!isGFX11Plus(STI)) {
1566     switch (MsgId) {
1567     case ID_GS_PreGFX11:
1568       return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
1569     case ID_GS_DONE_PreGFX11:
1570       return (OpId == OP_GS_NOP) ?
1571           (StreamId == STREAM_ID_NONE_) :
1572           (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
1573     }
1574   }
1575   return StreamId == STREAM_ID_NONE_;
1576 }
1577 
1578 bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
1579   return MsgId == ID_SYSMSG ||
1580       (!isGFX11Plus(STI) &&
1581        (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
1582 }
1583 
1584 bool msgSupportsStream(int64_t MsgId, int64_t OpId,
1585                        const MCSubtargetInfo &STI) {
1586   return !isGFX11Plus(STI) &&
1587       (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
1588       OpId != OP_GS_NOP;
1589 }
1590 
1591 void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
1592                uint16_t &StreamId, const MCSubtargetInfo &STI) {
1593   MsgId = Val & getMsgIdMask(STI);
1594   if (isGFX11Plus(STI)) {
1595     OpId = 0;
1596     StreamId = 0;
1597   } else {
1598     OpId = (Val & OP_MASK_) >> OP_SHIFT_;
1599     StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
1600   }
1601 }
1602 
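// Illustrative sketch, assuming the pre-GFX11 SIMM16 layout: encodeMsg packs
// the message id, op, and stream id with plain shift-and-OR at OP_SHIFT_ and
// STREAM_ID_SHIFT_; decodeMsg above is its inverse, and on GFX11+ it reports
// OpId and StreamId as 0 because those fields are no longer carried in the
// immediate.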
uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId) {
1606   return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
1607 }
1608 
1609 } // namespace SendMsg
1610 
1611 //===----------------------------------------------------------------------===//
1612 //
1613 //===----------------------------------------------------------------------===//
1614 
1615 unsigned getInitialPSInputAddr(const Function &F) {
1616   return getIntegerAttribute(F, "InitialPSInputAddr", 0);
1617 }
1618 
1619 bool getHasColorExport(const Function &F) {
1620   // As a safe default always respond as if PS has color exports.
1621   return getIntegerAttribute(
1622              F, "amdgpu-color-export",
1623              F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
1624 }
1625 
1626 bool getHasDepthExport(const Function &F) {
1627   return getIntegerAttribute(F, "amdgpu-depth-export", 0) != 0;
1628 }
1629 
bool isShader(CallingConv::ID cc) {
  switch (cc) {
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    return true;
  default:
    return false;
  }
}
1644 
1645 bool isGraphics(CallingConv::ID cc) {
1646   return isShader(cc) || cc == CallingConv::AMDGPU_Gfx;
1647 }
1648 
1649 bool isCompute(CallingConv::ID cc) {
1650   return !isGraphics(cc) || cc == CallingConv::AMDGPU_CS;
1651 }
1652 
1653 bool isEntryFunctionCC(CallingConv::ID CC) {
1654   switch (CC) {
1655   case CallingConv::AMDGPU_KERNEL:
1656   case CallingConv::SPIR_KERNEL:
1657   case CallingConv::AMDGPU_VS:
1658   case CallingConv::AMDGPU_GS:
1659   case CallingConv::AMDGPU_PS:
1660   case CallingConv::AMDGPU_CS:
1661   case CallingConv::AMDGPU_ES:
1662   case CallingConv::AMDGPU_HS:
1663   case CallingConv::AMDGPU_LS:
1664     return true;
1665   default:
1666     return false;
1667   }
1668 }
1669 
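// Module entry functions are the hardware entry points accepted by
// isEntryFunctionCC plus AMDGPU_Gfx, a callable graphics calling convention
// that is still treated as an entry point at the module level.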
1670 bool isModuleEntryFunctionCC(CallingConv::ID CC) {
1671   switch (CC) {
1672   case CallingConv::AMDGPU_Gfx:
1673     return true;
1674   default:
1675     return isEntryFunctionCC(CC);
1676   }
1677 }
1678 
1679 bool isKernelCC(const Function *Func) {
1680   return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv());
1681 }
1682 
1683 bool hasXNACK(const MCSubtargetInfo &STI) {
1684   return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
1685 }
1686 
1687 bool hasSRAMECC(const MCSubtargetInfo &STI) {
1688   return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
1689 }
1690 
bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128] &&
         !STI.getFeatureBits()[AMDGPU::FeatureR128A16];
}
1694 
1695 bool hasGFX10A16(const MCSubtargetInfo &STI) {
1696   return STI.getFeatureBits()[AMDGPU::FeatureGFX10A16];
1697 }
1698 
1699 bool hasG16(const MCSubtargetInfo &STI) {
1700   return STI.getFeatureBits()[AMDGPU::FeatureG16];
1701 }
1702 
1703 bool hasPackedD16(const MCSubtargetInfo &STI) {
1704   return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem] && !isCI(STI) &&
1705          !isSI(STI);
1706 }
1707 
1708 bool isSI(const MCSubtargetInfo &STI) {
1709   return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
1710 }
1711 
1712 bool isCI(const MCSubtargetInfo &STI) {
1713   return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
1714 }
1715 
1716 bool isVI(const MCSubtargetInfo &STI) {
1717   return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
1718 }
1719 
1720 bool isGFX9(const MCSubtargetInfo &STI) {
1721   return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
1722 }
1723 
1724 bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
1725   return isGFX9(STI) || isGFX10(STI);
1726 }
1727 
1728 bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
1729   return isVI(STI) || isGFX9(STI) || isGFX10(STI);
1730 }
1731 
1732 bool isGFX8Plus(const MCSubtargetInfo &STI) {
1733   return isVI(STI) || isGFX9Plus(STI);
1734 }
1735 
1736 bool isGFX9Plus(const MCSubtargetInfo &STI) {
1737   return isGFX9(STI) || isGFX10Plus(STI);
1738 }
1739 
1740 bool isGFX10(const MCSubtargetInfo &STI) {
1741   return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
1742 }
1743 
1744 bool isGFX10Plus(const MCSubtargetInfo &STI) {
1745   return isGFX10(STI) || isGFX11Plus(STI);
1746 }
1747 
1748 bool isGFX11(const MCSubtargetInfo &STI) {
1749   return STI.getFeatureBits()[AMDGPU::FeatureGFX11];
1750 }
1751 
1752 bool isGFX11Plus(const MCSubtargetInfo &STI) {
1753   return isGFX11(STI);
1754 }
1755 
1756 bool isNotGFX11Plus(const MCSubtargetInfo &STI) {
1757   return !isGFX11Plus(STI);
1758 }
1759 
1760 bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
1761   return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
1762 }
1763 
1764 bool isGFX10Before1030(const MCSubtargetInfo &STI) {
1765   return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
1766 }
1767 
1768 bool isGCN3Encoding(const MCSubtargetInfo &STI) {
1769   return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
1770 }
1771 
1772 bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
1773   return STI.getFeatureBits()[AMDGPU::FeatureGFX10_AEncoding];
1774 }
1775 
1776 bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
1777   return STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding];
1778 }
1779 
1780 bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
1781   return STI.getFeatureBits()[AMDGPU::FeatureGFX10_3Insts];
1782 }
1783 
1784 bool isGFX90A(const MCSubtargetInfo &STI) {
1785   return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts];
1786 }
1787 
1788 bool isGFX940(const MCSubtargetInfo &STI) {
1789   return STI.getFeatureBits()[AMDGPU::FeatureGFX940Insts];
1790 }
1791 
1792 bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
1793   return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
1794 }
1795 
1796 bool hasMAIInsts(const MCSubtargetInfo &STI) {
1797   return STI.getFeatureBits()[AMDGPU::FeatureMAIInsts];
1798 }
1799 
1800 bool hasVOPD(const MCSubtargetInfo &STI) {
1801   return STI.getFeatureBits()[AMDGPU::FeatureVOPD];
1802 }
1803 
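// Worked example (illustrative): with gfx90a-style register files, AGPRs are
// allocated after the VGPR count rounded up to a multiple of 4, so 5 VGPRs and
// 3 AGPRs cost alignTo(5, 4) + 3 = 11 registers; without gfx90a insts the two
// files are separate and the cost is max(5, 3) = 5.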
1804 int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
1805                          int32_t ArgNumVGPR) {
1806   if (has90AInsts && ArgNumAGPR)
1807     return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
1808   return std::max(ArgNumVGPR, ArgNumAGPR);
1809 }
1810 
1811 bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
1812   const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
1813   const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
1814   return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
1815     Reg == AMDGPU::SCC;
1816 }
1817 
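// MAP_REG2REG expands to a switch over pseudo/MC register pairs. The CASE_*
// macros are defined below for getMCReg (pseudo register to subtarget-specific
// MC register) and then redefined for mc2PseudoReg (the inverse mapping), so a
// single table serves both directions.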
1818 #define MAP_REG2REG \
1819   using namespace AMDGPU; \
1820   switch(Reg) { \
1821   default: return Reg; \
1822   CASE_CI_VI(FLAT_SCR) \
1823   CASE_CI_VI(FLAT_SCR_LO) \
1824   CASE_CI_VI(FLAT_SCR_HI) \
1825   CASE_VI_GFX9PLUS(TTMP0) \
1826   CASE_VI_GFX9PLUS(TTMP1) \
1827   CASE_VI_GFX9PLUS(TTMP2) \
1828   CASE_VI_GFX9PLUS(TTMP3) \
1829   CASE_VI_GFX9PLUS(TTMP4) \
1830   CASE_VI_GFX9PLUS(TTMP5) \
1831   CASE_VI_GFX9PLUS(TTMP6) \
1832   CASE_VI_GFX9PLUS(TTMP7) \
1833   CASE_VI_GFX9PLUS(TTMP8) \
1834   CASE_VI_GFX9PLUS(TTMP9) \
1835   CASE_VI_GFX9PLUS(TTMP10) \
1836   CASE_VI_GFX9PLUS(TTMP11) \
1837   CASE_VI_GFX9PLUS(TTMP12) \
1838   CASE_VI_GFX9PLUS(TTMP13) \
1839   CASE_VI_GFX9PLUS(TTMP14) \
1840   CASE_VI_GFX9PLUS(TTMP15) \
1841   CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
1842   CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
1843   CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
1844   CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
1845   CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
1846   CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
1847   CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
1848   CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
1849   CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
1850   CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
1851   CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
1852   CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
1853   CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
1854   CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
1855   CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
1856   CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
1857   CASE_GFXPRE11_GFX11PLUS(M0) \
1858   CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
1859   CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
1860   }
1861 
1862 #define CASE_CI_VI(node) \
1863   assert(!isSI(STI)); \
1864   case node: return isCI(STI) ? node##_ci : node##_vi;
1865 
1866 #define CASE_VI_GFX9PLUS(node) \
1867   case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
1868 
1869 #define CASE_GFXPRE11_GFX11PLUS(node) \
1870   case node: return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
1871 
1872 #define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
1873   case node: return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
1874 
1875 unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
1876   if (STI.getTargetTriple().getArch() == Triple::r600)
1877     return Reg;
1878   MAP_REG2REG
1879 }
1880 
1881 #undef CASE_CI_VI
1882 #undef CASE_VI_GFX9PLUS
1883 #undef CASE_GFXPRE11_GFX11PLUS
1884 #undef CASE_GFXPRE11_GFX11PLUS_TO
1885 
1886 #define CASE_CI_VI(node)   case node##_ci: case node##_vi:   return node;
#define CASE_VI_GFX9PLUS(node) \
  case node##_vi: case node##_gfx9plus: return node;
#define CASE_GFXPRE11_GFX11PLUS(node) \
  case node##_gfx11plus: case node##_gfxpre11: return node;
1889 #define CASE_GFXPRE11_GFX11PLUS_TO(node, result)
1890 
1891 unsigned mc2PseudoReg(unsigned Reg) {
1892   MAP_REG2REG
1893 }
1894 
1895 #undef CASE_CI_VI
1896 #undef CASE_VI_GFX9PLUS
1897 #undef CASE_GFXPRE11_GFX11PLUS
1898 #undef CASE_GFXPRE11_GFX11PLUS_TO
1899 #undef MAP_REG2REG
1900 
1901 bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1902   assert(OpNo < Desc.NumOperands);
1903   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
1904   return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
1905          OpType <= AMDGPU::OPERAND_SRC_LAST;
1906 }
1907 
1908 bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1909   assert(OpNo < Desc.NumOperands);
1910   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
1911   switch (OpType) {
1912   case AMDGPU::OPERAND_REG_IMM_FP32:
1913   case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
1914   case AMDGPU::OPERAND_REG_IMM_FP64:
1915   case AMDGPU::OPERAND_REG_IMM_FP16:
1916   case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
1917   case AMDGPU::OPERAND_REG_IMM_V2FP16:
1918   case AMDGPU::OPERAND_REG_IMM_V2INT16:
1919   case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1920   case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1921   case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1922   case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
1923   case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1924   case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
1925   case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
1926   case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
1927   case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
1928   case AMDGPU::OPERAND_REG_IMM_V2FP32:
1929   case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
1930   case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
1931     return true;
1932   default:
1933     return false;
1934   }
1935 }
1936 
1937 bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1938   assert(OpNo < Desc.NumOperands);
1939   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
1940   return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
1941          OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
1942 }
1943 
1944 // Avoid using MCRegisterClass::getSize, since that function will go away
1945 // (move from MC* level to Target* level). Return size in bits.
1946 unsigned getRegBitWidth(unsigned RCID) {
1947   switch (RCID) {
1948   case AMDGPU::VGPR_LO16RegClassID:
1949   case AMDGPU::VGPR_HI16RegClassID:
1950   case AMDGPU::SGPR_LO16RegClassID:
1951   case AMDGPU::AGPR_LO16RegClassID:
1952     return 16;
1953   case AMDGPU::SGPR_32RegClassID:
1954   case AMDGPU::VGPR_32RegClassID:
1955   case AMDGPU::VRegOrLds_32RegClassID:
1956   case AMDGPU::AGPR_32RegClassID:
1957   case AMDGPU::VS_32RegClassID:
1958   case AMDGPU::AV_32RegClassID:
1959   case AMDGPU::SReg_32RegClassID:
1960   case AMDGPU::SReg_32_XM0RegClassID:
1961   case AMDGPU::SRegOrLds_32RegClassID:
1962     return 32;
1963   case AMDGPU::SGPR_64RegClassID:
1964   case AMDGPU::VS_64RegClassID:
1965   case AMDGPU::SReg_64RegClassID:
1966   case AMDGPU::VReg_64RegClassID:
1967   case AMDGPU::AReg_64RegClassID:
1968   case AMDGPU::SReg_64_XEXECRegClassID:
1969   case AMDGPU::VReg_64_Align2RegClassID:
1970   case AMDGPU::AReg_64_Align2RegClassID:
1971   case AMDGPU::AV_64RegClassID:
1972   case AMDGPU::AV_64_Align2RegClassID:
1973     return 64;
1974   case AMDGPU::SGPR_96RegClassID:
1975   case AMDGPU::SReg_96RegClassID:
1976   case AMDGPU::VReg_96RegClassID:
1977   case AMDGPU::AReg_96RegClassID:
1978   case AMDGPU::VReg_96_Align2RegClassID:
1979   case AMDGPU::AReg_96_Align2RegClassID:
1980   case AMDGPU::AV_96RegClassID:
1981   case AMDGPU::AV_96_Align2RegClassID:
1982     return 96;
1983   case AMDGPU::SGPR_128RegClassID:
1984   case AMDGPU::SReg_128RegClassID:
1985   case AMDGPU::VReg_128RegClassID:
1986   case AMDGPU::AReg_128RegClassID:
1987   case AMDGPU::VReg_128_Align2RegClassID:
1988   case AMDGPU::AReg_128_Align2RegClassID:
1989   case AMDGPU::AV_128RegClassID:
1990   case AMDGPU::AV_128_Align2RegClassID:
1991     return 128;
1992   case AMDGPU::SGPR_160RegClassID:
1993   case AMDGPU::SReg_160RegClassID:
1994   case AMDGPU::VReg_160RegClassID:
1995   case AMDGPU::AReg_160RegClassID:
1996   case AMDGPU::VReg_160_Align2RegClassID:
1997   case AMDGPU::AReg_160_Align2RegClassID:
1998   case AMDGPU::AV_160RegClassID:
1999   case AMDGPU::AV_160_Align2RegClassID:
2000     return 160;
2001   case AMDGPU::SGPR_192RegClassID:
2002   case AMDGPU::SReg_192RegClassID:
2003   case AMDGPU::VReg_192RegClassID:
2004   case AMDGPU::AReg_192RegClassID:
2005   case AMDGPU::VReg_192_Align2RegClassID:
2006   case AMDGPU::AReg_192_Align2RegClassID:
2007   case AMDGPU::AV_192RegClassID:
2008   case AMDGPU::AV_192_Align2RegClassID:
2009     return 192;
2010   case AMDGPU::SGPR_224RegClassID:
2011   case AMDGPU::SReg_224RegClassID:
2012   case AMDGPU::VReg_224RegClassID:
2013   case AMDGPU::AReg_224RegClassID:
2014   case AMDGPU::VReg_224_Align2RegClassID:
2015   case AMDGPU::AReg_224_Align2RegClassID:
2016   case AMDGPU::AV_224RegClassID:
2017   case AMDGPU::AV_224_Align2RegClassID:
2018     return 224;
2019   case AMDGPU::SGPR_256RegClassID:
2020   case AMDGPU::SReg_256RegClassID:
2021   case AMDGPU::VReg_256RegClassID:
2022   case AMDGPU::AReg_256RegClassID:
2023   case AMDGPU::VReg_256_Align2RegClassID:
2024   case AMDGPU::AReg_256_Align2RegClassID:
2025   case AMDGPU::AV_256RegClassID:
2026   case AMDGPU::AV_256_Align2RegClassID:
2027     return 256;
2028   case AMDGPU::SGPR_512RegClassID:
2029   case AMDGPU::SReg_512RegClassID:
2030   case AMDGPU::VReg_512RegClassID:
2031   case AMDGPU::AReg_512RegClassID:
2032   case AMDGPU::VReg_512_Align2RegClassID:
2033   case AMDGPU::AReg_512_Align2RegClassID:
2034   case AMDGPU::AV_512RegClassID:
2035   case AMDGPU::AV_512_Align2RegClassID:
2036     return 512;
2037   case AMDGPU::SGPR_1024RegClassID:
2038   case AMDGPU::SReg_1024RegClassID:
2039   case AMDGPU::VReg_1024RegClassID:
2040   case AMDGPU::AReg_1024RegClassID:
2041   case AMDGPU::VReg_1024_Align2RegClassID:
2042   case AMDGPU::AReg_1024_Align2RegClassID:
2043   case AMDGPU::AV_1024RegClassID:
2044   case AMDGPU::AV_1024_Align2RegClassID:
2045     return 1024;
2046   default:
2047     llvm_unreachable("Unexpected register class");
2048   }
2049 }
2050 
2051 unsigned getRegBitWidth(const MCRegisterClass &RC) {
2052   return getRegBitWidth(RC.getID());
2053 }
2054 
2055 unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
2056                            unsigned OpNo) {
2057   assert(OpNo < Desc.NumOperands);
2058   unsigned RCID = Desc.OpInfo[OpNo].RegClass;
2059   return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
2060 }
2061 
2062 bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
2063   if (isInlinableIntLiteral(Literal))
2064     return true;
2065 
2066   uint64_t Val = static_cast<uint64_t>(Literal);
2067   return (Val == DoubleToBits(0.0)) ||
2068          (Val == DoubleToBits(1.0)) ||
2069          (Val == DoubleToBits(-1.0)) ||
2070          (Val == DoubleToBits(0.5)) ||
2071          (Val == DoubleToBits(-0.5)) ||
2072          (Val == DoubleToBits(2.0)) ||
2073          (Val == DoubleToBits(-2.0)) ||
2074          (Val == DoubleToBits(4.0)) ||
2075          (Val == DoubleToBits(-4.0)) ||
2076          (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
2077 }
2078 
2079 bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
2080   if (isInlinableIntLiteral(Literal))
2081     return true;
2082 
2083   // The actual type of the operand does not seem to matter as long
2084   // as the bits match one of the inline immediate values.  For example:
2085   //
  // 0xfffffffe is a (negative) NaN bit pattern, and reinterpreted as a signed
  // integer it is -2, so it is a legal inline immediate.
2088   //
2089   // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
2090   // floating-point, so it is a legal inline immediate.
2091 
2092   uint32_t Val = static_cast<uint32_t>(Literal);
2093   return (Val == FloatToBits(0.0f)) ||
2094          (Val == FloatToBits(1.0f)) ||
2095          (Val == FloatToBits(-1.0f)) ||
2096          (Val == FloatToBits(0.5f)) ||
2097          (Val == FloatToBits(-0.5f)) ||
2098          (Val == FloatToBits(2.0f)) ||
2099          (Val == FloatToBits(-2.0f)) ||
2100          (Val == FloatToBits(4.0f)) ||
2101          (Val == FloatToBits(-4.0f)) ||
2102          (Val == 0x3e22f983 && HasInv2Pi);
2103 }
2104 
2105 bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
2106   if (!HasInv2Pi)
2107     return false;
2108 
2109   if (isInlinableIntLiteral(Literal))
2110     return true;
2111 
2112   uint16_t Val = static_cast<uint16_t>(Literal);
2113   return Val == 0x3C00 || // 1.0
2114          Val == 0xBC00 || // -1.0
2115          Val == 0x3800 || // 0.5
2116          Val == 0xB800 || // -0.5
2117          Val == 0x4000 || // 2.0
2118          Val == 0xC000 || // -2.0
2119          Val == 0x4400 || // 4.0
2120          Val == 0xC400 || // -4.0
2121          Val == 0x3118;   // 1/2pi
2122 }
2123 
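// Illustrative examples: a packed literal whose two halves are equal and each
// inlinable as f16 is inlinable, e.g. 0x40004000 (2.0 in both halves); a
// literal that fits in 16 bits is checked as a single f16 value; 0x40003C00
// (2.0 and 1.0) is not inlinable because the halves differ.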
2124 bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
2125   assert(HasInv2Pi);
2126 
2127   if (isInt<16>(Literal) || isUInt<16>(Literal)) {
2128     int16_t Trunc = static_cast<int16_t>(Literal);
2129     return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
2130   }
2131   if (!(Literal & 0xffff))
2132     return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);
2133 
2134   int16_t Lo16 = static_cast<int16_t>(Literal);
2135   int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2136   return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
2137 }
2138 
2139 bool isInlinableIntLiteralV216(int32_t Literal) {
2140   int16_t Lo16 = static_cast<int16_t>(Literal);
2141   if (isInt<16>(Literal) || isUInt<16>(Literal))
2142     return isInlinableIntLiteral(Lo16);
2143 
2144   int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2145   if (!(Literal & 0xffff))
2146     return isInlinableIntLiteral(Hi16);
2147   return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
2148 }
2149 
2150 bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
2151   assert(HasInv2Pi);
2152 
2153   int16_t Lo16 = static_cast<int16_t>(Literal);
2154   if (isInt<16>(Literal) || isUInt<16>(Literal))
2155     return true;
2156 
2157   int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2158   if (!(Literal & 0xffff))
2159     return true;
2160   return Lo16 == Hi16;
2161 }
2162 
2163 bool isArgPassedInSGPR(const Argument *A) {
2164   const Function *F = A->getParent();
2165 
2166   // Arguments to compute shaders are never a source of divergence.
2167   CallingConv::ID CC = F->getCallingConv();
2168   switch (CC) {
2169   case CallingConv::AMDGPU_KERNEL:
2170   case CallingConv::SPIR_KERNEL:
2171     return true;
2172   case CallingConv::AMDGPU_VS:
2173   case CallingConv::AMDGPU_LS:
2174   case CallingConv::AMDGPU_HS:
2175   case CallingConv::AMDGPU_ES:
2176   case CallingConv::AMDGPU_GS:
2177   case CallingConv::AMDGPU_PS:
2178   case CallingConv::AMDGPU_CS:
2179   case CallingConv::AMDGPU_Gfx:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
2182     return F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::InReg) ||
2183            F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::ByVal);
2184   default:
2185     // TODO: Should calls support inreg for SGPR inputs?
2186     return false;
2187   }
2188 }
2189 
2190 static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
2191   return isGCN3Encoding(ST) || isGFX10Plus(ST);
2192 }
2193 
2194 static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
2195   return isGFX9Plus(ST);
2196 }
2197 
2198 bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
2199                                       int64_t EncodedOffset) {
2200   return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
2201                                : isUInt<8>(EncodedOffset);
2202 }
2203 
2204 bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
2205                                     int64_t EncodedOffset,
2206                                     bool IsBuffer) {
2207   return !IsBuffer &&
2208          hasSMRDSignedImmOffset(ST) &&
2209          isInt<21>(EncodedOffset);
2210 }
2211 
2212 static bool isDwordAligned(uint64_t ByteOffset) {
2213   return (ByteOffset & 3) == 0;
2214 }
2215 
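// Illustrative: on targets without SMEM byte offsets (SI/CI) the byte offset
// is converted to dword units, e.g. a ByteOffset of 400 encodes as 100; on
// GCN3 and later encodings the offset is already in bytes and is returned
// unchanged.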
2216 uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
2217                                 uint64_t ByteOffset) {
2218   if (hasSMEMByteOffset(ST))
2219     return ByteOffset;
2220 
2221   assert(isDwordAligned(ByteOffset));
2222   return ByteOffset >> 2;
2223 }
2224 
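// Illustrative: for a non-buffer access on GFX9+ the byte offset is used
// directly when it fits the signed 20-bit immediate; on SI/CI a dword-aligned
// offset is converted to dword units and must then fit the unsigned 8-bit
// field, while a misaligned one yields None.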
2225 Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
2226                                        int64_t ByteOffset, bool IsBuffer) {
2227   // The signed version is always a byte offset.
2228   if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
2229     assert(hasSMEMByteOffset(ST));
2230     return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : None;
2231   }
2232 
2233   if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
2234     return None;
2235 
2236   int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
2237   return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
2238              ? Optional<int64_t>(EncodedOffset)
2239              : None;
2240 }
2241 
2242 Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
2243                                                 int64_t ByteOffset) {
2244   if (!isCI(ST) || !isDwordAligned(ByteOffset))
2245     return None;
2246 
2247   int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
2248   return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset) : None;
2249 }
2250 
2251 unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST, bool Signed) {
  // The address offset is 12-bit signed (11-bit unsigned) on GFX10, and
  // 13-bit signed (12-bit unsigned) on GFX9 and GFX11+.
2253   if (AMDGPU::isGFX10(ST))
2254     return Signed ? 12 : 11;
2255 
2256   return Signed ? 13 : 12;
2257 }
2258 
2259 // Given Imm, split it into the values to put into the SOffset and ImmOffset
2260 // fields in an MUBUF instruction. Return false if it is not possible (due to a
2261 // hardware bug needing a workaround).
2262 //
2263 // The required alignment ensures that individual address components remain
2264 // aligned if they are aligned to begin with. It also ensures that additional
2265 // offsets within the given alignment can be added to the resulting ImmOffset.
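//
// Worked example (illustrative): Imm = 4100 with Align(4) gives
// MaxImm = alignDown(4095, 4) = 4092, so the split is ImmOffset = 4092 and
// SOffset = 8 (small enough for an inline constant); on SI/CI the nonzero
// SOffset trips the hardware-bug check below and the function returns false.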
2266 bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
2267                       const GCNSubtarget *Subtarget, Align Alignment) {
2268   const uint32_t MaxImm = alignDown(4095, Alignment.value());
2269   uint32_t Overflow = 0;
2270 
2271   if (Imm > MaxImm) {
2272     if (Imm <= MaxImm + 64) {
2273       // Use an SOffset inline constant for 4..64
2274       Overflow = Imm - MaxImm;
2275       Imm = MaxImm;
2276     } else {
2277       // Try to keep the same value in SOffset for adjacent loads, so that
2278       // the corresponding register contents can be re-used.
2279       //
2280       // Load values with all low-bits (except for alignment bits) set into
2281       // SOffset, so that a larger range of values can be covered using
2282       // s_movk_i32.
2283       //
2284       // Atomic operations fail to work correctly when individual address
2285       // components are unaligned, even if their sum is aligned.
2286       uint32_t High = (Imm + Alignment.value()) & ~4095;
2287       uint32_t Low = (Imm + Alignment.value()) & 4095;
2288       Imm = Low;
2289       Overflow = High - Alignment.value();
2290     }
2291   }
2292 
2293   // There is a hardware bug in SI and CI which prevents address clamping in
2294   // MUBUF instructions from working correctly with SOffsets. The immediate
2295   // offset is unaffected.
2296   if (Overflow > 0 &&
2297       Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
2298     return false;
2299 
2300   ImmOffset = Imm;
2301   SOffset = Overflow;
2302   return true;
2303 }
2304 
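// Illustrative attribute set (assumed, not taken from a real module): a
// function carrying "amdgpu-ieee"="false" and
// "denormal-fp-math"="preserve-sign,preserve-sign" ends up with IEEE = false
// and all denormal flags cleared, unless "denormal-fp-math-f32" overrides the
// f32 pair separately.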
2305 SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
2306   *this = getDefaultForCallingConv(F.getCallingConv());
2307 
2308   StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
2309   if (!IEEEAttr.empty())
2310     IEEE = IEEEAttr == "true";
2311 
  StringRef DX10ClampAttr =
      F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
2314   if (!DX10ClampAttr.empty())
2315     DX10Clamp = DX10ClampAttr == "true";
2316 
  StringRef DenormF32Attr =
      F.getFnAttribute("denormal-fp-math-f32").getValueAsString();
2318   if (!DenormF32Attr.empty()) {
2319     DenormalMode DenormMode = parseDenormalFPAttribute(DenormF32Attr);
2320     FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
2321     FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
2322   }
2323 
  StringRef DenormAttr =
      F.getFnAttribute("denormal-fp-math").getValueAsString();
2325   if (!DenormAttr.empty()) {
2326     DenormalMode DenormMode = parseDenormalFPAttribute(DenormAttr);
2327 
2328     if (DenormF32Attr.empty()) {
2329       FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
2330       FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
2331     }
2332 
2333     FP64FP16InputDenormals = DenormMode.Input == DenormalMode::IEEE;
2334     FP64FP16OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
2335   }
2336 }
2337 
2338 namespace {
2339 
2340 struct SourceOfDivergence {
2341   unsigned Intr;
2342 };
2343 const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
2344 
2345 #define GET_SourcesOfDivergence_IMPL
2346 #define GET_Gfx9BufferFormat_IMPL
2347 #define GET_Gfx10BufferFormat_IMPL
2348 #define GET_Gfx11PlusBufferFormat_IMPL
2349 #include "AMDGPUGenSearchableTables.inc"
2350 
2351 } // end anonymous namespace
2352 
2353 bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
2354   return lookupSourceOfDivergence(IntrID);
2355 }
2356 
2357 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
2358                                                   uint8_t NumComponents,
2359                                                   uint8_t NumFormat,
2360                                                   const MCSubtargetInfo &STI) {
2361   return isGFX11Plus(STI)
2362              ? getGfx11PlusBufferFormatInfo(BitsPerComp, NumComponents,
2363                                             NumFormat)
2364              : isGFX10(STI) ? getGfx10BufferFormatInfo(BitsPerComp,
2365                                                        NumComponents, NumFormat)
2366                             : getGfx9BufferFormatInfo(BitsPerComp,
2367                                                       NumComponents, NumFormat);
2368 }
2369 
2370 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
2371                                                   const MCSubtargetInfo &STI) {
2372   return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
2373                           : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
2374                                          : getGfx9BufferFormatInfo(Format);
2375 }
2376 
2377 } // namespace AMDGPU
2378 
2379 raw_ostream &operator<<(raw_ostream &OS,
2380                         const AMDGPU::IsaInfo::TargetIDSetting S) {
2381   switch (S) {
  case AMDGPU::IsaInfo::TargetIDSetting::Unsupported:
    OS << "Unsupported";
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::Any:
    OS << "Any";
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::Off:
    OS << "Off";
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::On:
    OS << "On";
    break;
2394   }
2395   return OS;
2396 }
2397 
2398 } // namespace llvm
2399