1 //===- SIInstrInfo.h - SI Instruction Info Interface ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Interface definition for SIInstrInfo.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_LIB_TARGET_AMDGPU_SIINSTRINFO_H
15 #define LLVM_LIB_TARGET_AMDGPU_SIINSTRINFO_H
16 
17 #include "AMDGPUMIRFormatter.h"
18 #include "SIRegisterInfo.h"
19 #include "Utils/AMDGPUBaseInfo.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/CodeGen/TargetInstrInfo.h"
22 #include "llvm/CodeGen/TargetSchedule.h"
23 
24 #define GET_INSTRINFO_HEADER
25 #include "AMDGPUGenInstrInfo.inc"
26 
27 namespace llvm {
28 
29 class APInt;
30 class GCNSubtarget;
31 class LiveVariables;
32 class MachineDominatorTree;
33 class MachineRegisterInfo;
34 class RegScavenger;
35 class TargetRegisterClass;
36 class ScheduleHazardRecognizer;
37 
class SIInstrInfo final : public AMDGPUGenInstrInfo {
private:
  // Target register info and owning subtarget; used by nearly every query
  // below.
  const SIRegisterInfo RI;
  const GCNSubtarget &ST;
  TargetSchedModel SchedModel;
  // NOTE(review): mutable so a lazily created formatter can be cached from
  // const context (see getMIRFormatter in the .cpp) — confirm there.
  mutable std::unique_ptr<AMDGPUMIRFormatter> Formatter;

  // The inverse predicate should have the negative value.
  enum BranchPredicate {
    INVALID_BR = 0,
    SCC_TRUE = 1,
    SCC_FALSE = -1,
    VCCNZ = 2,
    VCCZ = -2,
    EXECNZ = -3,
    EXECZ = 3
  };

  // Worklist of instructions still pending SALU->VALU rewriting (see the
  // moveToVALU-related helpers below).
  using SetVectorType = SmallSetVector<MachineInstr *, 32>;

  // Map between a BranchPredicate and its branch opcode (and back).
  static unsigned getBranchOpcode(BranchPredicate Cond);
  static BranchPredicate getBranchPredicate(unsigned Opcode);
public:
  /// Extract sub-register \p SubIdx of \p SuperReg (register class
  /// \p SuperRC) into a new register of class \p SubRC, inserting before
  /// \p MI. Returns the new virtual register.
  unsigned buildExtractSubReg(MachineBasicBlock::iterator MI,
                              MachineRegisterInfo &MRI,
                              MachineOperand &SuperReg,
                              const TargetRegisterClass *SuperRC,
                              unsigned SubIdx,
                              const TargetRegisterClass *SubRC) const;
  /// As buildExtractSubReg, but returns a MachineOperand so that immediate
  /// super operands can be handled as well — see SIInstrInfo.cpp.
  MachineOperand buildExtractSubRegOrImm(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC) const;
private:
  // Swap the source operands of \p Inst in place.
  void swapOperands(MachineInstr &Inst) const;

  // --- SALU -> VALU lowering helpers -------------------------------------
  // Each of these rewrites one scalar instruction into vector form.
  // Instructions whose users may in turn need rewriting are collected in
  // \p Worklist (see addUsersToMoveToVALUWorklist below).

  std::pair<bool, MachineBasicBlock *>
  moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
                   MachineDominatorTree *MDT = nullptr) const;

  void lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
                   MachineDominatorTree *MDT = nullptr) const;

  void lowerScalarAbs(SetVectorType &Worklist,
                      MachineInstr &Inst) const;

  void lowerScalarXnor(SetVectorType &Worklist,
                       MachineInstr &Inst) const;

  void splitScalarNotBinop(SetVectorType &Worklist,
                           MachineInstr &Inst,
                           unsigned Opcode) const;

  void splitScalarBinOpN2(SetVectorType &Worklist,
                          MachineInstr &Inst,
                          unsigned Opcode) const;

  // 64-bit scalar ops are split into two 32-bit vector halves.
  void splitScalar64BitUnaryOp(SetVectorType &Worklist,
                               MachineInstr &Inst, unsigned Opcode,
                               bool Swap = false) const;

  void splitScalar64BitAddSub(SetVectorType &Worklist, MachineInstr &Inst,
                              MachineDominatorTree *MDT = nullptr) const;

  void splitScalar64BitBinaryOp(SetVectorType &Worklist, MachineInstr &Inst,
                                unsigned Opcode,
                                MachineDominatorTree *MDT = nullptr) const;

  void splitScalar64BitXnor(SetVectorType &Worklist, MachineInstr &Inst,
                                MachineDominatorTree *MDT = nullptr) const;

  void splitScalar64BitBCNT(SetVectorType &Worklist,
                            MachineInstr &Inst) const;
  void splitScalar64BitBFE(SetVectorType &Worklist,
                           MachineInstr &Inst) const;
  void movePackToVALU(SetVectorType &Worklist,
                      MachineRegisterInfo &MRI,
                      MachineInstr &Inst) const;

  // Queue all users of \p Reg so they are re-examined for VALU rewriting.
  void addUsersToMoveToVALUWorklist(Register Reg, MachineRegisterInfo &MRI,
                                    SetVectorType &Worklist) const;

  // Queue users of the SCC def in \p SCCDefInst; \p NewCond, if valid, is
  // presumably the vector condition register replacing SCC — confirm in .cpp.
  void addSCCDefUsersToVALUWorklist(MachineOperand &Op,
                                    MachineInstr &SCCDefInst,
                                    SetVectorType &Worklist,
                                    Register NewCond = Register()) const;
  void addSCCDefsToVALUWorklist(MachineOperand &Op,
                                SetVectorType &Worklist) const;

  // Register class the VALU form of \p Inst's destination should use.
  const TargetRegisterClass *
  getDestEquivalentVGPRClass(const MachineInstr &Inst) const;

  // Memory-disjointness check used by areMemAccessesTriviallyDisjoint.
  bool checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
                                    const MachineInstr &MIb) const;

  Register findUsedSGPR(const MachineInstr &MI, int OpIndices[3]) const;
137 
protected:
  // Swap the source modifier operands (neg/abs/sel) that accompany
  // \p Src0 and \p Src1 when commuting them.
  bool swapSourceModifiers(MachineInstr &MI,
                           MachineOperand &Src0, unsigned Src0OpName,
                           MachineOperand &Src1, unsigned Src1OpName) const;

  // TargetInstrInfo commute hook; operand indices are those reported by
  // findCommutedOpIndices.
  MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                       unsigned OpIdx0,
                                       unsigned OpIdx1) const override;
146 
public:
  // Target flags attached to MachineOperands; each value selects the ELF
  // relocation emitted for a symbol reference.
  enum TargetOperandFlags {
    MO_MASK = 0xf,

    MO_NONE = 0,
    // MO_GOTPCREL -> symbol@GOTPCREL -> R_AMDGPU_GOTPCREL.
    MO_GOTPCREL = 1,
    // MO_GOTPCREL32_LO -> symbol@gotpcrel32@lo -> R_AMDGPU_GOTPCREL32_LO.
    MO_GOTPCREL32 = 2,
    MO_GOTPCREL32_LO = 2,
    // MO_GOTPCREL32_HI -> symbol@gotpcrel32@hi -> R_AMDGPU_GOTPCREL32_HI.
    MO_GOTPCREL32_HI = 3,
    // MO_REL32_LO -> symbol@rel32@lo -> R_AMDGPU_REL32_LO.
    MO_REL32 = 4,
    MO_REL32_LO = 4,
    // MO_REL32_HI -> symbol@rel32@hi -> R_AMDGPU_REL32_HI.
    MO_REL32_HI = 5,

    MO_FAR_BRANCH_OFFSET = 6,

    MO_ABS32_LO = 8,
    MO_ABS32_HI = 9,
  };

  explicit SIInstrInfo(const GCNSubtarget &ST);

  const SIRegisterInfo &getRegisterInfo() const {
    return RI;
  }

  const GCNSubtarget &getSubtarget() const {
    return ST;
  }
180 
  bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                         AAResults *AA) const override;

  bool isIgnorableUse(const MachineOperand &MO) const override;

  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                               int64_t &Offset1,
                               int64_t &Offset2) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &LdSt,
      SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
      bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const final;

  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           unsigned NumLoads, unsigned NumBytes) const override;

  bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0,
                               int64_t Offset1, unsigned NumLoads) const override;

  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  // Emit instruction(s) that put the 64-bit \p Value into \p DestReg.
  void materializeImmediate(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            const DebugLoc &DL,
                            unsigned DestReg,
                            int64_t Value) const;

  const TargetRegisterClass *getPreferredSelectRegClass(
                               unsigned Size) const;

  // Emit a compare of \p SrcReg against \p Value; returns the condition
  // register that holds the NE / EQ result.
  Register insertNE(MachineBasicBlock *MBB,
                    MachineBasicBlock::iterator I, const DebugLoc &DL,
                    Register SrcReg, int Value) const;

  Register insertEQ(MachineBasicBlock *MBB,
                    MachineBasicBlock::iterator I, const DebugLoc &DL,
                    Register SrcReg, int Value)  const;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  // Splits a V_MOV_B64_DPP_PSEUDO opcode into a pair of v_mov_b32_dpp
  // instructions. Returns a pair of generated instructions.
  // Can split either post-RA with physical registers or pre-RA with
  // virtual registers. In latter case IR needs to be in SSA form and
  // a REG_SEQUENCE is produced to define original register.
  std::pair<MachineInstr*, MachineInstr*>
  expandMovDPP64(MachineInstr &MI) const;
244 
  // Returns an opcode that can be used to move a value to a \p DstRC
  // register.  If there is no hardware instruction that can store to \p
  // DstRC, then AMDGPU::COPY is returned.
  unsigned getMovOpcode(const TargetRegisterClass *DstRC) const;

  // Pseudo used for indirect (movrel-based) writes into a vector of
  // \p VecSize bytes with elements of \p EltSize bytes.
  const MCInstrDesc &getIndirectRegWriteMovRelPseudo(unsigned VecSize,
                                                     unsigned EltSize,
                                                     bool IsSGPR) const;

  // GPR-index-mode pseudo for an indirect vector read or write.
  const MCInstrDesc &getIndirectGPRIDXPseudo(unsigned VecSize,
                                             bool IsIndirectSrc) const;
  LLVM_READONLY
  int commuteOpcode(unsigned Opc) const;

  LLVM_READONLY
  inline int commuteOpcode(const MachineInstr &MI) const {
    return commuteOpcode(MI.getOpcode());
  }

  bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1,
                             unsigned &SrcOpIdx2) const override;

  bool findCommutedOpIndices(MCInstrDesc Desc, unsigned & SrcOpIdx0,
   unsigned & SrcOpIdx1) const;

  // True if \p BrOffset (in bytes? — confirm unit in .cpp) is encodable in
  // the branch instruction \p BranchOpc.
  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;
274 
275   void insertIndirectBranch(MachineBasicBlock &MBB,
276                             MachineBasicBlock &NewDestBB,
277                             MachineBasicBlock &RestoreBB, const DebugLoc &DL,
278                             int64_t BrOffset, RegScavenger *RS) const override;
279 
280   bool analyzeBranchImpl(MachineBasicBlock &MBB,
281                          MachineBasicBlock::iterator I,
282                          MachineBasicBlock *&TBB,
283                          MachineBasicBlock *&FBB,
284                          SmallVectorImpl<MachineOperand> &Cond,
285                          bool AllowModify) const;
286 
287   bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
288                      MachineBasicBlock *&FBB,
289                      SmallVectorImpl<MachineOperand> &Cond,
290                      bool AllowModify = false) const override;
291 
292   unsigned removeBranch(MachineBasicBlock &MBB,
293                         int *BytesRemoved = nullptr) const override;
294 
295   unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
296                         MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
297                         const DebugLoc &DL,
298                         int *BytesAdded = nullptr) const override;
299 
300   bool reverseBranchCondition(
301     SmallVectorImpl<MachineOperand> &Cond) const override;
302 
303   bool canInsertSelect(const MachineBasicBlock &MBB,
304                        ArrayRef<MachineOperand> Cond, Register DstReg,
305                        Register TrueReg, Register FalseReg, int &CondCycles,
306                        int &TrueCycles, int &FalseCycles) const override;
307 
308   void insertSelect(MachineBasicBlock &MBB,
309                     MachineBasicBlock::iterator I, const DebugLoc &DL,
310                     Register DstReg, ArrayRef<MachineOperand> Cond,
311                     Register TrueReg, Register FalseReg) const override;
312 
313   void insertVectorSelect(MachineBasicBlock &MBB,
314                           MachineBasicBlock::iterator I, const DebugLoc &DL,
315                           Register DstReg, ArrayRef<MachineOperand> Cond,
316                           Register TrueReg, Register FalseReg) const;
317 
318   bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
319                       Register &SrcReg2, int64_t &CmpMask,
320                       int64_t &CmpValue) const override;
321 
322   bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
323                             Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
324                             const MachineRegisterInfo *MRI) const override;
325 
326   unsigned getAddressSpaceForPseudoSourceKind(
327              unsigned Kind) const override;
328 
329   bool
330   areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
331                                   const MachineInstr &MIb) const override;
332 
333   static bool isFoldableCopy(const MachineInstr &MI);
334 
335   bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
336                      MachineRegisterInfo *MRI) const final;
337 
338   unsigned getMachineCSELookAheadLimit() const override { return 500; }
339 
340   MachineInstr *convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
341                                       LiveIntervals *LIS) const override;
342 
343   bool isSchedulingBoundary(const MachineInstr &MI,
344                             const MachineBasicBlock *MBB,
345                             const MachineFunction &MF) const override;
346 
  // --- Encoding-format predicates ----------------------------------------
  // Each predicate comes in two flavors: a static one that inspects an
  // existing MachineInstr's TSFlags, and a member one that looks up the
  // MCInstrDesc for a raw opcode via get().

  static bool isSALU(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::SALU;
  }

  bool isSALU(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::SALU;
  }

  static bool isVALU(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::VALU;
  }

  bool isVALU(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::VALU;
  }

  // VMEM here means buffer/image memory instructions (not FLAT/DS).
  static bool isVMEM(const MachineInstr &MI) {
    return isMUBUF(MI) || isMTBUF(MI) || isMIMG(MI);
  }

  bool isVMEM(uint16_t Opcode) const {
    return isMUBUF(Opcode) || isMTBUF(Opcode) || isMIMG(Opcode);
  }

  static bool isSOP1(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::SOP1;
  }

  bool isSOP1(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::SOP1;
  }

  static bool isSOP2(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::SOP2;
  }

  bool isSOP2(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::SOP2;
  }

  static bool isSOPC(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::SOPC;
  }

  bool isSOPC(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::SOPC;
  }

  static bool isSOPK(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::SOPK;
  }

  bool isSOPK(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::SOPK;
  }

  static bool isSOPP(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::SOPP;
  }

  bool isSOPP(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::SOPP;
  }

  static bool isPacked(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::IsPacked;
  }

  bool isPacked(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::IsPacked;
  }

  static bool isVOP1(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::VOP1;
  }

  bool isVOP1(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::VOP1;
  }

  static bool isVOP2(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::VOP2;
  }

  bool isVOP2(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::VOP2;
  }

  static bool isVOP3(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::VOP3;
  }

  bool isVOP3(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::VOP3;
  }

  static bool isSDWA(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::SDWA;
  }

  bool isSDWA(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::SDWA;
  }

  static bool isVOPC(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::VOPC;
  }

  bool isVOPC(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::VOPC;
  }
458 
  // --- Memory encoding-format predicates (same static/opcode pairing) ----

  static bool isMUBUF(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::MUBUF;
  }

  bool isMUBUF(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::MUBUF;
  }

  static bool isMTBUF(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::MTBUF;
  }

  bool isMTBUF(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::MTBUF;
  }

  static bool isSMRD(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::SMRD;
  }

  bool isSMRD(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::SMRD;
  }

  bool isBufferSMRD(const MachineInstr &MI) const;

  static bool isDS(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::DS;
  }

  bool isDS(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::DS;
  }

  bool isAlwaysGDS(uint16_t Opcode) const;

  static bool isMIMG(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::MIMG;
  }

  bool isMIMG(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::MIMG;
  }

  static bool isGather4(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::Gather4;
  }

  bool isGather4(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::Gather4;
  }

  static bool isFLAT(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::FLAT;
  }

  // Is a FLAT encoded instruction which accesses a specific segment,
  // i.e. global_* or scratch_*.
  static bool isSegmentSpecificFLAT(const MachineInstr &MI) {
    auto Flags = MI.getDesc().TSFlags;
    return Flags & (SIInstrFlags::FlatGlobal | SIInstrFlags::FlatScratch);
  }

  bool isSegmentSpecificFLAT(uint16_t Opcode) const {
    auto Flags = get(Opcode).TSFlags;
    return Flags & (SIInstrFlags::FlatGlobal | SIInstrFlags::FlatScratch);
  }

  static bool isFLATGlobal(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::FlatGlobal;
  }

  bool isFLATGlobal(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::FlatGlobal;
  }

  static bool isFLATScratch(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::FlatScratch;
  }

  bool isFLATScratch(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::FlatScratch;
  }

  // Any FLAT encoded instruction, including global_* and scratch_*.
  bool isFLAT(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::FLAT;
  }

  static bool isEXP(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::EXP;
  }

  bool isEXP(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::EXP;
  }
555 
  // --- Atomic / scheduling / misc predicates -----------------------------

  static bool isAtomicNoRet(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::IsAtomicNoRet;
  }

  bool isAtomicNoRet(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::IsAtomicNoRet;
  }

  static bool isAtomicRet(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::IsAtomicRet;
  }

  bool isAtomicRet(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::IsAtomicRet;
  }

  // Any atomic, whether or not it returns the old value.
  static bool isAtomic(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & (SIInstrFlags::IsAtomicRet |
                                   SIInstrFlags::IsAtomicNoRet);
  }

  bool isAtomic(uint16_t Opcode) const {
    return get(Opcode).TSFlags & (SIInstrFlags::IsAtomicRet |
                                  SIInstrFlags::IsAtomicNoRet);
  }

  static bool isWQM(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::WQM;
  }

  bool isWQM(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::WQM;
  }

  static bool isDisableWQM(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::DisableWQM;
  }

  bool isDisableWQM(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::DisableWQM;
  }

  static bool isVGPRSpill(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::VGPRSpill;
  }

  bool isVGPRSpill(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::VGPRSpill;
  }

  static bool isSGPRSpill(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::SGPRSpill;
  }

  bool isSGPRSpill(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::SGPRSpill;
  }

  static bool isDPP(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::DPP;
  }

  bool isDPP(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::DPP;
  }

  static bool isTRANS(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::TRANS;
  }

  bool isTRANS(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::TRANS;
  }

  static bool isVOP3P(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::VOP3P;
  }

  bool isVOP3P(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::VOP3P;
  }

  static bool isVINTRP(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::VINTRP;
  }

  bool isVINTRP(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::VINTRP;
  }

  static bool isMAI(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::IsMAI;
  }

  bool isMAI(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::IsMAI;
  }

  static bool isDOT(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::IsDOT;
  }

  bool isDOT(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::IsDOT;
  }

  // True for instructions executed on the scalar unit (SALU or SMRD).
  static bool isScalarUnit(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & (SIInstrFlags::SALU | SIInstrFlags::SMRD);
  }

  // Wait-count predicates: which hardware counter the instruction uses.
  static bool usesVM_CNT(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::VM_CNT;
  }

  static bool usesLGKM_CNT(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::LGKM_CNT;
  }

  // True for SOPK instructions whose 16-bit immediate is zero-extended
  // (rather than sign-extended).
  static bool sopkIsZext(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::SOPK_ZEXT;
  }

  bool sopkIsZext(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::SOPK_ZEXT;
  }
681 
  /// \returns true if this is an s_store_dword* instruction. This is more
  /// specific than isSMEM && mayStore.
  static bool isScalarStore(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::SCALAR_STORE;
  }

  bool isScalarStore(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::SCALAR_STORE;
  }

  // True for instructions whose encoded size is fixed (cannot be shrunk or
  // grown by later passes).
  static bool isFixedSize(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::FIXED_SIZE;
  }

  bool isFixedSize(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::FIXED_SIZE;
  }

  // Clamp-modifier availability predicates.
  static bool hasFPClamp(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::FPClamp;
  }

  bool hasFPClamp(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::FPClamp;
  }

  static bool hasIntClamp(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::IntClamp;
  }
711 
712   uint64_t getClampMask(const MachineInstr &MI) const {
713     const uint64_t ClampFlags = SIInstrFlags::FPClamp |
714                                 SIInstrFlags::IntClamp |
715                                 SIInstrFlags::ClampLo |
716                                 SIInstrFlags::ClampHi;
717       return MI.getDesc().TSFlags & ClampFlags;
718   }
719 
  // True if the instruction's result depends on the double-precision FP
  // rounding mode.
  static bool usesFPDPRounding(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::FPDPRounding;
  }

  bool usesFPDPRounding(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::FPDPRounding;
  }

  static bool isFPAtomic(const MachineInstr &MI) {
    return MI.getDesc().TSFlags & SIInstrFlags::FPAtomic;
  }

  bool isFPAtomic(uint16_t Opcode) const {
    return get(Opcode).TSFlags & SIInstrFlags::FPAtomic;
  }
735 
736   bool isVGPRCopy(const MachineInstr &MI) const {
737     assert(MI.isCopy());
738     Register Dest = MI.getOperand(0).getReg();
739     const MachineFunction &MF = *MI.getParent()->getParent();
740     const MachineRegisterInfo &MRI = MF.getRegInfo();
741     return !RI.isSGPRReg(MRI, Dest);
742   }
743 
744   bool hasVGPRUses(const MachineInstr &MI) const {
745     const MachineFunction &MF = *MI.getParent()->getParent();
746     const MachineRegisterInfo &MRI = MF.getRegInfo();
747     return llvm::any_of(MI.explicit_uses(),
748                         [&MRI, this](const MachineOperand &MO) {
749       return MO.isReg() && RI.isVGPR(MRI, MO.getReg());});
750   }
751 
  /// Return true if the instruction modifies the mode register.
  static bool modifiesModeRegister(const MachineInstr &MI);

  /// Whether we must prevent this instruction from executing with EXEC = 0.
  bool hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const;

  /// Returns true if the instruction could potentially depend on the value of
  /// exec. If false, exec dependencies may safely be ignored.
  bool mayReadEXEC(const MachineRegisterInfo &MRI, const MachineInstr &MI) const;

  // --- Inline-constant queries -------------------------------------------
  // All overloads ultimately answer: can this value be encoded as a hardware
  // inline constant (rather than requiring a 32-bit literal)?

  bool isInlineConstant(const APInt &Imm) const;

  bool isInlineConstant(const APFloat &Imm) const {
    return isInlineConstant(Imm.bitcastToAPInt());
  }

  bool isInlineConstant(const MachineOperand &MO, uint8_t OperandType) const;

  bool isInlineConstant(const MachineOperand &MO,
                        const MCOperandInfo &OpInfo) const {
    return isInlineConstant(MO, OpInfo.OperandType);
  }
774 
  /// \returns true if, were \p UseMO substituted with \p DefMO in \p MI, it
  /// would be an inline immediate.
  bool isInlineConstant(const MachineInstr &MI,
                        const MachineOperand &UseMO,
                        const MachineOperand &DefMO) const {
    assert(UseMO.getParent() == &MI);
    int OpIdx = MI.getOperandNo(&UseMO);
    // Operands past NumOperands (or generic instructions without OpInfo)
    // have no operand-type information, so conservatively say no.
    if (!MI.getDesc().OpInfo || OpIdx >= MI.getDesc().NumOperands) {
      return false;
    }

    return isInlineConstant(DefMO, MI.getDesc().OpInfo[OpIdx]);
  }
788 
789   /// \p returns true if the operand \p OpIdx in \p MI is a valid inline
790   /// immediate.
791   bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx) const {
792     const MachineOperand &MO = MI.getOperand(OpIdx);
793     return isInlineConstant(MO, MI.getDesc().OpInfo[OpIdx].OperandType);
794   }
795 
  /// \returns true if \p MO would be a valid inline immediate in operand slot
  /// \p OpIdx of \p MI (without actually substituting it).
  bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx,
                        const MachineOperand &MO) const {
    // No operand-type info (generic instruction) or out-of-range index:
    // conservatively not inlinable.
    if (!MI.getDesc().OpInfo || OpIdx >= MI.getDesc().NumOperands)
      return false;

    if (MI.isCopy()) {
      // COPY has no operand types; derive one from the operand size
      // (only 32- and 64-bit copies are expected here).
      unsigned Size = getOpSize(MI, OpIdx);
      assert(Size == 8 || Size == 4);

      uint8_t OpType = (Size == 8) ?
        AMDGPU::OPERAND_REG_IMM_INT64 : AMDGPU::OPERAND_REG_IMM_INT32;
      return isInlineConstant(MO, OpType);
    }

    return isInlineConstant(MO, MI.getDesc().OpInfo[OpIdx].OperandType);
  }
812 
813   bool isInlineConstant(const MachineOperand &MO) const {
814     const MachineInstr *Parent = MO.getParent();
815     return isInlineConstant(*Parent, Parent->getOperandNo(&MO));
816   }
817 
818   bool isLiteralConstant(const MachineOperand &MO,
819                          const MCOperandInfo &OpInfo) const {
820     return MO.isImm() && !isInlineConstant(MO, OpInfo.OperandType);
821   }
822 
823   bool isLiteralConstant(const MachineInstr &MI, int OpIdx) const {
824     const MachineOperand &MO = MI.getOperand(OpIdx);
825     return MO.isImm() && !isInlineConstant(MI, OpIdx);
826   }
827 
  // Returns true if this operand could potentially require a 32-bit literal
  // operand, but not necessarily. A FrameIndex for example could resolve to an
  // inline immediate value that will not require an additional 4-bytes; this
  // assumes that it will.
  bool isLiteralConstantLike(const MachineOperand &MO,
                             const MCOperandInfo &OpInfo) const;

  bool isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
                         const MachineOperand &MO) const;

  /// Return true if this 64-bit VALU instruction has a 32-bit encoding.
  /// This function will return false if you pass it a 32-bit instruction.
  bool hasVALU32BitEncoding(unsigned Opcode) const;

  /// Returns true if this operand uses the constant bus.
  bool usesConstantBus(const MachineRegisterInfo &MRI,
                       const MachineOperand &MO,
                       const MCOperandInfo &OpInfo) const;

  /// Return true if this instruction has any modifiers.
  ///  e.g. src[012]_mod, omod, clamp.
  bool hasModifiers(unsigned Opcode) const;

  /// Return true if the modifier operand named \p OpName is set on \p MI.
  bool hasModifiersSet(const MachineInstr &MI,
                       unsigned OpName) const;
  bool hasAnyModifiersSet(const MachineInstr &MI) const;

  /// Whether \p MI can be rewritten to its 32-bit (VOP1/VOP2) encoding.
  bool canShrink(const MachineInstr &MI,
                 const MachineRegisterInfo &MRI) const;

  MachineInstr *buildShrunkInst(MachineInstr &MI,
                                unsigned NewOpcode) const;

  bool verifyInstruction(const MachineInstr &MI,
                         StringRef &ErrInfo) const override;

  /// VALU opcode that corresponds to the (scalar) instruction \p MI.
  unsigned getVALUOp(const MachineInstr &MI) const;

  /// Return the correct register class for \p OpNo.  For target-specific
  /// instructions, this will return the register class that has been defined
  /// in tablegen.  For generic instructions, like REG_SEQUENCE, it will use
  /// the register class of its machine operand to infer the correct register
  /// class based on the other operands.
  const TargetRegisterClass *getOpRegClass(const MachineInstr &MI,
                                           unsigned OpNo) const;

  /// Return the size in bytes of the operand OpNo on the given
  // instruction opcode.
876   unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const {
877     const MCOperandInfo &OpInfo = get(Opcode).OpInfo[OpNo];
878 
879     if (OpInfo.RegClass == -1) {
880       // If this is an immediate operand, this must be a 32-bit literal.
881       assert(OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE);
882       return 4;
883     }
884 
885     return RI.getRegSizeInBits(*RI.getRegClass(OpInfo.RegClass)) / 8;
886   }
887 
888   /// This form should usually be preferred since it handles operands
889   /// with unknown register classes.
890   unsigned getOpSize(const MachineInstr &MI, unsigned OpNo) const {
891     const MachineOperand &MO = MI.getOperand(OpNo);
892     if (MO.isReg()) {
893       if (unsigned SubReg = MO.getSubReg()) {
894         return RI.getSubRegIdxSize(SubReg) / 8;
895       }
896     }
897     return RI.getRegSizeInBits(*getOpRegClass(MI, OpNo)) / 8;
898   }
899 
  /// Legalize the \p OpIndex operand of this instruction by inserting
  /// a MOV.  For example:
  /// ADD_I32_e32 VGPR0, 15
  /// to
  /// MOV VGPR1, 15
  /// ADD_I32_e32 VGPR0, VGPR1
  ///
  /// If the operand being legalized is a register, then a COPY will be used
  /// instead of MOV.
  void legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const;

  /// Check if \p MO is a legal operand if it was the \p OpIdx Operand
  /// for \p MI.
  bool isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
                      const MachineOperand *MO = nullptr) const;

  /// Check if \p MO would be a valid operand for the given operand
  /// definition \p OpInfo. Note this does not attempt to validate constant bus
  /// restrictions (e.g. literal constant usage).
  bool isLegalVSrcOperand(const MachineRegisterInfo &MRI,
                          const MCOperandInfo &OpInfo,
                          const MachineOperand &MO) const;

  /// Check if \p MO (a register operand) is a legal register for the
  /// given operand description.
  bool isLegalRegOperand(const MachineRegisterInfo &MRI,
                         const MCOperandInfo &OpInfo,
                         const MachineOperand &MO) const;

  /// Legalize operands in \p MI by either commuting it or inserting a
  /// copy of src1.
  void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr &MI) const;

  /// Fix operands in \p MI to satisfy constant bus requirements.
  void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr &MI) const;

  /// Copy a value from a VGPR (\p SrcReg) to SGPR.  This function can only
  /// be used when it is known that the value in SrcReg is same across all
  /// threads in the wave.
  /// \returns The SGPR register that \p SrcReg was copied to.
  Register readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
                              MachineRegisterInfo &MRI) const;

  /// Legalize the operands of an SMRD instruction.
  void legalizeOperandsSMRD(MachineRegisterInfo &MRI, MachineInstr &MI) const;
  /// Legalize the operands of a FLAT instruction.
  void legalizeOperandsFLAT(MachineRegisterInfo &MRI, MachineInstr &MI) const;

  /// Legalize operand \p Op by copying it into a register of class \p DstRC,
  /// inserting the copy at \p I in \p InsertMBB.
  void legalizeGenericOperand(MachineBasicBlock &InsertMBB,
                              MachineBasicBlock::iterator I,
                              const TargetRegisterClass *DstRC,
                              MachineOperand &Op, MachineRegisterInfo &MRI,
                              const DebugLoc &DL) const;

  /// Legalize all operands in this instruction.  This function may create new
  /// instructions and control-flow around \p MI.  If present, \p MDT is
  /// updated.
  /// \returns A new basic block that contains \p MI if new blocks were created.
  MachineBasicBlock *
  legalizeOperands(MachineInstr &MI, MachineDominatorTree *MDT = nullptr) const;

  /// Change SADDR form of a FLAT \p Inst to its VADDR form if saddr operand
  /// was moved to VGPR. \returns true if succeeded.
  bool moveFlatAddrToVGPR(MachineInstr &Inst) const;

  /// Replace this instruction's opcode with the equivalent VALU
  /// opcode.  This function will also move the users of \p MI to the
  /// VALU if necessary. If present, \p MDT is updated.
  MachineBasicBlock *moveToVALU(MachineInstr &MI,
                                MachineDominatorTree *MDT = nullptr) const;

  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;

  void insertNoops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                   unsigned Quantity) const override;

  void insertReturn(MachineBasicBlock &MBB) const;
  /// Return the number of wait states that result from executing this
  /// instruction.
  static unsigned getNumWaitStates(const MachineInstr &MI);

  /// Returns the operand named \p Op.  If \p MI does not have an
  /// operand named \c Op, this function returns nullptr.
  LLVM_READONLY
  MachineOperand *getNamedOperand(MachineInstr &MI, unsigned OperandName) const;
984 
  /// Const overload of getNamedOperand. Forwards to the non-const version;
  /// the const_cast is safe because the result is returned as const.
  LLVM_READONLY
  const MachineOperand *getNamedOperand(const MachineInstr &MI,
                                        unsigned OpName) const {
    return getNamedOperand(const_cast<MachineInstr &>(MI), OpName);
  }
990 
991   /// Get required immediate operand
992   int64_t getNamedImmOperand(const MachineInstr &MI, unsigned OpName) const {
993     int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
994     return MI.getOperand(Idx).getImm();
995   }
996 
  /// \returns the default buffer resource descriptor data format for the
  /// subtarget.
  uint64_t getDefaultRsrcDataFormat() const;
  /// \returns words 2-3 of the scratch buffer resource descriptor.
  uint64_t getScratchRsrcWords23() const;

  bool isLowLatencyInstruction(const MachineInstr &MI) const;
  bool isHighLatencyDef(int Opc) const override;

  /// Return the descriptor of the target-specific machine instruction
  /// that corresponds to the specified pseudo or native opcode.
  /// NOTE: pseudoToMCOpcode() returns -1 when no MC mapping exists, so this
  /// must only be called with opcodes that have a valid mapping.
  const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const {
    return get(pseudoToMCOpcode(Opcode));
  }

  /// If \p MI accesses the stack, set \p FrameIndex and identify the access;
  /// returns 0 otherwise (see isLoadFromStackSlot/isStoreToStackSlot).
  unsigned isStackAccess(const MachineInstr &MI, int &FrameIndex) const;
  unsigned isSGPRStackAccess(const MachineInstr &MI, int &FrameIndex) const;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// \returns the total encoded size in bytes of the instructions in a
  /// bundle headed by \p MI.
  unsigned getInstBundleSize(const MachineInstr &MI) const;
  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  bool mayAccessFlatAddressSpace(const MachineInstr &MI) const;

  bool isNonUniformBranchInstr(MachineInstr &Instr) const;

  void convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
                                 MachineBasicBlock *IfEnd) const;

  void convertNonUniformLoopRegion(MachineBasicBlock *LoopEntry,
                                   MachineBasicBlock *LoopEnd) const;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;

  ArrayRef<std::pair<int, const char *>>
  getSerializableTargetIndices() const override;

  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;

  ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                 const ScheduleDAG *DAG) const override;

  ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const override;

  ScheduleHazardRecognizer *
  CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                                 const ScheduleDAGMI *DAG) const override;

  bool isBasicBlockPrologue(const MachineInstr &MI) const override;

  MachineInstr *createPHIDestinationCopy(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator InsPt,
                                         const DebugLoc &DL, Register Src,
                                         Register Dst) const override;

  MachineInstr *createPHISourceCopy(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator InsPt,
                                    const DebugLoc &DL, Register Src,
                                    unsigned SrcSubReg,
                                    Register Dst) const override;

  /// \returns true if the subtarget runs in wave32 mode.
  bool isWave32() const;

  /// Return a partially built integer add instruction without carry.
  /// Caller must add source operands.
  /// For pre-GFX9 it will generate unused carry destination operand.
  /// TODO: After GFX9 it should return a no-carry operation.
  MachineInstrBuilder getAddNoCarry(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL,
                                    Register DestReg) const;

  /// Overload of getAddNoCarry for use after register allocation, using a
  /// register scavenger instead of creating virtual registers.
  MachineInstrBuilder getAddNoCarry(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL,
                                    Register DestReg,
                                    RegScavenger &RS) const;

  static bool isKillTerminator(unsigned Opcode);
  const MCInstrDesc &getKillTerminatorFromPseudo(unsigned Opcode) const;
1082 
1083   static bool isLegalMUBUFImmOffset(unsigned Imm) {
1084     return isUInt<12>(Imm);
1085   }
1086 
  /// Returns true if \p Offset is legal for the subtarget as the offset to a
  /// FLAT encoded instruction. If \p Signed, this is for an instruction that
  /// interprets the offset as signed.
  bool isLegalFLATOffset(int64_t Offset, unsigned AddrSpace,
                         uint64_t FlatVariant) const;

  /// Split \p COffsetVal into {immediate offset field, remainder offset}
  /// values.
  std::pair<int64_t, int64_t> splitFlatOffset(int64_t COffsetVal,
                                              unsigned AddrSpace,
                                              uint64_t FlatVariant) const;

  /// \brief Return a target-specific opcode if Opcode is a pseudo instruction.
  /// Return -1 if the target-specific opcode for the pseudo instruction does
  /// not exist. If Opcode is not a pseudo instruction, this is identity.
  int pseudoToMCOpcode(int Opcode) const;

  /// \brief Check if this instruction should only be used by assembler.
  /// Return true if this opcode should not be used by codegen.
  bool isAsmOnlyOpcode(int MCOp) const;

  const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum,
                                         const TargetRegisterInfo *TRI,
                                         const MachineFunction &MF)
    const override;

  /// Fix up the implicit operands of \p MI (e.g. for wave-size dependent
  /// registers). NOTE(review): exact behavior defined in the implementation.
  void fixImplicitOperands(MachineInstr &MI) const;

  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                                      ArrayRef<unsigned> Ops,
                                      MachineBasicBlock::iterator InsertPt,
                                      int FrameIndex,
                                      LiveIntervals *LIS = nullptr,
                                      VirtRegMap *VRM = nullptr) const override;

  unsigned getInstrLatency(const InstrItineraryData *ItinData,
                           const MachineInstr &MI,
                           unsigned *PredCost = nullptr) const override;
1125 
1126   const MIRFormatter *getMIRFormatter() const override {
1127     if (!Formatter.get())
1128       Formatter = std::make_unique<AMDGPUMIRFormatter>();
1129     return Formatter.get();
1130   }
1131 
  /// \returns the shader-type value for DS instructions derived from \p MF.
  /// NOTE(review): presumably based on the function's calling convention --
  /// confirm against the implementation.
  static unsigned getDSShaderTypeValue(const MachineFunction &MF);

  /// \returns the cached scheduling model for the subtarget.
  const TargetSchedModel &getSchedModel() const { return SchedModel; }
1135 };
1136 
1137 /// \brief Returns true if a reg:subreg pair P has a TRC class
1138 inline bool isOfRegClass(const TargetInstrInfo::RegSubRegPair &P,
1139                          const TargetRegisterClass &TRC,
1140                          MachineRegisterInfo &MRI) {
1141   auto *RC = MRI.getRegClass(P.Reg);
1142   if (!P.SubReg)
1143     return RC == &TRC;
1144   auto *TRI = MRI.getTargetRegisterInfo();
1145   return RC == TRI->getMatchingSuperRegClass(RC, &TRC, P.SubReg);
1146 }
1147 
1148 /// \brief Create RegSubRegPair from a register MachineOperand
1149 inline
1150 TargetInstrInfo::RegSubRegPair getRegSubRegPair(const MachineOperand &O) {
1151   assert(O.isReg());
1152   return TargetInstrInfo::RegSubRegPair(O.getReg(), O.getSubReg());
1153 }
1154 
/// \brief Return the reg:subreg source of \p MI (a REG_SEQUENCE) that
/// corresponds to the subregister index \p SubReg.
TargetInstrInfo::RegSubRegPair getRegSequenceSubReg(MachineInstr &MI,
                                                    unsigned SubReg);

/// \brief Return the defining instruction for a given reg:subreg pair
/// skipping copy like instructions and subreg-manipulation pseudos.
/// Following another subreg of a reg:subreg isn't supported.
MachineInstr *getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
                               MachineRegisterInfo &MRI);

/// \brief Return false if EXEC is not changed between the def of \p VReg at \p
/// DefMI and the use at \p UseMI. Should be run on SSA. Currently does not
/// attempt to track between blocks.
bool execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
                                Register VReg,
                                const MachineInstr &DefMI,
                                const MachineInstr &UseMI);

/// \brief Return false if EXEC is not changed between the def of \p VReg at \p
/// DefMI and all its uses. Should be run on SSA. Currently does not attempt to
/// track between blocks.
bool execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI,
                                   Register VReg,
                                   const MachineInstr &DefMI);
1179 
namespace AMDGPU {

  // Tablegen-generated opcode mapping queries. Each takes an opcode and
  // returns the related opcode; typically -1 indicates no mapping exists
  // (see getIfAddr64Inst below and pseudoToMCOpcode for the convention).

  /// \returns the 64-bit (e64) VOP encoding of \p Opcode.
  LLVM_READONLY
  int getVOPe64(uint16_t Opcode);

  /// \returns the 32-bit (e32) VOP encoding of \p Opcode.
  LLVM_READONLY
  int getVOPe32(uint16_t Opcode);

  LLVM_READONLY
  int getSDWAOp(uint16_t Opcode);

  LLVM_READONLY
  int getDPPOp32(uint16_t Opcode);

  LLVM_READONLY
  int getBasicFromSDWAOp(uint16_t Opcode);

  LLVM_READONLY
  int getCommuteRev(uint16_t Opcode);

  LLVM_READONLY
  int getCommuteOrig(uint16_t Opcode);

  LLVM_READONLY
  int getAddr64Inst(uint16_t Opcode);

  /// Check if \p Opcode is an Addr64 opcode.
  ///
  /// \returns \p Opcode if it is an Addr64 opcode, otherwise -1.
  LLVM_READONLY
  int getIfAddr64Inst(uint16_t Opcode);

  LLVM_READONLY
  int getMUBUFNoLdsInst(uint16_t Opcode);

  LLVM_READONLY
  int getAtomicNoRetOp(uint16_t Opcode);

  LLVM_READONLY
  int getSOPKOp(uint16_t Opcode);

  /// \returns SADDR form of a FLAT Global instruction given an \p Opcode
  /// of a VADDR form.
  LLVM_READONLY
  int getGlobalSaddrOp(uint16_t Opcode);

  /// \returns VADDR form of a FLAT Global instruction given an \p Opcode
  /// of a SADDR form.
  LLVM_READONLY
  int getGlobalVaddrOp(uint16_t Opcode);

  LLVM_READONLY
  int getVCMPXNoSDstOp(uint16_t Opcode);

  /// \returns ST form with only immediate offset of a FLAT Scratch instruction
  /// given an \p Opcode of an SS (SADDR) form.
  LLVM_READONLY
  int getFlatScratchInstSTfromSS(uint16_t Opcode);

  /// \returns SS (SADDR) form of a FLAT Scratch instruction given an \p Opcode
  /// of an SV (VADDR) form.
  LLVM_READONLY
  int getFlatScratchInstSSfromSV(uint16_t Opcode);

  /// \returns SV (VADDR) form of a FLAT Scratch instruction given an \p Opcode
  /// of an SS (SADDR) form.
  LLVM_READONLY
  int getFlatScratchInstSVfromSS(uint16_t Opcode);

  /// \returns earlyclobber version of a MAC MFMA if it exists.
  LLVM_READONLY
  int getMFMAEarlyClobberOp(uint16_t Opcode);

  // Buffer resource descriptor field constants (bit positions within the
  // 128-bit descriptor, expressed relative to the upper 64 bits).
  const uint64_t RSRC_DATA_FORMAT = 0xf00000000000LL;
  const uint64_t RSRC_ELEMENT_SIZE_SHIFT = (32 + 19);
  const uint64_t RSRC_INDEX_STRIDE_SHIFT = (32 + 21);
  const uint64_t RSRC_TID_ENABLE = UINT64_C(1) << (32 + 23);

} // end namespace AMDGPU
1259 
namespace SI {
namespace KernelInputOffsets {

/// Offsets in bytes from the start of the input buffer.
/// Each field is 4 bytes apart, i.e. a 32-bit value per component.
enum Offsets {
  NGROUPS_X = 0,
  NGROUPS_Y = 4,
  NGROUPS_Z = 8,
  GLOBAL_SIZE_X = 12,
  GLOBAL_SIZE_Y = 16,
  GLOBAL_SIZE_Z = 20,
  LOCAL_SIZE_X = 24,
  LOCAL_SIZE_Y = 28,
  LOCAL_SIZE_Z = 32
};

} // end namespace KernelInputOffsets
} // end namespace SI
1278 
1279 } // end namespace llvm
1280 
1281 #endif // LLVM_LIB_TARGET_AMDGPU_SIINSTRINFO_H
1282