//===- SIPeepholeSDWA.cpp - Peephole optimization for SDWA instructions ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass tries to apply several peephole SDWA patterns.
///
/// E.g. original:
///   V_LSHRREV_B32_e32 %0, 16, %1
///   V_ADD_CO_U32_e32 %2, %0, %3
///   V_LSHLREV_B32_e32 %4, 16, %2
///
/// Replace with:
///   V_ADD_CO_U32_sdwa %4, %1, %3
///       dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "si-peephole-sdwa"

STATISTIC(NumSDWAPatternsFound, "Number of SDWA patterns found.");
STATISTIC(NumSDWAInstructionsPeepholed,
          "Number of instructions converted to SDWA.");

namespace {

class SDWAOperand;
class SDWADstOperand;

class SIPeepholeSDWA : public MachineFunctionPass {
public:
  using SDWAOperandsVector = SmallVector<SDWAOperand *, 4>;

private:
  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  MapVector<MachineInstr *, std::unique_ptr<SDWAOperand>> SDWAOperands;
  MapVector<MachineInstr *, SDWAOperandsVector> PotentialMatches;
  SmallVector<MachineInstr *, 8> ConvertedInstructions;

  std::optional<int64_t> foldToImm(const MachineOperand &Op) const;

public:
  static char ID;

  SIPeepholeSDWA() : MachineFunctionPass(ID) {
    initializeSIPeepholeSDWAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void matchSDWAOperands(MachineBasicBlock &MBB);
  std::unique_ptr<SDWAOperand> matchSDWAOperand(MachineInstr &MI);
  bool isConvertibleToSDWA(MachineInstr &MI, const GCNSubtarget &ST) const;
  void pseudoOpConvertToVOP2(MachineInstr &MI,
                             const GCNSubtarget &ST) const;
  bool convertToSDWA(MachineInstr &MI, const SDWAOperandsVector &SDWAOperands);
  void legalizeScalarOperands(MachineInstr &MI, const GCNSubtarget &ST) const;

  StringRef getPassName() const override { return "SI Peephole SDWA"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

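/// Base class for matched SDWA patterns. potentialToConvert() locates the
/// instruction that could become SDWA; convertToSDWA() rewrites that
/// instruction in place, substituting Target for Replaced.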
class SDWAOperand {
private:
  MachineOperand *Target; // Operand that would be used in converted instruction
  MachineOperand *Replaced; // Operand that would be replaced by Target

public:
  SDWAOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp)
      : Target(TargetOp), Replaced(ReplacedOp) {
    assert(Target->isReg());
    assert(Replaced->isReg());
  }

  virtual ~SDWAOperand() = default;

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) = 0;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;

  MachineOperand *getTargetOperand() const { return Target; }
  MachineOperand *getReplacedOperand() const { return Replaced; }
  MachineInstr *getParentInst() const { return Target->getParent(); }

  MachineRegisterInfo *getMRI() const {
    return &getParentInst()->getParent()->getParent()->getRegInfo();
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  virtual void print(raw_ostream &OS) const = 0;
  void dump() const { print(dbgs()); }
#endif
};

using namespace AMDGPU::SDWA;

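/// Matched pattern that becomes an SDWA source: the single user of the
/// parent instruction's result reads Target directly through src_sel,
/// optionally with abs/neg/sext modifiers.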
class SDWASrcOperand : public SDWAOperand {
private:
  SdwaSel SrcSel;
  bool Abs;
  bool Neg;
  bool Sext;

public:
  SDWASrcOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
                 bool Sext_ = false)
      : SDWAOperand(TargetOp, ReplacedOp),
        SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getSrcSel() const { return SrcSel; }
  bool getAbs() const { return Abs; }
  bool getNeg() const { return Neg; }
  bool getSext() const { return Sext; }

  uint64_t getSrcMods(const SIInstrInfo *TII,
                      const MachineOperand *SrcOp) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS) const override;
#endif
};

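/// Matched pattern that becomes an SDWA destination: the instruction that
/// defines Replaced is rewritten to write Target through dst_sel, with the
/// remaining bits handled according to dst_unused.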
class SDWADstOperand : public SDWAOperand {
private:
  SdwaSel DstSel;
  DstUnused DstUn;

public:
  SDWADstOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
    : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getDstSel() const { return DstSel; }
  DstUnused getDstUnused() const { return DstUn; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS) const override;
#endif
};

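/// dst_unused:UNUSED_PRESERVE variant matched from v_or_b32: the bits not
/// written through dst_sel are taken from Preserve instead of being zeroed.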
class SDWADstPreserveOperand : public SDWADstOperand {
private:
  MachineOperand *Preserve;

public:
  SDWADstPreserveOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                         MachineOperand *PreserveOp, SdwaSel DstSel_ = DWORD)
      : SDWADstOperand(TargetOp, ReplacedOp, DstSel_, UNUSED_PRESERVE),
        Preserve(PreserveOp) {}

  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  MachineOperand *getPreservedOperand() const { return Preserve; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS) const override;
#endif
};

} // end anonymous namespace

INITIALIZE_PASS(SIPeepholeSDWA, DEBUG_TYPE, "SI Peephole SDWA", false, false)

char SIPeepholeSDWA::ID = 0;

char &llvm::SIPeepholeSDWAID = SIPeepholeSDWA::ID;

FunctionPass *llvm::createSIPeepholeSDWAPass() {
  return new SIPeepholeSDWA();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
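// Pretty-printers for the operand kinds above, used by dump().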
static raw_ostream &operator<<(raw_ostream &OS, SdwaSel Sel) {
  switch (Sel) {
  case BYTE_0: OS << "BYTE_0"; break;
  case BYTE_1: OS << "BYTE_1"; break;
  case BYTE_2: OS << "BYTE_2"; break;
  case BYTE_3: OS << "BYTE_3"; break;
  case WORD_0: OS << "WORD_0"; break;
  case WORD_1: OS << "WORD_1"; break;
  case DWORD:  OS << "DWORD"; break;
  }
  return OS;
}

static raw_ostream &operator<<(raw_ostream &OS, const DstUnused &Un) {
  switch (Un) {
  case UNUSED_PAD: OS << "UNUSED_PAD"; break;
  case UNUSED_SEXT: OS << "UNUSED_SEXT"; break;
  case UNUSED_PRESERVE: OS << "UNUSED_PRESERVE"; break;
  }
  return OS;
}

LLVM_DUMP_METHOD
void SDWASrcOperand::print(raw_ostream &OS) const {
  OS << "SDWA src: " << *getTargetOperand()
     << " src_sel:" << getSrcSel()
     << " abs:" << getAbs() << " neg:" << getNeg()
     << " sext:" << getSext() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstOperand::print(raw_ostream &OS) const {
  OS << "SDWA dst: " << *getTargetOperand()
     << " dst_sel:" << getDstSel()
     << " dst_unused:" << getDstUnused() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstPreserveOperand::print(raw_ostream &OS) const {
  OS << "SDWA preserve dst: " << *getTargetOperand()
     << " dst_sel:" << getDstSel()
     << " preserve:" << *getPreservedOperand() << '\n';
}

#endif

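// Copy the register, subregister index, and the relevant liveness flags
// (undef, plus kill for uses or dead for defs) from From to To.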
static void copyRegOperand(MachineOperand &To, const MachineOperand &From) {
  assert(To.isReg() && From.isReg());
  To.setReg(From.getReg());
  To.setSubReg(From.getSubReg());
  To.setIsUndef(From.isUndef());
  if (To.isUse()) {
    To.setIsKill(From.isKill());
  } else {
    To.setIsDead(From.isDead());
  }
}

static bool isSameReg(const MachineOperand &LHS, const MachineOperand &RHS) {
  return LHS.isReg() &&
         RHS.isReg() &&
         LHS.getReg() == RHS.getReg() &&
         LHS.getSubReg() == RHS.getSubReg();
}

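// If the register defined by Reg is used by exactly one instruction (and only
// through the same subregister), return one of its use operands in that
// instruction; otherwise return nullptr.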
static MachineOperand *findSingleRegUse(const MachineOperand *Reg,
                                        const MachineRegisterInfo *MRI) {
  if (!Reg->isReg() || !Reg->isDef())
    return nullptr;

  MachineOperand *ResMO = nullptr;
  for (MachineOperand &UseMO : MRI->use_nodbg_operands(Reg->getReg())) {
    // If there exists a use of a subreg of Reg, return nullptr.
    if (!isSameReg(UseMO, *Reg))
      return nullptr;

    // Check that there is only one instruction that uses Reg.
    if (!ResMO) {
      ResMO = &UseMO;
    } else if (ResMO->getParent() != UseMO.getParent()) {
      return nullptr;
    }
  }

  return ResMO;
}

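// Return the explicit def operand of Reg's unique defining instruction, or
// nullptr if there is no unique (non-implicit) def.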
static MachineOperand *findSingleRegDef(const MachineOperand *Reg,
                                        const MachineRegisterInfo *MRI) {
  if (!Reg->isReg())
    return nullptr;

  MachineInstr *DefInstr = MRI->getUniqueVRegDef(Reg->getReg());
  if (!DefInstr)
    return nullptr;

  for (auto &DefMO : DefInstr->defs()) {
    if (DefMO.isReg() && DefMO.getReg() == Reg->getReg())
      return &DefMO;
  }

  // Ignore implicit defs.
  return nullptr;
}

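// Compute the src_modifiers value for SrcOp's slot in the SDWA instruction:
// start from the modifiers already present on that slot and fold in the
// matched abs/neg/sext bits.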
uint64_t SDWASrcOperand::getSrcMods(const SIInstrInfo *TII,
                                    const MachineOperand *SrcOp) const {
  uint64_t Mods = 0;
  const auto *MI = SrcOp->getParent();
  if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      Mods = Mod->getImm();
    }
  } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {
      Mods = Mod->getImm();
    }
  }
  if (Abs || Neg) {
    assert(!Sext &&
           "Float and integer src modifiers can't be set simultaneously");
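    // NEG is XORed rather than ORed so that it composes with (and can cancel
    // against) a NEG bit already present in the instruction's src modifiers.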
    Mods |= Abs ? SISrcMods::ABS : 0u;
    Mods ^= Neg ? SISrcMods::NEG : 0u;
  } else if (Sext) {
    Mods |= SISrcMods::SEXT;
  }

  return Mods;
}

MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA src operand, the potential instruction to convert is the one
  // that uses the register defined by the parent instruction.
  MachineOperand *PotentialMO =
      findSingleRegUse(getReplacedOperand(), getMRI());
  if (!PotentialMO)
    return nullptr;

  return PotentialMO->getParent();
}

bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Find the operand in MI that matches the replaced operand and substitute
  // the target operand for it. Set the corresponding src_sel.
  bool IsPreserveSrc = false;
  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  MachineOperand *SrcMods =
      TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
  assert(Src && (Src->isReg() || Src->isImm()));
  if (!isSameReg(*Src, *getReplacedOperand())) {
    // If this is not src0 then it could be src1.
    Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    SrcMods = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

    if (!Src ||
        !isSameReg(*Src, *getReplacedOperand())) {
      // It's possible this Src is a tied operand for UNUSED_PRESERVE, in
      // which case we can either abandon the peephole attempt, or, if legal,
      // copy the target operand into the tied slot. That is legal when the
      // preserve operation effectively produces the same result by
      // overwriting the rest of the dst.
      MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
      MachineOperand *DstUnused =
        TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);

      if (Dst &&
          DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
        // This will work if the tied src is accessing WORD_0, and the dst is
        // writing WORD_1. Modifiers don't matter because all the bits that
        // would be impacted are being overwritten by the dst.
        // Any other case will not work.
        SdwaSel DstSel = static_cast<SdwaSel>(
            TII->getNamedImmOperand(MI, AMDGPU::OpName::dst_sel));
        if (DstSel == AMDGPU::SDWA::SdwaSel::WORD_1 &&
            getSrcSel() == AMDGPU::SDWA::SdwaSel::WORD_0) {
          IsPreserveSrc = true;
          auto DstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                   AMDGPU::OpName::vdst);
          auto TiedIdx = MI.findTiedOperandIdx(DstIdx);
          Src = &MI.getOperand(TiedIdx);
          SrcSel = nullptr;
          SrcMods = nullptr;
        } else {
          // Not legal to convert this src.
          return false;
        }
      }
    }
    assert(Src && Src->isReg());

    if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_FMAC_F32_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
         !isSameReg(*Src, *getReplacedOperand())) {
      // In case of v_mac_f16/32_sdwa this pass can try to apply the src
      // operand to src2, which is not allowed.
      return false;
    }

    assert(isSameReg(*Src, *getReplacedOperand()) &&
           (IsPreserveSrc || (SrcSel && SrcMods)));
  }
  copyRegOperand(*Src, *getTargetOperand());
  if (!IsPreserveSrc) {
    SrcSel->setImm(getSrcSel());
    SrcMods->setImm(getSrcMods(TII, Src));
  }
  getTargetOperand()->setIsKill(false);
  return true;
}

MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA dst operand, the potential instruction to convert is the one
  // that defines the register this operand uses.
  MachineRegisterInfo *MRI = getMRI();
  MachineInstr *ParentMI = getParentInst();

  MachineOperand *PotentialMO = findSingleRegDef(getReplacedOperand(), MRI);
  if (!PotentialMO)
    return nullptr;

  // Check that ParentMI is the only instruction that uses the replaced
  // register.
  for (MachineInstr &UseInst :
       MRI->use_nodbg_instructions(PotentialMO->getReg())) {
    if (&UseInst != ParentMI)
      return nullptr;
  }

  return PotentialMO->getParent();
}

bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Replace the vdst operand in MI with the target operand. Set dst_sel and
  // dst_unused.

  if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_FMAC_F32_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
      getDstSel() != AMDGPU::SDWA::DWORD) {
    // v_mac_f16/32_sdwa allows dst_sel to be DWORD only.
    return false;
  }

  MachineOperand *Operand = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  assert(Operand &&
         Operand->isReg() &&
         isSameReg(*Operand, *getReplacedOperand()));
  copyRegOperand(*Operand, *getTargetOperand());
  MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
  assert(DstSel);
  DstSel->setImm(getDstSel());
  MachineOperand *DstUnused =
      TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  assert(DstUnused);
  DstUnused->setImm(getDstUnused());

  // Remove the original instruction because it would conflict with our new
  // instruction by register definition.
  getParentInst()->eraseFromParent();
  return true;
}

bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
                                           const SIInstrInfo *TII) {
  // MI should be moved right before v_or_b32.
  // For this to be safe, clear all kill flags on uses of MI's src operands;
  // otherwise the moved MI could read an operand that is already killed.
  for (MachineOperand &MO : MI.uses()) {
    if (!MO.isReg())
      continue;
    getMRI()->clearKillFlags(MO.getReg());
  }

  // Move MI before v_or_b32.
  auto MBB = MI.getParent();
  MBB->remove(&MI);
  MBB->insert(getParentInst(), &MI);

  // Add an implicit use of the preserved register.
  MachineInstrBuilder MIB(*MBB->getParent(), MI);
  MIB.addReg(getPreservedOperand()->getReg(),
             RegState::ImplicitKill,
             getPreservedOperand()->getSubReg());

  // Tie dst to the implicit use.
  MI.tieOperands(AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdst),
                 MI.getNumOperands() - 1);

  // Convert MI as any other SDWADstOperand and remove v_or_b32.
  return SDWADstOperand::convertToSDWA(MI, TII);
}

std::optional<int64_t>
SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
  if (Op.isImm()) {
    return Op.getImm();
  }

  // If this is not an immediate then it can be a copy of an immediate value,
  // e.g.:
  // %1 = S_MOV_B32 255
  if (Op.isReg()) {
    for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
      if (!isSameReg(Op, Def))
        continue;

      const MachineInstr *DefInst = Def.getParent();
      if (!TII->isFoldableCopy(*DefInst))
        return std::nullopt;

      const MachineOperand &Copied = DefInst->getOperand(1);
      if (!Copied.isImm())
        return std::nullopt;

      return Copied.getImm();
    }
  }

  return std::nullopt;
}

std::unique_ptr<SDWAOperand>
SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  case AMDGPU::V_LSHRREV_B32_e32:
  case AMDGPU::V_ASHRREV_I32_e32:
  case AMDGPU::V_LSHLREV_B32_e32:
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_LSHLREV_B32_e64: {
    // from: v_lshrrev_b32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3

    // from: v_ashrrev_i32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3 sext:1

    // from: v_lshlrev_b32_e32 v1, 16/24, v0
    // to SDWA dst:v1 dst_sel:WORD_1/BYTE_3 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm)
      break;

    if (*Imm != 16 && *Imm != 24)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    if (Src1->getReg().isPhysical() || Dst->getReg().isPhysical())
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B32_e64) {
      return std::make_unique<SDWADstOperand>(
          Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
    } else {
      return std::make_unique<SDWASrcOperand>(
          Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
          Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
          Opcode != AMDGPU::V_LSHRREV_B32_e64);
    }
    break;
  }

  case AMDGPU::V_LSHRREV_B16_e32:
  case AMDGPU::V_ASHRREV_I16_e32:
  case AMDGPU::V_LSHLREV_B16_e32:
  case AMDGPU::V_LSHRREV_B16_e64:
  case AMDGPU::V_ASHRREV_I16_e64:
  case AMDGPU::V_LSHLREV_B16_e64: {
    // from: v_lshrrev_b16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1

    // from: v_ashrrev_i16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1 sext:1

    // from: v_lshlrev_b16_e32 v1, 8, v0
    // to SDWA dst:v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm || *Imm != 8)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (Src1->getReg().isPhysical() || Dst->getReg().isPhysical())
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B16_e64) {
      return std::make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
    } else {
      return std::make_unique<SDWASrcOperand>(
            Src1, Dst, BYTE_1, false, false,
            Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
            Opcode != AMDGPU::V_LSHRREV_B16_e64);
    }
    break;
  }

  case AMDGPU::V_BFE_I32_e64:
  case AMDGPU::V_BFE_U32_e64: {
    // e.g.:
    // from: v_bfe_u32 v1, v0, 8, 8
    // to SDWA src:v0 src_sel:BYTE_1

    // offset | width | src_sel
    // ------------------------
    // 0      | 8     | BYTE_0
    // 0      | 16    | WORD_0
    // 0      | 32    | DWORD ?
    // 8      | 8     | BYTE_1
    // 16     | 8     | BYTE_2
    // 16     | 16    | WORD_1
    // 24     | 8     | BYTE_3

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto Offset = foldToImm(*Src1);
    if (!Offset)
      break;

    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    auto Width = foldToImm(*Src2);
    if (!Width)
      break;

    SdwaSel SrcSel = DWORD;

    if (*Offset == 0 && *Width == 8)
      SrcSel = BYTE_0;
    else if (*Offset == 0 && *Width == 16)
      SrcSel = WORD_0;
    else if (*Offset == 0 && *Width == 32)
      SrcSel = DWORD;
    else if (*Offset == 8 && *Width == 8)
      SrcSel = BYTE_1;
    else if (*Offset == 16 && *Width == 8)
      SrcSel = BYTE_2;
    else if (*Offset == 16 && *Width == 16)
      SrcSel = WORD_1;
    else if (*Offset == 24 && *Width == 8)
      SrcSel = BYTE_3;
    else
      break;

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (Src0->getReg().isPhysical() || Dst->getReg().isPhysical())
      break;

    return std::make_unique<SDWASrcOperand>(
          Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32_e64);
  }

  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_AND_B32_e64: {
    // e.g.:
    // from: v_and_b32_e32 v1, 0x0000ffff/0x000000ff, v0
    // to SDWA src:v0 src_sel:WORD_0/BYTE_0

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto ValSrc = Src1;
    auto Imm = foldToImm(*Src0);

    if (!Imm) {
      Imm = foldToImm(*Src1);
      ValSrc = Src0;
    }

    if (!Imm || (*Imm != 0x0000ffff && *Imm != 0x000000ff))
      break;

    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (ValSrc->getReg().isPhysical() || Dst->getReg().isPhysical())
      break;

    return std::make_unique<SDWASrcOperand>(
        ValSrc, Dst, *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
  }

  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_OR_B32_e64: {
    // Patterns for dst_unused:UNUSED_PRESERVE.
    // e.g., from:
    // v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD
    //                           src0_sel:WORD_1 src1_sel:WORD_1
    // v_add_f16_e32 v3, v1, v2
    // v_or_b32_e32 v4, v0, v3
    // to SDWA preserve dst:v4 dst_sel:WORD_1 dst_unused:UNUSED_PRESERVE
    // preserve:v3

    // Check if one of the operands of v_or_b32 is an SDWA instruction.
    using CheckRetType =
        std::optional<std::pair<MachineOperand *, MachineOperand *>>;
    auto CheckOROperandsForSDWA =
      [&](const MachineOperand *Op1, const MachineOperand *Op2) -> CheckRetType {
        if (!Op1 || !Op1->isReg() || !Op2 || !Op2->isReg())
          return CheckRetType(std::nullopt);

        MachineOperand *Op1Def = findSingleRegDef(Op1, MRI);
        if (!Op1Def)
          return CheckRetType(std::nullopt);

        MachineInstr *Op1Inst = Op1Def->getParent();
        if (!TII->isSDWA(*Op1Inst))
          return CheckRetType(std::nullopt);

        MachineOperand *Op2Def = findSingleRegDef(Op2, MRI);
        if (!Op2Def)
          return CheckRetType(std::nullopt);

        return CheckRetType(std::pair(Op1Def, Op2Def));
      };

    MachineOperand *OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    assert(OrSDWA && OrOther);
    auto Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
    if (!Res) {
      OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
      assert(OrSDWA && OrOther);
      Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
      if (!Res)
        break;
    }

    MachineOperand *OrSDWADef = Res->first;
    MachineOperand *OrOtherDef = Res->second;
    assert(OrSDWADef && OrOtherDef);

    MachineInstr *SDWAInst = OrSDWADef->getParent();
    MachineInstr *OtherInst = OrOtherDef->getParent();

    // Check that OtherInst is actually bitwise compatible with SDWAInst,
    // i.e. their destination patterns don't overlap. A compatible instruction
    // is either a regular instruction with compatible bitness or an SDWA
    // instruction with a correct dst_sel:
    // SDWAInst | OtherInst bitness / OtherInst dst_sel
    // -----------------------------------------------------
    // DWORD    | no                    / no
    // WORD_0   | no                    / BYTE_2/3, WORD_1
    // WORD_1   | 8/16-bit instructions / BYTE_0/1, WORD_0
    // BYTE_0   | no                    / BYTE_1/2/3, WORD_1
    // BYTE_1   | 8-bit                 / BYTE_0/2/3, WORD_1
    // BYTE_2   | 8/16-bit              / BYTE_0/1/3, WORD_0
    // BYTE_3   | 8/16/24-bit           / BYTE_0/1/2, WORD_0
    // E.g. if SDWAInst is v_add_f16_sdwa dst_sel:WORD_1 then v_add_f16 is OK
    // but v_add_f32 is not.

    // TODO: add support for non-SDWA instructions as OtherInst.
    // For now this only works with SDWA instructions. For regular
    // instructions there is no way to determine if the instruction writes
    // only 8/16/24 bits out of the full register size, and all registers are
    // at least 32 bits wide.
    if (!TII->isSDWA(*OtherInst))
      break;

    SdwaSel DstSel = static_cast<SdwaSel>(
        TII->getNamedImmOperand(*SDWAInst, AMDGPU::OpName::dst_sel));
    SdwaSel OtherDstSel = static_cast<SdwaSel>(
        TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_sel));

    bool DstSelAgree = false;
    switch (DstSel) {
    case WORD_0: DstSelAgree = ((OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case WORD_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_0: DstSelAgree = ((OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_2: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_3: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == WORD_0));
      break;
    default: DstSelAgree = false;
    }

    if (!DstSelAgree)
      break;

    // Also OtherInst dst_unused should be UNUSED_PAD.
    DstUnused OtherDstUnused = static_cast<DstUnused>(
        TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_unused));
    if (OtherDstUnused != DstUnused::UNUSED_PAD)
      break;

    // Create DstPreserveOperand.
    MachineOperand *OrDst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    assert(OrDst && OrDst->isReg());

    return std::make_unique<SDWADstPreserveOperand>(
        OrDst, OrSDWADef, OrOtherDef, DstSel);
  }
  }

  return nullptr;
}

#if !defined(NDEBUG)
static raw_ostream &operator<<(raw_ostream &OS, const SDWAOperand &Operand) {
  Operand.print(OS);
  return OS;
}
#endif

void SIPeepholeSDWA::matchSDWAOperands(MachineBasicBlock &MBB) {
  for (MachineInstr &MI : MBB) {
    if (auto Operand = matchSDWAOperand(MI)) {
      LLVM_DEBUG(dbgs() << "Match: " << MI << "To: " << *Operand << '\n');
      SDWAOperands[&MI] = std::move(Operand);
      ++NumSDWAPatternsFound;
    }
  }
}

// Convert the V_ADD_CO_U32_e64 into V_ADD_CO_U32_e32. This allows
// isConvertibleToSDWA to perform its transformation on V_ADD_CO_U32_e32 into
// V_ADD_CO_U32_sdwa.
//
// We are transforming from a VOP3 into a VOP2 form of the instruction.
//   %19:vgpr_32 = V_AND_B32_e32 255,
//       killed %16:vgpr_32, implicit $exec
//   %47:vgpr_32, %49:sreg_64_xexec = V_ADD_CO_U32_e64
//       %26.sub0:vreg_64, %19:vgpr_32, implicit $exec
//   %48:vgpr_32, dead %50:sreg_64_xexec = V_ADDC_U32_e64
//       %26.sub1:vreg_64, %54:vgpr_32, killed %49:sreg_64_xexec, implicit $exec
//
// becomes
//   %47:vgpr_32 = V_ADD_CO_U32_sdwa
//       0, %26.sub0:vreg_64, 0, killed %16:vgpr_32, 0, 6, 0, 6, 0,
//       implicit-def $vcc, implicit $exec
//   %48:vgpr_32, dead %50:sreg_64_xexec = V_ADDC_U32_e64
//       %26.sub1:vreg_64, %54:vgpr_32, killed $vcc, implicit $exec
void SIPeepholeSDWA::pseudoOpConvertToVOP2(MachineInstr &MI,
                                           const GCNSubtarget &ST) const {
  int Opc = MI.getOpcode();
  assert((Opc == AMDGPU::V_ADD_CO_U32_e64 || Opc == AMDGPU::V_SUB_CO_U32_e64) &&
         "Currently only handles V_ADD_CO_U32_e64 or V_SUB_CO_U32_e64");

  // Can the candidate MI be shrunk?
  if (!TII->canShrink(MI, *MRI))
    return;
  Opc = AMDGPU::getVOPe32(Opc);
  // Find the related instruction that consumes the carry-out (V_ADDC/V_SUBB).
  const MachineOperand *Sdst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  if (!Sdst)
    return;
  MachineOperand *NextOp = findSingleRegUse(Sdst, MRI);
  if (!NextOp)
    return;
  MachineInstr &MISucc = *NextOp->getParent();

  // Make sure the carry in/out are subsequently unused.
  MachineOperand *CarryIn = TII->getNamedOperand(MISucc, AMDGPU::OpName::src2);
  if (!CarryIn)
    return;
  MachineOperand *CarryOut = TII->getNamedOperand(MISucc, AMDGPU::OpName::sdst);
  if (!CarryOut)
    return;
  if (!MRI->hasOneUse(CarryIn->getReg()) || !MRI->use_empty(CarryOut->getReg()))
    return;
  // Make sure VCC or its subregs are dead before MI.
  MachineBasicBlock &MBB = *MI.getParent();
  auto Liveness = MBB.computeRegisterLiveness(TRI, AMDGPU::VCC, MI, 25);
  if (Liveness != MachineBasicBlock::LQR_Dead)
    return;
  // Check that VCC isn't clobbered in the range (MI, MISucc).
  for (auto I = std::next(MI.getIterator()), E = MISucc.getIterator();
       I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::VCC, TRI))
      return;
  }

  // Replace MI with V_{SUB|ADD}_CO_U32_e32.
  BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(Opc))
    .add(*TII->getNamedOperand(MI, AMDGPU::OpName::vdst))
    .add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0))
    .add(*TII->getNamedOperand(MI, AMDGPU::OpName::src1))
    .setMIFlags(MI.getFlags());

  MI.eraseFromParent();

  // Since the carry output of MI is now VCC, update its use in MISucc.
  MISucc.substituteRegister(CarryIn->getReg(), TRI->getVCC(), 0, *TRI);
}

bool SIPeepholeSDWA::isConvertibleToSDWA(MachineInstr &MI,
                                         const GCNSubtarget &ST) const {
  // Check if this is already an SDWA instruction.
  unsigned Opc = MI.getOpcode();
  if (TII->isSDWA(Opc))
    return true;

  // Check if this instruction has an opcode that supports SDWA.
  if (AMDGPU::getSDWAOp(Opc) == -1)
    Opc = AMDGPU::getVOPe32(Opc);

  if (AMDGPU::getSDWAOp(Opc) == -1)
    return false;

  if (!ST.hasSDWAOmod() && TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->isVOPC(Opc)) {
    if (!ST.hasSDWASdst()) {
      const MachineOperand *SDst =
          TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
      if (SDst && (SDst->getReg() != AMDGPU::VCC &&
                   SDst->getReg() != AMDGPU::VCC_LO))
        return false;
    }

    if (!ST.hasSDWAOutModsVOPC() &&
        (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::omod)))
      return false;

  } else if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst) ||
             !TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
    return false;
  }

  if (!ST.hasSDWAMac() && (Opc == AMDGPU::V_FMAC_F16_e32 ||
                           Opc == AMDGPU::V_FMAC_F32_e32 ||
                           Opc == AMDGPU::V_MAC_F16_e32 ||
                           Opc == AMDGPU::V_MAC_F32_e32))
    return false;

  // Check if the target supports this SDWA opcode.
  if (TII->pseudoToMCOpcode(Opc) == -1)
    return false;

  // FIXME: has SDWA but requires handling of the implicit VCC use.
  if (Opc == AMDGPU::V_CNDMASK_B32_e32)
    return false;

  if (MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0)) {
    if (!Src0->isReg() && !Src0->isImm())
      return false;
  }

  if (MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1)) {
    if (!Src1->isReg() && !Src1->isImm())
      return false;
  }

  return true;
}

bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
                                   const SDWAOperandsVector &SDWAOperands) {
  LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);

  // Convert to SDWA.
  int SDWAOpcode;
  unsigned Opcode = MI.getOpcode();
  if (TII->isSDWA(Opcode)) {
    SDWAOpcode = Opcode;
  } else {
    SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
    if (SDWAOpcode == -1)
      SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
  }
  assert(SDWAOpcode != -1);

  const MCInstrDesc &SDWADesc = TII->get(SDWAOpcode);

  // Create the SDWA version of instruction MI and initialize its operands.
  MachineInstrBuilder SDWAInst =
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), SDWADesc)
    .setMIFlags(MI.getFlags());

  // Copy dst; if it is present in the original it should also be present in
  // the SDWA form.
  MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  if (Dst) {
    assert(AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::vdst));
    SDWAInst.add(*Dst);
  } else if ((Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst))) {
    assert(Dst && AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::sdst));
    SDWAInst.add(*Dst);
  } else {
    assert(AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::sdst));
    SDWAInst.addReg(TRI->getVCC(), RegState::Define);
  }

  // Copy src0 and initialize src0_modifiers. All SDWA instructions have src0
  // and src0_modifiers (except for v_nop_sdwa, but it can't get here).
  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  assert(Src0 && AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::src0) &&
         AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::src0_modifiers));
  if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers))
    SDWAInst.addImm(Mod->getImm());
  else
    SDWAInst.addImm(0);
  SDWAInst.add(*Src0);

  // Copy src1 if present, initialize src1_modifiers.
  MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1) {
    assert(AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::src1) &&
           AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::src1_modifiers));
    if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers))
      SDWAInst.addImm(Mod->getImm());
    else
      SDWAInst.addImm(0);
    SDWAInst.add(*Src1);
  }

  if (SDWAOpcode == AMDGPU::V_FMAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_FMAC_F32_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F32_sdwa) {
    // v_mac_f16/32 has an additional src2 operand tied to vdst.
    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    assert(Src2);
    SDWAInst.add(*Src2);
  }

  // Copy clamp if present, initialize otherwise.
  assert(AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::clamp));
  MachineOperand *Clamp = TII->getNamedOperand(MI, AMDGPU::OpName::clamp);
  if (Clamp) {
    SDWAInst.add(*Clamp);
  } else {
    SDWAInst.addImm(0);
  }

  // Copy omod if present, initialize otherwise if needed.
  if (AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::omod)) {
    MachineOperand *OMod = TII->getNamedOperand(MI, AMDGPU::OpName::omod);
    if (OMod) {
      SDWAInst.add(*OMod);
    } else {
      SDWAInst.addImm(0);
    }
  }

  // Copy dst_sel if present, initialize otherwise if needed.
  if (AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::dst_sel)) {
    MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
    if (DstSel) {
      SDWAInst.add(*DstSel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Copy dst_unused if present, initialize otherwise if needed.
  if (AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::dst_unused)) {
    MachineOperand *DstUnused =
        TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
    if (DstUnused) {
      SDWAInst.add(*DstUnused);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::DstUnused::UNUSED_PAD);
    }
  }

  // Copy src0_sel if present, initialize otherwise.
  assert(AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::src0_sel));
  MachineOperand *Src0Sel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  if (Src0Sel) {
    SDWAInst.add(*Src0Sel);
  } else {
    SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
  }

  // Copy src1_sel if present, initialize otherwise if needed.
  if (Src1) {
    assert(AMDGPU::hasNamedOperand(SDWAOpcode, AMDGPU::OpName::src1_sel));
    MachineOperand *Src1Sel =
        TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    if (Src1Sel) {
      SDWAInst.add(*Src1Sel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Check for a preserved register that needs to be copied.
  auto DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  if (DstUnused &&
      DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
    // We expect, if we are here, that the instruction was already in its SDWA
    // form, with a tied operand.
    assert(Dst && Dst->isTied());
    assert(Opcode == static_cast<unsigned int>(SDWAOpcode));
    // We also expect a vdst, since sdst can't preserve.
    auto PreserveDstIdx =
        AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst);
    assert(PreserveDstIdx != -1);

    auto TiedIdx = MI.findTiedOperandIdx(PreserveDstIdx);
    auto Tied = MI.getOperand(TiedIdx);

    SDWAInst.add(Tied);
    SDWAInst->tieOperands(PreserveDstIdx, SDWAInst->getNumOperands() - 1);
  }

  // Apply all SDWA operand patterns.
  bool Converted = false;
  for (auto &Operand : SDWAOperands) {
    LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
    // There should be no intersection between SDWA operands and potential MIs,
    // e.g.:
    // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
    // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
    // v_add_u32 v3, v4, v2
    //
    // In that example it is possible that we would fold the 2nd instruction
    // into the 3rd (v_add_u32_sdwa) and then try to fold the 1st instruction
    // into the 2nd (which was already destroyed). So if an SDWAOperand is
    // also a potential MI then do not apply it.
    if (PotentialMatches.count(Operand->getParentInst()) == 0)
      Converted |= Operand->convertToSDWA(*SDWAInst, TII);
  }
  if (Converted) {
    ConvertedInstructions.push_back(SDWAInst);
  } else {
    SDWAInst->eraseFromParent();
    return false;
  }

  LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
  ++NumSDWAInstructionsPeepholed;

  MI.eraseFromParent();
  return true;
}

// If an instruction was converted to SDWA it should not have immediates or
// SGPR operands (one SGPR is allowed on GFX9). Copy its scalar operands into
// VGPRs.
void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
                                            const GCNSubtarget &ST) const {
  const MCInstrDesc &Desc = TII->get(MI.getOpcode());
  unsigned ConstantBusCount = 0;
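  // Rewrite each immediate or SGPR use sitting in a VSrc operand slot as a
  // V_MOV_B32 into a fresh VGPR, keeping at most one SGPR when the subtarget
  // allows SDWA scalar operands.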
  for (MachineOperand &Op : MI.explicit_uses()) {
    if (!Op.isImm() && !(Op.isReg() && !TRI->isVGPR(*MRI, Op.getReg())))
      continue;

    unsigned I = Op.getOperandNo();
    if (Desc.operands()[I].RegClass == -1 ||
        !TRI->isVSSuperClass(TRI->getRegClass(Desc.operands()[I].RegClass)))
      continue;

    if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() &&
        TRI->isSGPRReg(*MRI, Op.getReg())) {
      ++ConstantBusCount;
      continue;
    }

    Register VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    auto Copy = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                        TII->get(AMDGPU::V_MOV_B32_e32), VGPR);
    if (Op.isImm())
      Copy.addImm(Op.getImm());
    else if (Op.isReg())
      Copy.addReg(Op.getReg(), Op.isKill() ? RegState::Kill : 0,
                  Op.getSubReg());
    Op.ChangeToRegister(VGPR, false);
  }
}

bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (!ST.hasSDWA() || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();

  // Find all SDWA operands in MF.
  bool Ret = false;
  for (MachineBasicBlock &MBB : MF) {
    bool Changed = false;
    do {
      // Preprocess the ADD/SUB pairs so they could be SDWA'ed.
      // Look for a possible ADD or SUB that resulted from a previously lowered
      // V_{ADD|SUB}_U64_PSEUDO. The function pseudoOpConvertToVOP2
      // lowers the pair of instructions into e32 form.
      matchSDWAOperands(MBB);
      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI &&
            (PotentialMI->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 ||
             PotentialMI->getOpcode() == AMDGPU::V_SUB_CO_U32_e64))
          pseudoOpConvertToVOP2(*PotentialMI, ST);
      }
      SDWAOperands.clear();

      // Generate the potential match list.
      matchSDWAOperands(MBB);

      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST)) {
          PotentialMatches[PotentialMI].push_back(Operand.get());
        }
      }

      for (auto &PotentialPair : PotentialMatches) {
        MachineInstr &PotentialMI = *PotentialPair.first;
        convertToSDWA(PotentialMI, PotentialPair.second);
      }

      PotentialMatches.clear();
      SDWAOperands.clear();

      Changed = !ConvertedInstructions.empty();

      if (Changed)
        Ret = true;
      while (!ConvertedInstructions.empty())
        legalizeScalarOperands(*ConvertedInstructions.pop_back_val(), ST);
    } while (Changed);
  }

  return Ret;
}