1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
23 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/IntrinsicsAMDGPU.h"
29 #include <optional>
30 
31 #define DEBUG_TYPE "amdgpu-isel"
32 
33 using namespace llvm;
34 using namespace MIPatternMatch;
35 
36 static cl::opt<bool> AllowRiskySelect(
37   "amdgpu-global-isel-risky-select",
38   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
39   cl::init(false),
40   cl::ReallyHidden);
41 
42 #define GET_GLOBALISEL_IMPL
43 #define AMDGPUSubtarget GCNSubtarget
44 #include "AMDGPUGenGlobalISel.inc"
45 #undef GET_GLOBALISEL_IMPL
46 #undef AMDGPUSubtarget
47 
48 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
49     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
50     const AMDGPUTargetMachine &TM)
51     : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
52       STI(STI),
53       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
54 #define GET_GLOBALISEL_PREDICATES_INIT
55 #include "AMDGPUGenGlobalISel.inc"
56 #undef GET_GLOBALISEL_PREDICATES_INIT
57 #define GET_GLOBALISEL_TEMPORARIES_INIT
58 #include "AMDGPUGenGlobalISel.inc"
59 #undef GET_GLOBALISEL_TEMPORARIES_INIT
60 {
61 }
62 
63 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
64 
65 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
66                                         CodeGenCoverage *CoverageInfo,
67                                         ProfileSummaryInfo *PSI,
68                                         BlockFrequencyInfo *BFI) {
69   MRI = &MF.getRegInfo();
70   Subtarget = &MF.getSubtarget<GCNSubtarget>();
71   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
72 }
73 
74 bool AMDGPUInstructionSelector::isVCC(Register Reg,
75                                       const MachineRegisterInfo &MRI) const {
76   // The verifier is oblivious to s1 being a valid value for wavesize registers.
77   if (Reg.isPhysical())
78     return false;
79 
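  // A virtual register has either a register class or a register bank
  // assigned at this point; handle whichever form is present.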
80   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
81   const TargetRegisterClass *RC =
82       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
83   if (RC) {
84     const LLT Ty = MRI.getType(Reg);
85     if (!Ty.isValid() || Ty.getSizeInBits() != 1)
86       return false;
87     // G_TRUNC s1 result is never vcc.
88     return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
89            RC->hasSuperClassEq(TRI.getBoolRC());
90   }
91 
92   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
93   return RB->getID() == AMDGPU::VCCRegBankID;
94 }
95 
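// Lower a copy-like intrinsic (wqm, softwqm, strict_wwm, strict_wqm) to its
// pseudo opcode: drop the intrinsic ID operand, add the implicit EXEC use,
// and constrain source and destination to a common register class.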
96 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
97                                                         unsigned NewOpc) const {
98   MI.setDesc(TII.get(NewOpc));
99   MI.removeOperand(1); // Remove intrinsic ID.
100   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
101 
102   MachineOperand &Dst = MI.getOperand(0);
103   MachineOperand &Src = MI.getOperand(1);
104 
105   // TODO: This should be legalized to s32 if needed
106   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
107     return false;
108 
109   const TargetRegisterClass *DstRC
110     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
111   const TargetRegisterClass *SrcRC
112     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
113   if (!DstRC || DstRC != SrcRC)
114     return false;
115 
116   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
117          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
118 }
119 
120 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
121   const DebugLoc &DL = I.getDebugLoc();
122   MachineBasicBlock *BB = I.getParent();
123   I.setDesc(TII.get(TargetOpcode::COPY));
124 
125   const MachineOperand &Src = I.getOperand(1);
126   MachineOperand &Dst = I.getOperand(0);
127   Register DstReg = Dst.getReg();
128   Register SrcReg = Src.getReg();
129 
130   if (isVCC(DstReg, *MRI)) {
131     if (SrcReg == AMDGPU::SCC) {
132       const TargetRegisterClass *RC
133         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
134       if (!RC)
135         return true;
136       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
137     }
138 
139     if (!isVCC(SrcReg, *MRI)) {
140       // TODO: Should probably leave the copy and let copyPhysReg expand it.
141       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
142         return false;
143 
144       const TargetRegisterClass *SrcRC
145         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
146 
147       std::optional<ValueAndVReg> ConstVal =
148           getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
149       if (ConstVal) {
150         unsigned MovOpc =
151             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
152         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
153             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
154       } else {
155         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
156 
157         // We can't trust the high bits at this point, so clear them.
158 
159         // TODO: Skip masking high bits if def is known boolean.
160 
161         unsigned AndOpc =
162             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
163         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
164             .addImm(1)
165             .addReg(SrcReg);
166         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
167             .addImm(0)
168             .addReg(MaskedReg);
169       }
170 
171       if (!MRI->getRegClassOrNull(SrcReg))
172         MRI->setRegClass(SrcReg, SrcRC);
173       I.eraseFromParent();
174       return true;
175     }
176 
177     const TargetRegisterClass *RC =
178       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
179     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
180       return false;
181 
182     return true;
183   }
184 
185   for (const MachineOperand &MO : I.operands()) {
186     if (MO.getReg().isPhysical())
187       continue;
188 
189     const TargetRegisterClass *RC =
190             TRI.getConstrainedRegClassForOperand(MO, *MRI);
191     if (!RC)
192       continue;
193     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
194   }
195   return true;
196 }
197 
198 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
199   const Register DefReg = I.getOperand(0).getReg();
200   const LLT DefTy = MRI->getType(DefReg);
201   if (DefTy == LLT::scalar(1)) {
202     if (!AllowRiskySelect) {
203       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
204       return false;
205     }
206 
207     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
208   }
209 
  // TODO: Verify this doesn't have insane operands (e.g. a VGPR to SGPR copy).
211 
212   const RegClassOrRegBank &RegClassOrBank =
213     MRI->getRegClassOrRegBank(DefReg);
214 
215   const TargetRegisterClass *DefRC
216     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
217   if (!DefRC) {
218     if (!DefTy.isValid()) {
219       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
220       return false;
221     }
222 
223     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
224     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
225     if (!DefRC) {
226       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
227       return false;
228     }
229   }
230 
231   // TODO: Verify that all registers have the same bank
232   I.setDesc(TII.get(TargetOpcode::PHI));
233   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
234 }
235 
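// Extract the requested 32-bit half of a 64-bit operand. Register operands
// are copied out through the corresponding subregister; immediate operands
// are split into their low and high 32 bits.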
236 MachineOperand
237 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
238                                            const TargetRegisterClass &SubRC,
239                                            unsigned SubIdx) const {
240 
241   MachineInstr *MI = MO.getParent();
242   MachineBasicBlock *BB = MO.getParent()->getParent();
243   Register DstReg = MRI->createVirtualRegister(&SubRC);
244 
245   if (MO.isReg()) {
246     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
247     Register Reg = MO.getReg();
248     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
249             .addReg(Reg, 0, ComposedSubIdx);
250 
251     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
252                                      MO.isKill(), MO.isDead(), MO.isUndef(),
253                                      MO.isEarlyClobber(), 0, MO.isDebug(),
254                                      MO.isInternalRead());
255   }
256 
257   assert(MO.isImm());
258 
259   APInt Imm(64, MO.getImm());
260 
261   switch (SubIdx) {
262   default:
263     llvm_unreachable("do not know to split immediate with this sub index.");
264   case AMDGPU::sub0:
265     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
266   case AMDGPU::sub1:
267     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
268   }
269 }
270 
271 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
272   switch (Opc) {
273   case AMDGPU::G_AND:
274     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
275   case AMDGPU::G_OR:
276     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
277   case AMDGPU::G_XOR:
278     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
279   default:
280     llvm_unreachable("not a bit op");
281   }
282 }
283 
284 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
285   Register DstReg = I.getOperand(0).getReg();
286   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
287 
288   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
289   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
290       DstRB->getID() != AMDGPU::VCCRegBankID)
291     return false;
292 
293   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
294                             STI.isWave64());
295   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
296 
297   // Dead implicit-def of scc
298   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
299                                          true, // isImp
300                                          false, // isKill
301                                          true)); // isDead
302   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
303 }
304 
305 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
306   MachineBasicBlock *BB = I.getParent();
307   MachineFunction *MF = BB->getParent();
308   Register DstReg = I.getOperand(0).getReg();
309   const DebugLoc &DL = I.getDebugLoc();
310   LLT Ty = MRI->getType(DstReg);
311   if (Ty.isVector())
312     return false;
313 
314   unsigned Size = Ty.getSizeInBits();
315   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
316   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
317   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
318 
319   if (Size == 32) {
320     if (IsSALU) {
321       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
322       MachineInstr *Add =
323         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
324         .add(I.getOperand(1))
325         .add(I.getOperand(2));
326       I.eraseFromParent();
327       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
328     }
329 
330     if (STI.hasAddNoCarry()) {
331       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
332       I.setDesc(TII.get(Opc));
333       I.addOperand(*MF, MachineOperand::CreateImm(0));
334       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
335       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
336     }
337 
338     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
339 
340     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
341     MachineInstr *Add
342       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
343       .addDef(UnusedCarry, RegState::Dead)
344       .add(I.getOperand(1))
345       .add(I.getOperand(2))
346       .addImm(0);
347     I.eraseFromParent();
348     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
349   }
350 
351   assert(!Sub && "illegal sub should not reach here");
352 
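  // 64-bit case: split both operands into 32-bit halves, add the low
  // halves, propagate the carry into the high halves, and reassemble the
  // result with a REG_SEQUENCE.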
353   const TargetRegisterClass &RC
354     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
355   const TargetRegisterClass &HalfRC
356     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
357 
358   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
359   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
360   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
361   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
362 
363   Register DstLo = MRI->createVirtualRegister(&HalfRC);
364   Register DstHi = MRI->createVirtualRegister(&HalfRC);
365 
366   if (IsSALU) {
367     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
368       .add(Lo1)
369       .add(Lo2);
370     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
371       .add(Hi1)
372       .add(Hi2);
373   } else {
374     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
375     Register CarryReg = MRI->createVirtualRegister(CarryRC);
376     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
377       .addDef(CarryReg)
378       .add(Lo1)
379       .add(Lo2)
380       .addImm(0);
381     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
382       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
383       .add(Hi1)
384       .add(Hi2)
385       .addReg(CarryReg, RegState::Kill)
386       .addImm(0);
387 
388     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
389       return false;
390   }
391 
392   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
393     .addReg(DstLo)
394     .addImm(AMDGPU::sub0)
395     .addReg(DstHi)
    .addImm(AMDGPU::sub1);

399   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
400     return false;
401 
402   I.eraseFromParent();
403   return true;
404 }
405 
406 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
407   MachineInstr &I) const {
408   MachineBasicBlock *BB = I.getParent();
409   MachineFunction *MF = BB->getParent();
410   const DebugLoc &DL = I.getDebugLoc();
411   Register Dst0Reg = I.getOperand(0).getReg();
412   Register Dst1Reg = I.getOperand(1).getReg();
413   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
414                      I.getOpcode() == AMDGPU::G_UADDE;
415   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
416                           I.getOpcode() == AMDGPU::G_USUBE;
417 
418   if (isVCC(Dst1Reg, *MRI)) {
419     unsigned NoCarryOpc =
420         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
421     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
422     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
423     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
424     I.addOperand(*MF, MachineOperand::CreateImm(0));
425     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
426   }
427 
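  // Scalar case: the carry is modeled through SCC. A carry-in is copied
  // into SCC before the add/sub, and the carry-out is copied back out of
  // SCC afterwards.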
428   Register Src0Reg = I.getOperand(2).getReg();
429   Register Src1Reg = I.getOperand(3).getReg();
430 
431   if (HasCarryIn) {
432     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
433       .addReg(I.getOperand(4).getReg());
434   }
435 
436   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
437   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
438 
439   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
440     .add(I.getOperand(2))
441     .add(I.getOperand(3));
442   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
443     .addReg(AMDGPU::SCC);
444 
445   if (!MRI->getRegClassOrNull(Dst1Reg))
446     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
447 
448   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
449       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
450       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
451     return false;
452 
453   if (HasCarryIn &&
454       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
455                                     AMDGPU::SReg_32RegClass, *MRI))
456     return false;
457 
458   I.eraseFromParent();
459   return true;
460 }
461 
462 bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
463     MachineInstr &I) const {
464   MachineBasicBlock *BB = I.getParent();
465   MachineFunction *MF = BB->getParent();
466   const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
467 
468   unsigned Opc;
469   if (Subtarget->hasMADIntraFwdBug())
470     Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64
471                      : AMDGPU::V_MAD_I64_I32_gfx11_e64;
472   else
473     Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64;
474   I.setDesc(TII.get(Opc));
475   I.addOperand(*MF, MachineOperand::CreateImm(0));
476   I.addImplicitDefUseOperands(*MF);
477   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
478 }
479 
// TODO: We should probably legalize these to use only 32-bit results.
481 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
482   MachineBasicBlock *BB = I.getParent();
483   Register DstReg = I.getOperand(0).getReg();
484   Register SrcReg = I.getOperand(1).getReg();
485   LLT DstTy = MRI->getType(DstReg);
486   LLT SrcTy = MRI->getType(SrcReg);
487   const unsigned SrcSize = SrcTy.getSizeInBits();
488   unsigned DstSize = DstTy.getSizeInBits();
489 
490   // TODO: Should handle any multiple of 32 offset.
491   unsigned Offset = I.getOperand(2).getImm();
492   if (Offset % 32 != 0 || DstSize > 128)
493     return false;
494 
495   // 16-bit operations really use 32-bit registers.
496   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
497   if (DstSize == 16)
498     DstSize = 32;
499 
500   const TargetRegisterClass *DstRC =
501     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
502   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
503     return false;
504 
505   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
506   const TargetRegisterClass *SrcRC =
507       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
508   if (!SrcRC)
509     return false;
510   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
511                                                          DstSize / 32);
512   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
513   if (!SrcRC)
514     return false;
515 
516   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
517                                     *SrcRC, I.getOperand(1));
518   const DebugLoc &DL = I.getDebugLoc();
519   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
520     .addReg(SrcReg, 0, SubReg);
521 
522   I.eraseFromParent();
523   return true;
524 }
525 
526 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
527   MachineBasicBlock *BB = MI.getParent();
528   Register DstReg = MI.getOperand(0).getReg();
529   LLT DstTy = MRI->getType(DstReg);
530   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
531 
532   const unsigned SrcSize = SrcTy.getSizeInBits();
533   if (SrcSize < 32)
534     return selectImpl(MI, *CoverageInfo);
535 
536   const DebugLoc &DL = MI.getDebugLoc();
537   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
538   const unsigned DstSize = DstTy.getSizeInBits();
539   const TargetRegisterClass *DstRC =
540       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
541   if (!DstRC)
542     return false;
543 
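  // Assemble the wide value with a REG_SEQUENCE, mapping each source
  // operand onto consecutive subregister slices of the destination class.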
544   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
545   MachineInstrBuilder MIB =
546     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
547   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
548     MachineOperand &Src = MI.getOperand(I + 1);
549     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
550     MIB.addImm(SubRegs[I]);
551 
552     const TargetRegisterClass *SrcRC
553       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
554     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
555       return false;
556   }
557 
558   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
559     return false;
560 
561   MI.eraseFromParent();
562   return true;
563 }
564 
565 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
566   MachineBasicBlock *BB = MI.getParent();
567   const int NumDst = MI.getNumOperands() - 1;
568 
569   MachineOperand &Src = MI.getOperand(NumDst);
570 
571   Register SrcReg = Src.getReg();
572   Register DstReg0 = MI.getOperand(0).getReg();
573   LLT DstTy = MRI->getType(DstReg0);
574   LLT SrcTy = MRI->getType(SrcReg);
575 
576   const unsigned DstSize = DstTy.getSizeInBits();
577   const unsigned SrcSize = SrcTy.getSizeInBits();
578   const DebugLoc &DL = MI.getDebugLoc();
579   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
580 
581   const TargetRegisterClass *SrcRC =
582       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
583   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
584     return false;
585 
586   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
587   // source, and this relies on the fact that the same subregister indices are
588   // used for both.
589   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
590   for (int I = 0, E = NumDst; I != E; ++I) {
591     MachineOperand &Dst = MI.getOperand(I);
592     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
593       .addReg(SrcReg, 0, SubRegs[I]);
594 
595     // Make sure the subregister index is valid for the source register.
596     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
597     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
598       return false;
599 
600     const TargetRegisterClass *DstRC =
601       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
602     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
603       return false;
604   }
605 
606   MI.eraseFromParent();
607   return true;
608 }
609 
610 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR(MachineInstr &MI) const {
611   assert(MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC ||
612          MI.getOpcode() == AMDGPU::G_BUILD_VECTOR);
613 
614   Register Src0 = MI.getOperand(1).getReg();
615   Register Src1 = MI.getOperand(2).getReg();
616   LLT SrcTy = MRI->getType(Src0);
617   const unsigned SrcSize = SrcTy.getSizeInBits();
618 
  // A G_BUILD_VECTOR whose sources are 32 bits or wider is handled like
  // G_MERGE_VALUES.
620   if (MI.getOpcode() == AMDGPU::G_BUILD_VECTOR && SrcSize >= 32) {
621     return selectG_MERGE_VALUES(MI);
622   }
623 
624   // Selection logic below is for V2S16 only.
625   // For G_BUILD_VECTOR_TRUNC, additionally check that the operands are s32.
626   Register Dst = MI.getOperand(0).getReg();
627   if (MRI->getType(Dst) != LLT::fixed_vector(2, 16) ||
628       (MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC &&
629        SrcTy != LLT::scalar(32)))
630     return selectImpl(MI, *CoverageInfo);
631 
632   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
633   if (DstBank->getID() == AMDGPU::AGPRRegBankID)
634     return false;
635 
636   assert(DstBank->getID() == AMDGPU::SGPRRegBankID ||
637          DstBank->getID() == AMDGPU::VGPRRegBankID);
638   const bool IsVector = DstBank->getID() == AMDGPU::VGPRRegBankID;
639 
640   const DebugLoc &DL = MI.getDebugLoc();
641   MachineBasicBlock *BB = MI.getParent();
642 
643   // First, before trying TableGen patterns, check if both sources are
644   // constants. In those cases, we can trivially compute the final constant
645   // and emit a simple move.
646   auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
647   if (ConstSrc1) {
648     auto ConstSrc0 =
649         getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
650     if (ConstSrc0) {
651       const int64_t K0 = ConstSrc0->Value.getSExtValue();
652       const int64_t K1 = ConstSrc1->Value.getSExtValue();
653       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
654       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
655       uint32_t Imm = Lo16 | (Hi16 << 16);
656 
657       // VALU
658       if (IsVector) {
659         BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), Dst).addImm(Imm);
660         MI.eraseFromParent();
661         return RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI);
662       }
663 
664       // SALU
665       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst).addImm(Imm);
666       MI.eraseFromParent();
667       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
668     }
669   }
670 
671   // Now try TableGen patterns.
672   if (selectImpl(MI, *CoverageInfo))
673     return true;
674 
675   // TODO: This should probably be a combine somewhere
676   // (build_vector $src0, undef)  -> copy $src0
677   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
678   if (Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
679     MI.setDesc(TII.get(AMDGPU::COPY));
680     MI.removeOperand(2);
681     const auto &RC =
682         IsVector ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
683     return RBI.constrainGenericRegister(Dst, RC, *MRI) &&
684            RBI.constrainGenericRegister(Src0, RC, *MRI);
685   }
686 
687   // TODO: Can be improved?
688   if (IsVector) {
689     Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
690     auto MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_AND_B32_e32), TmpReg)
691                    .addImm(0xFFFF)
692                    .addReg(Src0);
693     if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))
694       return false;
695 
696     MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), Dst)
697               .addReg(Src1)
698               .addImm(16)
699               .addReg(TmpReg);
700     if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))
701       return false;
702 
703     MI.eraseFromParent();
704     return true;
705   }
706 
707   Register ShiftSrc0;
708   Register ShiftSrc1;
709 
710   // With multiple uses of the shift, this will duplicate the shift and
711   // increase register pressure.
712   //
  // (build_vector (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
714   //  => (S_PACK_HH_B32_B16 $src0, $src1)
715   // (build_vector (lshr_oneuse SReg_32:$src0, 16), $src1)
716   //  => (S_PACK_HL_B32_B16 $src0, $src1)
717   // (build_vector $src0, (lshr_oneuse SReg_32:$src1, 16))
718   //  => (S_PACK_LH_B32_B16 $src0, $src1)
719   // (build_vector $src0, $src1)
720   //  => (S_PACK_LL_B32_B16 $src0, $src1)
721 
722   bool Shift0 = mi_match(
723       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
724 
725   bool Shift1 = mi_match(
726       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
727 
728   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
729   if (Shift0 && Shift1) {
730     Opc = AMDGPU::S_PACK_HH_B32_B16;
731     MI.getOperand(1).setReg(ShiftSrc0);
732     MI.getOperand(2).setReg(ShiftSrc1);
733   } else if (Shift1) {
734     Opc = AMDGPU::S_PACK_LH_B32_B16;
735     MI.getOperand(2).setReg(ShiftSrc1);
736   } else if (Shift0) {
737     auto ConstSrc1 =
738         getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
739     if (ConstSrc1 && ConstSrc1->Value == 0) {
740       // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
741       auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
742                      .addReg(ShiftSrc0)
743                      .addImm(16);
744 
745       MI.eraseFromParent();
746       return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
747     }
748     if (STI.hasSPackHL()) {
749       Opc = AMDGPU::S_PACK_HL_B32_B16;
750       MI.getOperand(1).setReg(ShiftSrc0);
751     }
752   }
753 
754   MI.setDesc(TII.get(Opc));
755   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
756 }
757 
758 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
759   return selectG_ADD_SUB(I);
760 }
761 
762 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
763   const MachineOperand &MO = I.getOperand(0);
764 
765   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
766   // regbank check here is to know why getConstrainedRegClassForOperand failed.
767   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
768   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
769       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
770     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
771     return true;
772   }
773 
774   return false;
775 }
776 
777 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
778   MachineBasicBlock *BB = I.getParent();
779 
780   Register DstReg = I.getOperand(0).getReg();
781   Register Src0Reg = I.getOperand(1).getReg();
782   Register Src1Reg = I.getOperand(2).getReg();
783   LLT Src1Ty = MRI->getType(Src1Reg);
784 
785   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
786   unsigned InsSize = Src1Ty.getSizeInBits();
787 
788   int64_t Offset = I.getOperand(3).getImm();
789 
790   // FIXME: These cases should have been illegal and unnecessary to check here.
791   if (Offset % 32 != 0 || InsSize % 32 != 0)
792     return false;
793 
794   // Currently not handled by getSubRegFromChannel.
795   if (InsSize > 128)
796     return false;
797 
798   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
799   if (SubReg == AMDGPU::NoSubRegister)
800     return false;
801 
802   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
803   const TargetRegisterClass *DstRC =
804       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
805   if (!DstRC)
806     return false;
807 
808   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
809   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
810   const TargetRegisterClass *Src0RC =
811       TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
812   const TargetRegisterClass *Src1RC =
813       TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);
814 
815   // Deal with weird cases where the class only partially supports the subreg
816   // index.
817   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
818   if (!Src0RC || !Src1RC)
819     return false;
820 
821   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
822       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
823       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
824     return false;
825 
826   const DebugLoc &DL = I.getDebugLoc();
827   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
828     .addReg(Src0Reg)
829     .addReg(Src1Reg)
830     .addImm(SubReg);
831 
832   I.eraseFromParent();
833   return true;
834 }
835 
836 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
837   Register DstReg = MI.getOperand(0).getReg();
838   Register SrcReg = MI.getOperand(1).getReg();
839   Register OffsetReg = MI.getOperand(2).getReg();
840   Register WidthReg = MI.getOperand(3).getReg();
841 
842   assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
843          "scalar BFX instructions are expanded in regbankselect");
844   assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
845          "64-bit vector BFX instructions are expanded in regbankselect");
846 
847   const DebugLoc &DL = MI.getDebugLoc();
848   MachineBasicBlock *MBB = MI.getParent();
849 
850   bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
851   unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
852   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
853                  .addReg(SrcReg)
854                  .addReg(OffsetReg)
855                  .addReg(WidthReg);
856   MI.eraseFromParent();
857   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
858 }
859 
860 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
861   if (STI.getLDSBankCount() != 16)
862     return selectImpl(MI, *CoverageInfo);
863 
864   Register Dst = MI.getOperand(0).getReg();
865   Register Src0 = MI.getOperand(2).getReg();
866   Register M0Val = MI.getOperand(6).getReg();
867   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
868       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
869       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
870     return false;
871 
872   // This requires 2 instructions. It is possible to write a pattern to support
873   // this, but the generated isel emitter doesn't correctly deal with multiple
874   // output instructions using the same physical register input. The copy to m0
875   // is incorrectly placed before the second instruction.
876   //
877   // TODO: Match source modifiers.
878 
879   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
880   const DebugLoc &DL = MI.getDebugLoc();
881   MachineBasicBlock *MBB = MI.getParent();
882 
883   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
884     .addReg(M0Val);
885   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
886     .addImm(2)
887     .addImm(MI.getOperand(4).getImm())  // $attr
888     .addImm(MI.getOperand(3).getImm()); // $attrchan
889 
890   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
891     .addImm(0)                          // $src0_modifiers
892     .addReg(Src0)                       // $src0
893     .addImm(MI.getOperand(4).getImm())  // $attr
894     .addImm(MI.getOperand(3).getImm())  // $attrchan
895     .addImm(0)                          // $src2_modifiers
896     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
897     .addImm(MI.getOperand(5).getImm())  // $high
898     .addImm(0)                          // $clamp
899     .addImm(0);                         // $omod
900 
901   MI.eraseFromParent();
902   return true;
903 }
904 
905 // Writelane is special in that it can use SGPR and M0 (which would normally
906 // count as using the constant bus twice - but in this case it is allowed since
907 // the lane selector doesn't count as a use of the constant bus). However, it is
908 // still required to abide by the 1 SGPR rule. Fix this up if we might have
909 // multiple SGPRs.
910 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
911   // With a constant bus limit of at least 2, there's no issue.
912   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
913     return selectImpl(MI, *CoverageInfo);
914 
915   MachineBasicBlock *MBB = MI.getParent();
916   const DebugLoc &DL = MI.getDebugLoc();
917   Register VDst = MI.getOperand(0).getReg();
918   Register Val = MI.getOperand(2).getReg();
919   Register LaneSelect = MI.getOperand(3).getReg();
920   Register VDstIn = MI.getOperand(4).getReg();
921 
922   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
923 
924   std::optional<ValueAndVReg> ConstSelect =
925       getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
926   if (ConstSelect) {
927     // The selector has to be an inline immediate, so we can use whatever for
928     // the other operands.
929     MIB.addReg(Val);
930     MIB.addImm(ConstSelect->Value.getSExtValue() &
931                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
932   } else {
933     std::optional<ValueAndVReg> ConstVal =
934         getIConstantVRegValWithLookThrough(Val, *MRI);
935 
936     // If the value written is an inline immediate, we can get away without a
937     // copy to m0.
938     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
939                                                  STI.hasInv2PiInlineImm())) {
940       MIB.addImm(ConstVal->Value.getSExtValue());
941       MIB.addReg(LaneSelect);
942     } else {
943       MIB.addReg(Val);
944 
945       // If the lane selector was originally in a VGPR and copied with
946       // readfirstlane, there's a hazard to read the same SGPR from the
947       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
948       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
949 
950       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
951         .addReg(LaneSelect);
952       MIB.addReg(AMDGPU::M0);
953     }
954   }
955 
956   MIB.addReg(VDstIn);
957 
958   MI.eraseFromParent();
959   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
960 }
961 
962 // We need to handle this here because tablegen doesn't support matching
963 // instructions with multiple outputs.
964 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
965   Register Dst0 = MI.getOperand(0).getReg();
966   Register Dst1 = MI.getOperand(1).getReg();
967 
968   LLT Ty = MRI->getType(Dst0);
969   unsigned Opc;
970   if (Ty == LLT::scalar(32))
971     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
972   else if (Ty == LLT::scalar(64))
973     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
974   else
975     return false;
976 
977   // TODO: Match source modifiers.
978 
979   const DebugLoc &DL = MI.getDebugLoc();
980   MachineBasicBlock *MBB = MI.getParent();
981 
982   Register Numer = MI.getOperand(3).getReg();
983   Register Denom = MI.getOperand(4).getReg();
984   unsigned ChooseDenom = MI.getOperand(5).getImm();
985 
986   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
987 
988   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
989     .addDef(Dst1)
990     .addImm(0)     // $src0_modifiers
991     .addUse(Src0)  // $src0
992     .addImm(0)     // $src1_modifiers
993     .addUse(Denom) // $src1
994     .addImm(0)     // $src2_modifiers
995     .addUse(Numer) // $src2
996     .addImm(0)     // $clamp
997     .addImm(0);    // $omod
998 
999   MI.eraseFromParent();
1000   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1001 }
1002 
1003 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
1004   unsigned IntrinsicID = I.getIntrinsicID();
1005   switch (IntrinsicID) {
1006   case Intrinsic::amdgcn_if_break: {
1007     MachineBasicBlock *BB = I.getParent();
1008 
1009     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1010     // SelectionDAG uses for wave32 vs wave64.
1011     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
1012       .add(I.getOperand(0))
1013       .add(I.getOperand(2))
1014       .add(I.getOperand(3));
1015 
1016     Register DstReg = I.getOperand(0).getReg();
1017     Register Src0Reg = I.getOperand(2).getReg();
1018     Register Src1Reg = I.getOperand(3).getReg();
1019 
1020     I.eraseFromParent();
1021 
1022     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
1023       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1024 
1025     return true;
1026   }
1027   case Intrinsic::amdgcn_interp_p1_f16:
1028     return selectInterpP1F16(I);
1029   case Intrinsic::amdgcn_wqm:
1030     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
1031   case Intrinsic::amdgcn_softwqm:
1032     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
1033   case Intrinsic::amdgcn_strict_wwm:
1034   case Intrinsic::amdgcn_wwm:
1035     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
1036   case Intrinsic::amdgcn_strict_wqm:
1037     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
1038   case Intrinsic::amdgcn_writelane:
1039     return selectWritelane(I);
1040   case Intrinsic::amdgcn_div_scale:
1041     return selectDivScale(I);
1042   case Intrinsic::amdgcn_icmp:
1043   case Intrinsic::amdgcn_fcmp:
1044     if (selectImpl(I, *CoverageInfo))
1045       return true;
1046     return selectIntrinsicCmp(I);
1047   case Intrinsic::amdgcn_ballot:
1048     return selectBallot(I);
1049   case Intrinsic::amdgcn_inverse_ballot:
1050     return selectInverseBallot(I);
1051   case Intrinsic::amdgcn_reloc_constant:
1052     return selectRelocConstant(I);
1053   case Intrinsic::amdgcn_groupstaticsize:
1054     return selectGroupStaticSize(I);
1055   case Intrinsic::returnaddress:
1056     return selectReturnAddress(I);
1057   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
1058   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
1059   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
1060   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
1061   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
1062   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
1063   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
1064   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
1065   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
1066   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
1067   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
1068   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
1069   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
1070   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
1071     return selectSMFMACIntrin(I);
1072   default:
1073     return selectImpl(I, *CoverageInfo);
1074   }
1075 }
1076 
1077 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size,
1078                           const GCNSubtarget &ST) {
1079   if (Size != 16 && Size != 32 && Size != 64)
1080     return -1;
1081 
1082   if (Size == 16 && !ST.has16BitInsts())
1083     return -1;
1084 
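  // Pick the 16-, 32- or 64-bit form of the VALU compare; subtargets with
  // true16 instructions use the _t16 encoding for the 16-bit case.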
1085   const auto Select = [&](unsigned S16Opc, unsigned TrueS16Opc, unsigned S32Opc,
1086                           unsigned S64Opc) {
1087     if (Size == 16)
1088       return ST.hasTrue16BitInsts() ? TrueS16Opc : S16Opc;
1089     if (Size == 32)
1090       return S32Opc;
1091     return S64Opc;
1092   };
1093 
1094   switch (P) {
1095   default:
1096     llvm_unreachable("Unknown condition code!");
1097   case CmpInst::ICMP_NE:
1098     return Select(AMDGPU::V_CMP_NE_U16_e64, AMDGPU::V_CMP_NE_U16_t16_e64,
1099                   AMDGPU::V_CMP_NE_U32_e64, AMDGPU::V_CMP_NE_U64_e64);
1100   case CmpInst::ICMP_EQ:
1101     return Select(AMDGPU::V_CMP_EQ_U16_e64, AMDGPU::V_CMP_EQ_U16_t16_e64,
1102                   AMDGPU::V_CMP_EQ_U32_e64, AMDGPU::V_CMP_EQ_U64_e64);
1103   case CmpInst::ICMP_SGT:
1104     return Select(AMDGPU::V_CMP_GT_I16_e64, AMDGPU::V_CMP_GT_I16_t16_e64,
1105                   AMDGPU::V_CMP_GT_I32_e64, AMDGPU::V_CMP_GT_I64_e64);
1106   case CmpInst::ICMP_SGE:
1107     return Select(AMDGPU::V_CMP_GE_I16_e64, AMDGPU::V_CMP_GE_I16_t16_e64,
1108                   AMDGPU::V_CMP_GE_I32_e64, AMDGPU::V_CMP_GE_I64_e64);
1109   case CmpInst::ICMP_SLT:
1110     return Select(AMDGPU::V_CMP_LT_I16_e64, AMDGPU::V_CMP_LT_I16_t16_e64,
1111                   AMDGPU::V_CMP_LT_I32_e64, AMDGPU::V_CMP_LT_I64_e64);
1112   case CmpInst::ICMP_SLE:
1113     return Select(AMDGPU::V_CMP_LE_I16_e64, AMDGPU::V_CMP_LE_I16_t16_e64,
1114                   AMDGPU::V_CMP_LE_I32_e64, AMDGPU::V_CMP_LE_I64_e64);
1115   case CmpInst::ICMP_UGT:
1116     return Select(AMDGPU::V_CMP_GT_U16_e64, AMDGPU::V_CMP_GT_U16_t16_e64,
1117                   AMDGPU::V_CMP_GT_U32_e64, AMDGPU::V_CMP_GT_U64_e64);
1118   case CmpInst::ICMP_UGE:
1119     return Select(AMDGPU::V_CMP_GE_U16_e64, AMDGPU::V_CMP_GE_U16_t16_e64,
1120                   AMDGPU::V_CMP_GE_U32_e64, AMDGPU::V_CMP_GE_U64_e64);
1121   case CmpInst::ICMP_ULT:
1122     return Select(AMDGPU::V_CMP_LT_U16_e64, AMDGPU::V_CMP_LT_U16_t16_e64,
1123                   AMDGPU::V_CMP_LT_U32_e64, AMDGPU::V_CMP_LT_U64_e64);
1124   case CmpInst::ICMP_ULE:
1125     return Select(AMDGPU::V_CMP_LE_U16_e64, AMDGPU::V_CMP_LE_U16_t16_e64,
1126                   AMDGPU::V_CMP_LE_U32_e64, AMDGPU::V_CMP_LE_U64_e64);
1127 
1128   case CmpInst::FCMP_OEQ:
1129     return Select(AMDGPU::V_CMP_EQ_F16_e64, AMDGPU::V_CMP_EQ_F16_t16_e64,
1130                   AMDGPU::V_CMP_EQ_F32_e64, AMDGPU::V_CMP_EQ_F64_e64);
1131   case CmpInst::FCMP_OGT:
1132     return Select(AMDGPU::V_CMP_GT_F16_e64, AMDGPU::V_CMP_GT_F16_t16_e64,
1133                   AMDGPU::V_CMP_GT_F32_e64, AMDGPU::V_CMP_GT_F64_e64);
1134   case CmpInst::FCMP_OGE:
1135     return Select(AMDGPU::V_CMP_GE_F16_e64, AMDGPU::V_CMP_GE_F16_t16_e64,
1136                   AMDGPU::V_CMP_GE_F32_e64, AMDGPU::V_CMP_GE_F64_e64);
1137   case CmpInst::FCMP_OLT:
1138     return Select(AMDGPU::V_CMP_LT_F16_e64, AMDGPU::V_CMP_LT_F16_t16_e64,
1139                   AMDGPU::V_CMP_LT_F32_e64, AMDGPU::V_CMP_LT_F64_e64);
1140   case CmpInst::FCMP_OLE:
1141     return Select(AMDGPU::V_CMP_LE_F16_e64, AMDGPU::V_CMP_LE_F16_t16_e64,
1142                   AMDGPU::V_CMP_LE_F32_e64, AMDGPU::V_CMP_LE_F64_e64);
1143   case CmpInst::FCMP_ONE:
1144     return Select(AMDGPU::V_CMP_NEQ_F16_e64, AMDGPU::V_CMP_NEQ_F16_t16_e64,
1145                   AMDGPU::V_CMP_NEQ_F32_e64, AMDGPU::V_CMP_NEQ_F64_e64);
1146   case CmpInst::FCMP_ORD:
1147     return Select(AMDGPU::V_CMP_O_F16_e64, AMDGPU::V_CMP_O_F16_t16_e64,
1148                   AMDGPU::V_CMP_O_F32_e64, AMDGPU::V_CMP_O_F64_e64);
1149   case CmpInst::FCMP_UNO:
1150     return Select(AMDGPU::V_CMP_U_F16_e64, AMDGPU::V_CMP_U_F16_t16_e64,
1151                   AMDGPU::V_CMP_U_F32_e64, AMDGPU::V_CMP_U_F64_e64);
1152   case CmpInst::FCMP_UEQ:
1153     return Select(AMDGPU::V_CMP_NLG_F16_e64, AMDGPU::V_CMP_NLG_F16_t16_e64,
1154                   AMDGPU::V_CMP_NLG_F32_e64, AMDGPU::V_CMP_NLG_F64_e64);
1155   case CmpInst::FCMP_UGT:
1156     return Select(AMDGPU::V_CMP_NLE_F16_e64, AMDGPU::V_CMP_NLE_F16_t16_e64,
1157                   AMDGPU::V_CMP_NLE_F32_e64, AMDGPU::V_CMP_NLE_F64_e64);
1158   case CmpInst::FCMP_UGE:
1159     return Select(AMDGPU::V_CMP_NLT_F16_e64, AMDGPU::V_CMP_NLT_F16_t16_e64,
1160                   AMDGPU::V_CMP_NLT_F32_e64, AMDGPU::V_CMP_NLT_F64_e64);
1161   case CmpInst::FCMP_ULT:
1162     return Select(AMDGPU::V_CMP_NGE_F16_e64, AMDGPU::V_CMP_NGE_F16_t16_e64,
1163                   AMDGPU::V_CMP_NGE_F32_e64, AMDGPU::V_CMP_NGE_F64_e64);
1164   case CmpInst::FCMP_ULE:
1165     return Select(AMDGPU::V_CMP_NGT_F16_e64, AMDGPU::V_CMP_NGT_F16_t16_e64,
1166                   AMDGPU::V_CMP_NGT_F32_e64, AMDGPU::V_CMP_NGT_F64_e64);
1167   case CmpInst::FCMP_UNE:
1168     return Select(AMDGPU::V_CMP_NEQ_F16_e64, AMDGPU::V_CMP_NEQ_F16_t16_e64,
1169                   AMDGPU::V_CMP_NEQ_F32_e64, AMDGPU::V_CMP_NEQ_F64_e64);
1170   case CmpInst::FCMP_TRUE:
1171     return Select(AMDGPU::V_CMP_TRU_F16_e64, AMDGPU::V_CMP_TRU_F16_t16_e64,
1172                   AMDGPU::V_CMP_TRU_F32_e64, AMDGPU::V_CMP_TRU_F64_e64);
1173   case CmpInst::FCMP_FALSE:
1174     return Select(AMDGPU::V_CMP_F_F16_e64, AMDGPU::V_CMP_F_F16_t16_e64,
1175                   AMDGPU::V_CMP_F_F32_e64, AMDGPU::V_CMP_F_F64_e64);
1176   }
1177 }
1178 
1179 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
1180                                               unsigned Size) const {
1181   if (Size == 64) {
1182     if (!STI.hasScalarCompareEq64())
1183       return -1;
1184 
1185     switch (P) {
1186     case CmpInst::ICMP_NE:
1187       return AMDGPU::S_CMP_LG_U64;
1188     case CmpInst::ICMP_EQ:
1189       return AMDGPU::S_CMP_EQ_U64;
1190     default:
1191       return -1;
1192     }
1193   }
1194 
1195   if (Size != 32)
1196     return -1;
1197 
1198   switch (P) {
1199   case CmpInst::ICMP_NE:
1200     return AMDGPU::S_CMP_LG_U32;
1201   case CmpInst::ICMP_EQ:
1202     return AMDGPU::S_CMP_EQ_U32;
1203   case CmpInst::ICMP_SGT:
1204     return AMDGPU::S_CMP_GT_I32;
1205   case CmpInst::ICMP_SGE:
1206     return AMDGPU::S_CMP_GE_I32;
1207   case CmpInst::ICMP_SLT:
1208     return AMDGPU::S_CMP_LT_I32;
1209   case CmpInst::ICMP_SLE:
1210     return AMDGPU::S_CMP_LE_I32;
1211   case CmpInst::ICMP_UGT:
1212     return AMDGPU::S_CMP_GT_U32;
1213   case CmpInst::ICMP_UGE:
1214     return AMDGPU::S_CMP_GE_U32;
1215   case CmpInst::ICMP_ULT:
1216     return AMDGPU::S_CMP_LT_U32;
1217   case CmpInst::ICMP_ULE:
1218     return AMDGPU::S_CMP_LE_U32;
1219   default:
1220     llvm_unreachable("Unknown condition code!");
1221   }
1222 }
1223 
1224 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1225   MachineBasicBlock *BB = I.getParent();
1226   const DebugLoc &DL = I.getDebugLoc();
1227 
1228   Register SrcReg = I.getOperand(2).getReg();
1229   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1230 
1231   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1232 
1233   Register CCReg = I.getOperand(0).getReg();
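  // A scalar (non-VCC) result selects to an S_CMP plus a copy out of SCC;
  // a VCC result selects directly to a VALU compare.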
1234   if (!isVCC(CCReg, *MRI)) {
1235     int Opcode = getS_CMPOpcode(Pred, Size);
1236     if (Opcode == -1)
1237       return false;
1238     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1239             .add(I.getOperand(2))
1240             .add(I.getOperand(3));
1241     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1242       .addReg(AMDGPU::SCC);
1243     bool Ret =
1244         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1245         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1246     I.eraseFromParent();
1247     return Ret;
1248   }
1249 
1250   int Opcode = getV_CMPOpcode(Pred, Size, *Subtarget);
1251   if (Opcode == -1)
1252     return false;
1253 
1254   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1255             I.getOperand(0).getReg())
1256             .add(I.getOperand(2))
1257             .add(I.getOperand(3));
1258   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1259                                *TRI.getBoolRC(), *MRI);
1260   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1261   I.eraseFromParent();
1262   return Ret;
1263 }
1264 
1265 bool AMDGPUInstructionSelector::selectIntrinsicCmp(MachineInstr &I) const {
1266   Register Dst = I.getOperand(0).getReg();
1267   if (isVCC(Dst, *MRI))
1268     return false;
1269 
1270   LLT DstTy = MRI->getType(Dst);
1271   if (DstTy.getSizeInBits() != STI.getWavefrontSize())
1272     return false;
1273 
1274   MachineBasicBlock *BB = I.getParent();
1275   const DebugLoc &DL = I.getDebugLoc();
1276   Register SrcReg = I.getOperand(2).getReg();
1277   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1278 
1279   // i1 inputs are not supported in GlobalISel.
1280   if (Size == 1)
1281     return false;
1282 
1283   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1284   if (!CmpInst::isIntPredicate(Pred) && !CmpInst::isFPPredicate(Pred)) {
1285     BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1286     I.eraseFromParent();
1287     return RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI);
1288   }
1289 
1290   const int Opcode = getV_CMPOpcode(Pred, Size, *Subtarget);
1291   if (Opcode == -1)
1292     return false;
1293 
1294   MachineInstrBuilder SelectedMI;
1295   MachineOperand &LHS = I.getOperand(2);
1296   MachineOperand &RHS = I.getOperand(3);
1297   auto [Src0, Src0Mods] = selectVOP3ModsImpl(LHS);
1298   auto [Src1, Src1Mods] = selectVOP3ModsImpl(RHS);
1299   Register Src0Reg =
1300       copyToVGPRIfSrcFolded(Src0, Src0Mods, LHS, &I, /*ForceVGPR*/ true);
1301   Register Src1Reg =
1302       copyToVGPRIfSrcFolded(Src1, Src1Mods, RHS, &I, /*ForceVGPR*/ true);
1303   SelectedMI = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst);
1304   if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src0_modifiers))
1305     SelectedMI.addImm(Src0Mods);
1306   SelectedMI.addReg(Src0Reg);
1307   if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1_modifiers))
1308     SelectedMI.addImm(Src1Mods);
1309   SelectedMI.addReg(Src1Reg);
1310   if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::clamp))
1311     SelectedMI.addImm(0); // clamp
1312   if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::op_sel))
1313     SelectedMI.addImm(0); // op_sel
1314 
1315   RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI);
1316   if (!constrainSelectedInstRegOperands(*SelectedMI, TII, TRI, RBI))
1317     return false;
1318 
1319   I.eraseFromParent();
1320   return true;
1321 }
1322 
1323 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1324   MachineBasicBlock *BB = I.getParent();
1325   const DebugLoc &DL = I.getDebugLoc();
1326   Register DstReg = I.getOperand(0).getReg();
1327   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1328   const bool Is64 = Size == 64;
1329   const bool IsWave32 = (STI.getWavefrontSize() == 32);
1330 
  // In the common case, the return type matches the wave size.
  // However, we also support emitting i64 ballots in wave32 mode.
1333   if (Size != STI.getWavefrontSize() && (!Is64 || !IsWave32))
1334     return false;
1335 
1336   std::optional<ValueAndVReg> Arg =
1337       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1338 
1339   const auto BuildCopy = [&](Register SrcReg) {
1340     if (Size == STI.getWavefrontSize()) {
1341       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1342           .addReg(SrcReg);
1343       return;
1344     }
1345 
    // If emitting an i64 ballot in wave32, fill the upper bits with zeroes.
1347     Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1348     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B32), HiReg).addImm(0);
1349     BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1350         .addReg(SrcReg)
1351         .addImm(AMDGPU::sub0)
1352         .addReg(HiReg)
1353         .addImm(AMDGPU::sub1);
1354   };
1355 
1356   if (Arg) {
1357     const int64_t Value = Arg->Value.getSExtValue();
1358     if (Value == 0) {
1359       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1360       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1361     } else if (Value == -1) // all ones
1362       BuildCopy(IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC);
1363     else
1364       return false;
1365   } else
1366     BuildCopy(I.getOperand(2).getReg());
1367 
1368   I.eraseFromParent();
1369   return true;
1370 }
1371 
1372 bool AMDGPUInstructionSelector::selectInverseBallot(MachineInstr &I) const {
1373   MachineBasicBlock *BB = I.getParent();
1374   const DebugLoc &DL = I.getDebugLoc();
1375   const Register DstReg = I.getOperand(0).getReg();
1376   const Register MaskReg = I.getOperand(2).getReg();
1377 
1378   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(MaskReg);
1379   I.eraseFromParent();
1380   return true;
1381 }
1382 
1383 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1384   Register DstReg = I.getOperand(0).getReg();
1385   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1386   const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
1387   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1388     return false;
1389 
1390   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1391 
1392   Module *M = MF->getFunction().getParent();
1393   const MDNode *Metadata = I.getOperand(2).getMetadata();
1394   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1395   auto RelocSymbol = cast<GlobalVariable>(
1396     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1397 
1398   MachineBasicBlock *BB = I.getParent();
1399   BuildMI(*BB, &I, I.getDebugLoc(),
1400           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1401     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1402 
1403   I.eraseFromParent();
1404   return true;
1405 }
1406 
1407 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1408   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1409 
1410   Register DstReg = I.getOperand(0).getReg();
1411   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1412   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1413     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1414 
1415   MachineBasicBlock *MBB = I.getParent();
1416   const DebugLoc &DL = I.getDebugLoc();
1417 
1418   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1419 
1420   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1421     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1422     MIB.addImm(MFI->getLDSSize());
1423   } else {
1424     Module *M = MF->getFunction().getParent();
1425     const GlobalValue *GV
1426       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1427     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1428   }
1429 
1430   I.eraseFromParent();
1431   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1432 }
1433 
1434 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1435   MachineBasicBlock *MBB = I.getParent();
1436   MachineFunction &MF = *MBB->getParent();
1437   const DebugLoc &DL = I.getDebugLoc();
1438 
1439   MachineOperand &Dst = I.getOperand(0);
1440   Register DstReg = Dst.getReg();
1441   unsigned Depth = I.getOperand(2).getImm();
1442 
1443   const TargetRegisterClass *RC
1444     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1445   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1446       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1447     return false;
1448 
1449   // Check for kernel and shader functions
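  // Entry functions (kernels and shaders) have no return address, and nonzero
  // depths are not supported, so those cases just produce 0.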
1450   if (Depth != 0 ||
1451       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1452     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1453       .addImm(0);
1454     I.eraseFromParent();
1455     return true;
1456   }
1457 
1458   MachineFrameInfo &MFI = MF.getFrameInfo();
1459   // There is a call to @llvm.returnaddress in this function
1460   MFI.setReturnAddressIsTaken(true);
1461 
1462   // Get the return address reg and mark it as an implicit live-in
1463   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1464   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1465                                              AMDGPU::SReg_64RegClass, DL);
1466   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1467     .addReg(LiveIn);
1468   I.eraseFromParent();
1469   return true;
1470 }
1471 
1472 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1473   // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1474   // SelectionDAG uses for wave32 vs wave64.
1475   MachineBasicBlock *BB = MI.getParent();
1476   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1477       .add(MI.getOperand(1));
1478 
1479   Register Reg = MI.getOperand(1).getReg();
1480   MI.eraseFromParent();
1481 
1482   if (!MRI->getRegClassOrNull(Reg))
1483     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1484   return true;
1485 }
1486 
1487 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1488   MachineInstr &MI, Intrinsic::ID IntrID) const {
1489   MachineBasicBlock *MBB = MI.getParent();
1490   MachineFunction *MF = MBB->getParent();
1491   const DebugLoc &DL = MI.getDebugLoc();
1492 
1493   unsigned IndexOperand = MI.getOperand(7).getImm();
1494   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1495   bool WaveDone = MI.getOperand(9).getImm() != 0;
1496 
1497   if (WaveDone && !WaveRelease)
1498     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1499 
1500   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1501   IndexOperand &= ~0x3f;
1502   unsigned CountDw = 0;
1503 
1504   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1505     CountDw = (IndexOperand >> 24) & 0xf;
1506     IndexOperand &= ~(0xf << 24);
1507 
1508     if (CountDw < 1 || CountDw > 4) {
1509       report_fatal_error(
1510         "ds_ordered_count: dword count must be between 1 and 4");
1511     }
1512   }
1513 
1514   if (IndexOperand)
1515     report_fatal_error("ds_ordered_count: bad index operand");
1516 
1517   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1518   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1519 
1520   unsigned Offset0 = OrderedCountIndex << 2;
1521   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4);
1522 
1523   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1524     Offset1 |= (CountDw - 1) << 6;
1525 
1526   if (STI.getGeneration() < AMDGPUSubtarget::GFX11)
1527     Offset1 |= ShaderType << 2;
1528 
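  // Pack the final DS offset field: offset0 (bits [7:0]) holds the ordered
  // count index scaled by 4, and offset1 (bits [15:8]) holds wave_release,
  // wave_done, the shader type (pre-GFX11), the add/swap selector, and the
  // dword count minus 1 (GFX10+), as assembled above.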
1529   unsigned Offset = Offset0 | (Offset1 << 8);
1530 
1531   Register M0Val = MI.getOperand(2).getReg();
1532   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1533     .addReg(M0Val);
1534 
1535   Register DstReg = MI.getOperand(0).getReg();
1536   Register ValReg = MI.getOperand(3).getReg();
1537   MachineInstrBuilder DS =
1538     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1539       .addReg(ValReg)
1540       .addImm(Offset)
1541       .cloneMemRefs(MI);
1542 
1543   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1544     return false;
1545 
1546   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1547   MI.eraseFromParent();
1548   return Ret;
1549 }
1550 
1551 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1552   switch (IntrID) {
1553   case Intrinsic::amdgcn_ds_gws_init:
1554     return AMDGPU::DS_GWS_INIT;
1555   case Intrinsic::amdgcn_ds_gws_barrier:
1556     return AMDGPU::DS_GWS_BARRIER;
1557   case Intrinsic::amdgcn_ds_gws_sema_v:
1558     return AMDGPU::DS_GWS_SEMA_V;
1559   case Intrinsic::amdgcn_ds_gws_sema_br:
1560     return AMDGPU::DS_GWS_SEMA_BR;
1561   case Intrinsic::amdgcn_ds_gws_sema_p:
1562     return AMDGPU::DS_GWS_SEMA_P;
1563   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1564     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1565   default:
1566     llvm_unreachable("not a gws intrinsic");
1567   }
1568 }
1569 
1570 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1571                                                      Intrinsic::ID IID) const {
1572   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1573       !STI.hasGWSSemaReleaseAll())
1574     return false;
1575 
1576   // intrinsic ID, vsrc, offset
1577   const bool HasVSrc = MI.getNumOperands() == 3;
1578   assert(HasVSrc || MI.getNumOperands() == 2);
1579 
1580   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1581   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1582   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1583     return false;
1584 
1585   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1586   unsigned ImmOffset;
1587 
1588   MachineBasicBlock *MBB = MI.getParent();
1589   const DebugLoc &DL = MI.getDebugLoc();
1590 
1591   MachineInstr *Readfirstlane = nullptr;
1592 
1593   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1594   // incoming offset, in case there's an add of a constant. We'll have to put it
1595   // back later.
1596   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1597     Readfirstlane = OffsetDef;
1598     BaseOffset = OffsetDef->getOperand(1).getReg();
1599     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1600   }
1601 
1602   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1603     // If we have a constant offset, try to use the 0 in m0 as the base.
1604     // TODO: Look into changing the default m0 initialization value. If the
1605     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1606     // the immediate offset.
1607 
1608     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1609     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1610       .addImm(0);
1611   } else {
1612     std::tie(BaseOffset, ImmOffset) =
1613         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset, KB);
1614 
1615     if (Readfirstlane) {
1616       // We have the constant offset now, so put the readfirstlane back on the
1617       // variable component.
1618       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1619         return false;
1620 
1621       Readfirstlane->getOperand(1).setReg(BaseOffset);
1622       BaseOffset = Readfirstlane->getOperand(0).getReg();
1623     } else {
1624       if (!RBI.constrainGenericRegister(BaseOffset,
1625                                         AMDGPU::SReg_32RegClass, *MRI))
1626         return false;
1627     }
1628 
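    // Place the variable component in M0[21:16]; as described below, the
    // hardware adds that field to the immediate offset when computing the
    // resource id offset.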
1629     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1630     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1631       .addReg(BaseOffset)
1632       .addImm(16);
1633 
1634     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1635       .addReg(M0Base);
1636   }
1637 
1638   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1639   // offset field) % 64. Some versions of the programming guide omit the m0
1640   // part, or claim it's from offset 0.
1641   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1642 
1643   if (HasVSrc) {
1644     Register VSrc = MI.getOperand(1).getReg();
1645     MIB.addReg(VSrc);
1646 
1647     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1648       return false;
1649   }
1650 
1651   MIB.addImm(ImmOffset)
1652      .cloneMemRefs(MI);
1653 
1654   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);
1655 
1656   MI.eraseFromParent();
1657   return true;
1658 }
1659 
1660 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1661                                                       bool IsAppend) const {
1662   Register PtrBase = MI.getOperand(2).getReg();
1663   LLT PtrTy = MRI->getType(PtrBase);
1664   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1665 
1666   unsigned Offset;
1667   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1668 
1669   // TODO: Should this try to look through readfirstlane like GWS?
1670   if (!isDSOffsetLegal(PtrBase, Offset)) {
1671     PtrBase = MI.getOperand(2).getReg();
1672     Offset = 0;
1673   }
1674 
1675   MachineBasicBlock *MBB = MI.getParent();
1676   const DebugLoc &DL = MI.getDebugLoc();
1677   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1678 
1679   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1680     .addReg(PtrBase);
1681   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1682     return false;
1683 
1684   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1685     .addImm(Offset)
1686     .addImm(IsGDS ? -1 : 0)
1687     .cloneMemRefs(MI);
1688   MI.eraseFromParent();
1689   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1690 }
1691 
1692 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1693   if (TM.getOptLevel() > CodeGenOpt::None) {
1694     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
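    // A workgroup that fits in a single wave already runs in lockstep, so a
    // full s_barrier is unnecessary; WAVE_BARRIER just keeps the scheduler
    // from moving code across this point.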
1695     if (WGSize <= STI.getWavefrontSize()) {
1696       MachineBasicBlock *MBB = MI.getParent();
1697       const DebugLoc &DL = MI.getDebugLoc();
1698       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1699       MI.eraseFromParent();
1700       return true;
1701     }
1702   }
1703   return selectImpl(MI, *CoverageInfo);
1704 }
1705 
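// Split the texfailctrl immediate into its TFE (bit 0) and LWE (bit 1)
// components. Any set bit selects the TexFail handling path; returns false if
// unknown bits remain.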
1706 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1707                          bool &IsTexFail) {
1708   if (TexFailCtrl)
1709     IsTexFail = true;
1710 
  TFE = (TexFailCtrl & 0x1) != 0;
  TexFailCtrl &= ~(uint64_t)0x1;
  LWE = (TexFailCtrl & 0x2) != 0;
  TexFailCtrl &= ~(uint64_t)0x2;
1715 
1716   return TexFailCtrl == 0;
1717 }
1718 
1719 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1720   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1721   MachineBasicBlock *MBB = MI.getParent();
1722   const DebugLoc &DL = MI.getDebugLoc();
1723 
1724   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1725     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1726 
1727   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1728   unsigned IntrOpcode = Intr->BaseOpcode;
1729   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1730   const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI);
1731 
1732   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1733 
1734   Register VDataIn, VDataOut;
1735   LLT VDataTy;
1736   int NumVDataDwords = -1;
1737   bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1738                MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1739 
1740   bool Unorm;
1741   if (!BaseOpcode->Sampler)
1742     Unorm = true;
1743   else
1744     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1745 
1746   bool TFE;
1747   bool LWE;
1748   bool IsTexFail = false;
1749   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1750                     TFE, LWE, IsTexFail))
1751     return false;
1752 
1753   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1754   const bool IsA16 = (Flags & 1) != 0;
1755   const bool IsG16 = (Flags & 2) != 0;
1756 
  // A16 implies 16-bit gradients if the subtarget doesn't support G16.
1758   if (IsA16 && !STI.hasG16() && !IsG16)
1759     return false;
1760 
1761   unsigned DMask = 0;
1762   unsigned DMaskLanes = 0;
1763 
1764   if (BaseOpcode->Atomic) {
1765     VDataOut = MI.getOperand(0).getReg();
1766     VDataIn = MI.getOperand(2).getReg();
1767     LLT Ty = MRI->getType(VDataIn);
1768 
1769     // Be careful to allow atomic swap on 16-bit element vectors.
1770     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1771       Ty.getSizeInBits() == 128 :
1772       Ty.getSizeInBits() == 64;
1773 
1774     if (BaseOpcode->AtomicX2) {
1775       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1776 
1777       DMask = Is64Bit ? 0xf : 0x3;
1778       NumVDataDwords = Is64Bit ? 4 : 2;
1779     } else {
1780       DMask = Is64Bit ? 0x3 : 0x1;
1781       NumVDataDwords = Is64Bit ? 2 : 1;
1782     }
1783   } else {
1784     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1785     DMaskLanes = BaseOpcode->Gather4 ? 4 : llvm::popcount(DMask);
1786 
1787     if (BaseOpcode->Store) {
1788       VDataIn = MI.getOperand(1).getReg();
1789       VDataTy = MRI->getType(VDataIn);
1790       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1791     } else {
1792       VDataOut = MI.getOperand(0).getReg();
1793       VDataTy = MRI->getType(VDataOut);
1794       NumVDataDwords = DMaskLanes;
1795 
1796       if (IsD16 && !STI.hasUnpackedD16VMem())
1797         NumVDataDwords = (DMaskLanes + 1) / 2;
1798     }
1799   }
1800 
1801   // Set G16 opcode
1802   if (Subtarget->hasG16() && IsG16) {
1803     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1804         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1805     assert(G16MappingInfo);
1806     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1807   }
1808 
1809   // TODO: Check this in verifier.
1810   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1811 
1812   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1813   if (BaseOpcode->Atomic)
1814     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1815   if (CPol & ~AMDGPU::CPol::ALL)
1816     return false;
1817 
1818   int NumVAddrRegs = 0;
1819   int NumVAddrDwords = 0;
1820   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1821     // Skip the $noregs and 0s inserted during legalization.
1822     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1823     if (!AddrOp.isReg())
1824       continue; // XXX - Break?
1825 
1826     Register Addr = AddrOp.getReg();
1827     if (!Addr)
1828       break;
1829 
1830     ++NumVAddrRegs;
1831     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1832   }
1833 
1834   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1835   // NSA, these should have been packed into a single value in the first
1836   // address register
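  // On targets with only partial NSA support, the trailing addresses may stay
  // packed in the final address register, so it is fine for the dword count to
  // exceed the register count.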
1837   const bool UseNSA =
1838       NumVAddrRegs != 1 &&
1839       (STI.hasPartialNSAEncoding() ? NumVAddrDwords >= NumVAddrRegs
1840                                    : NumVAddrDwords == NumVAddrRegs);
1841   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1842     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1843     return false;
1844   }
1845 
1846   if (IsTexFail)
1847     ++NumVDataDwords;
1848 
1849   int Opcode = -1;
1850   if (IsGFX11Plus) {
1851     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1852                                    UseNSA ? AMDGPU::MIMGEncGfx11NSA
1853                                           : AMDGPU::MIMGEncGfx11Default,
1854                                    NumVDataDwords, NumVAddrDwords);
1855   } else if (IsGFX10Plus) {
1856     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1857                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1858                                           : AMDGPU::MIMGEncGfx10Default,
1859                                    NumVDataDwords, NumVAddrDwords);
1860   } else {
1861     if (Subtarget->hasGFX90AInsts()) {
1862       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
1863                                      NumVDataDwords, NumVAddrDwords);
1864       if (Opcode == -1) {
1865         LLVM_DEBUG(
1866             dbgs()
1867             << "requested image instruction is not supported on this GPU\n");
1868         return false;
1869       }
1870     }
1871     if (Opcode == -1 &&
1872         STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1873       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1874                                      NumVDataDwords, NumVAddrDwords);
1875     if (Opcode == -1)
1876       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1877                                      NumVDataDwords, NumVAddrDwords);
1878   }
1879   if (Opcode == -1)
1880     return false;
1881 
1882   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1883     .cloneMemRefs(MI);
1884 
1885   if (VDataOut) {
1886     if (BaseOpcode->AtomicX2) {
1887       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1888 
1889       Register TmpReg = MRI->createVirtualRegister(
1890         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1891       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1892 
1893       MIB.addDef(TmpReg);
1894       if (!MRI->use_empty(VDataOut)) {
1895         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1896             .addReg(TmpReg, RegState::Kill, SubReg);
1897       }
1898 
1899     } else {
1900       MIB.addDef(VDataOut); // vdata output
1901     }
1902   }
1903 
1904   if (VDataIn)
1905     MIB.addReg(VDataIn); // vdata input
1906 
1907   for (int I = 0; I != NumVAddrRegs; ++I) {
1908     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1909     if (SrcOp.isReg()) {
1910       assert(SrcOp.getReg() != 0);
1911       MIB.addReg(SrcOp.getReg());
1912     }
1913   }
1914 
1915   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1916   if (BaseOpcode->Sampler)
1917     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1918 
1919   MIB.addImm(DMask); // dmask
1920 
1921   if (IsGFX10Plus)
1922     MIB.addImm(DimInfo->Encoding);
1923   MIB.addImm(Unorm);
1924 
1925   MIB.addImm(CPol);
1926   MIB.addImm(IsA16 &&  // a16 or r128
1927              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1928   if (IsGFX10Plus)
1929     MIB.addImm(IsA16 ? -1 : 0);
1930 
1931   if (!Subtarget->hasGFX90AInsts()) {
1932     MIB.addImm(TFE); // tfe
1933   } else if (TFE) {
1934     LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");
1935     return false;
1936   }
1937 
1938   MIB.addImm(LWE); // lwe
1939   if (!IsGFX10Plus)
1940     MIB.addImm(DimInfo->DA ? -1 : 0);
1941   if (BaseOpcode->HasD16)
1942     MIB.addImm(IsD16 ? -1 : 0);
1943 
1944   if (IsTexFail) {
1945     // An image load instruction with TFE/LWE only conditionally writes to its
1946     // result registers. Initialize them to zero so that we always get well
1947     // defined result values.
1948     assert(VDataOut && !VDataIn);
1949     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1950     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1951     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1952       .addImm(0);
1953     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1954     if (STI.usePRTStrictNull()) {
1955       // With enable-prt-strict-null enabled, initialize all result registers to
1956       // zero.
1957       auto RegSeq =
1958           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1959       for (auto Sub : Parts)
1960         RegSeq.addReg(Zero).addImm(Sub);
1961     } else {
1962       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1963       // result register.
1964       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1965       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1966       auto RegSeq =
1967           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1968       for (auto Sub : Parts.drop_back(1))
1969         RegSeq.addReg(Undef).addImm(Sub);
1970       RegSeq.addReg(Zero).addImm(Parts.back());
1971     }
1972     MIB.addReg(Tied, RegState::Implicit);
1973     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1974   }
1975 
1976   MI.eraseFromParent();
1977   constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1978   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
1979   return true;
1980 }
1981 
1982 // We need to handle this here because tablegen doesn't support matching
1983 // instructions with multiple outputs.
1984 bool AMDGPUInstructionSelector::selectDSBvhStackIntrinsic(
1985     MachineInstr &MI) const {
1986   Register Dst0 = MI.getOperand(0).getReg();
1987   Register Dst1 = MI.getOperand(1).getReg();
1988 
1989   const DebugLoc &DL = MI.getDebugLoc();
1990   MachineBasicBlock *MBB = MI.getParent();
1991 
1992   Register Addr = MI.getOperand(3).getReg();
1993   Register Data0 = MI.getOperand(4).getReg();
1994   Register Data1 = MI.getOperand(5).getReg();
1995   unsigned Offset = MI.getOperand(6).getImm();
1996 
1997   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_BVH_STACK_RTN_B32), Dst0)
1998                  .addDef(Dst1)
1999                  .addUse(Addr)
2000                  .addUse(Data0)
2001                  .addUse(Data1)
2002                  .addImm(Offset)
2003                  .cloneMemRefs(MI);
2004 
2005   MI.eraseFromParent();
2006   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2007 }
2008 
2009 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
2010     MachineInstr &I) const {
2011   unsigned IntrinsicID = I.getIntrinsicID();
2012   switch (IntrinsicID) {
2013   case Intrinsic::amdgcn_end_cf:
2014     return selectEndCfIntrinsic(I);
2015   case Intrinsic::amdgcn_ds_ordered_add:
2016   case Intrinsic::amdgcn_ds_ordered_swap:
2017     return selectDSOrderedIntrinsic(I, IntrinsicID);
2018   case Intrinsic::amdgcn_ds_gws_init:
2019   case Intrinsic::amdgcn_ds_gws_barrier:
2020   case Intrinsic::amdgcn_ds_gws_sema_v:
2021   case Intrinsic::amdgcn_ds_gws_sema_br:
2022   case Intrinsic::amdgcn_ds_gws_sema_p:
2023   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2024     return selectDSGWSIntrinsic(I, IntrinsicID);
2025   case Intrinsic::amdgcn_ds_append:
2026     return selectDSAppendConsume(I, true);
2027   case Intrinsic::amdgcn_ds_consume:
2028     return selectDSAppendConsume(I, false);
2029   case Intrinsic::amdgcn_s_barrier:
2030     return selectSBarrier(I);
2031   case Intrinsic::amdgcn_raw_buffer_load_lds:
2032   case Intrinsic::amdgcn_raw_ptr_buffer_load_lds:
2033   case Intrinsic::amdgcn_struct_buffer_load_lds:
2034   case Intrinsic::amdgcn_struct_ptr_buffer_load_lds:
2035     return selectBufferLoadLds(I);
2036   case Intrinsic::amdgcn_global_load_lds:
2037     return selectGlobalLoadLds(I);
2038   case Intrinsic::amdgcn_exp_compr:
2039     if (!STI.hasCompressedExport()) {
2040       Function &F = I.getMF()->getFunction();
2041       DiagnosticInfoUnsupported NoFpRet(
2042           F, "intrinsic not supported on subtarget", I.getDebugLoc(), DS_Error);
2043       F.getContext().diagnose(NoFpRet);
2044       return false;
2045     }
2046     break;
2047   case Intrinsic::amdgcn_ds_bvh_stack_rtn:
2048     return selectDSBvhStackIntrinsic(I);
2049   }
2050   return selectImpl(I, *CoverageInfo);
2051 }
2052 
2053 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
2054   if (selectImpl(I, *CoverageInfo))
2055     return true;
2056 
2057   MachineBasicBlock *BB = I.getParent();
2058   const DebugLoc &DL = I.getDebugLoc();
2059 
2060   Register DstReg = I.getOperand(0).getReg();
2061   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
2062   assert(Size <= 32 || Size == 64);
2063   const MachineOperand &CCOp = I.getOperand(1);
2064   Register CCReg = CCOp.getReg();
2065   if (!isVCC(CCReg, *MRI)) {
2066     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
2067                                          AMDGPU::S_CSELECT_B32;
    MachineInstr *CopySCC =
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC).addReg(CCReg);

    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class we use to
    // represent it. Manually set the register class here instead.
    if (!MRI->getRegClassOrNull(CCReg))
      MRI->setRegClass(CCReg,
                       TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
2076     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
2077             .add(I.getOperand(2))
2078             .add(I.getOperand(3));
2079 
2080     bool Ret = false;
2081     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
2082     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
2083     I.eraseFromParent();
2084     return Ret;
2085   }
2086 
2087   // Wide VGPR select should have been split in RegBankSelect.
2088   if (Size > 32)
2089     return false;
2090 
2091   MachineInstr *Select =
2092       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
2093               .addImm(0)
2094               .add(I.getOperand(3))
2095               .addImm(0)
2096               .add(I.getOperand(2))
2097               .add(I.getOperand(1));
2098 
2099   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
2100   I.eraseFromParent();
2101   return Ret;
2102 }
2103 
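// Map a scalar size in bits to the subregister index covering that many low
// bits of a larger register. Sizes below 32 use sub0, other sizes are rounded
// up to the next power of two, and anything above 256 bits is unsupported.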
2104 static int sizeToSubRegIndex(unsigned Size) {
2105   switch (Size) {
2106   case 32:
2107     return AMDGPU::sub0;
2108   case 64:
2109     return AMDGPU::sub0_sub1;
2110   case 96:
2111     return AMDGPU::sub0_sub1_sub2;
2112   case 128:
2113     return AMDGPU::sub0_sub1_sub2_sub3;
2114   case 256:
2115     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
2116   default:
2117     if (Size < 32)
2118       return AMDGPU::sub0;
2119     if (Size > 256)
2120       return -1;
2121     return sizeToSubRegIndex(llvm::bit_ceil(Size));
2122   }
2123 }
2124 
2125 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
2126   Register DstReg = I.getOperand(0).getReg();
2127   Register SrcReg = I.getOperand(1).getReg();
2128   const LLT DstTy = MRI->getType(DstReg);
2129   const LLT SrcTy = MRI->getType(SrcReg);
2130   const LLT S1 = LLT::scalar(1);
2131 
2132   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2133   const RegisterBank *DstRB;
2134   if (DstTy == S1) {
2135     // This is a special case. We don't treat s1 for legalization artifacts as
2136     // vcc booleans.
2137     DstRB = SrcRB;
2138   } else {
2139     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2140     if (SrcRB != DstRB)
2141       return false;
2142   }
2143 
2144   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2145 
2146   unsigned DstSize = DstTy.getSizeInBits();
2147   unsigned SrcSize = SrcTy.getSizeInBits();
2148 
2149   const TargetRegisterClass *SrcRC =
2150       TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
2151   const TargetRegisterClass *DstRC =
2152       TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
2153   if (!SrcRC || !DstRC)
2154     return false;
2155 
2156   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2157       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
2158     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
2159     return false;
2160   }
2161 
2162   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
2163     MachineBasicBlock *MBB = I.getParent();
2164     const DebugLoc &DL = I.getDebugLoc();
2165 
2166     Register LoReg = MRI->createVirtualRegister(DstRC);
2167     Register HiReg = MRI->createVirtualRegister(DstRC);
2168     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
2169       .addReg(SrcReg, 0, AMDGPU::sub0);
2170     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
2171       .addReg(SrcReg, 0, AMDGPU::sub1);
2172 
2173     if (IsVALU && STI.hasSDWA()) {
2174       // Write the low 16-bits of the high element into the high 16-bits of the
2175       // low element.
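      // The SDWA mov reads WORD_0 of HiReg, writes it to WORD_1 of DstReg, and
      // preserves the other word from the tied LoReg operand, i.e.
      // Dst = (Hi & 0xffff) << 16 | (Lo & 0xffff).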
2176       MachineInstr *MovSDWA =
2177         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2178         .addImm(0)                             // $src0_modifiers
2179         .addReg(HiReg)                         // $src0
2180         .addImm(0)                             // $clamp
2181         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2182         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2183         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2184         .addReg(LoReg, RegState::Implicit);
2185       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2186     } else {
2187       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
2188       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
2189       Register ImmReg = MRI->createVirtualRegister(DstRC);
2190       if (IsVALU) {
2191         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
2192           .addImm(16)
2193           .addReg(HiReg);
2194       } else {
2195         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
2196           .addReg(HiReg)
2197           .addImm(16);
2198       }
2199 
2200       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2201       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2202       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
2203 
2204       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
2205         .addImm(0xffff);
2206       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
2207         .addReg(LoReg)
2208         .addReg(ImmReg);
2209       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
2210         .addReg(TmpReg0)
2211         .addReg(TmpReg1);
2212     }
2213 
2214     I.eraseFromParent();
2215     return true;
2216   }
2217 
2218   if (!DstTy.isScalar())
2219     return false;
2220 
2221   if (SrcSize > 32) {
2222     int SubRegIdx = sizeToSubRegIndex(DstSize);
2223     if (SubRegIdx == -1)
2224       return false;
2225 
2226     // Deal with weird cases where the class only partially supports the subreg
2227     // index.
2228     const TargetRegisterClass *SrcWithSubRC
2229       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
2230     if (!SrcWithSubRC)
2231       return false;
2232 
2233     if (SrcWithSubRC != SrcRC) {
2234       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
2235         return false;
2236     }
2237 
2238     I.getOperand(1).setSubReg(SubRegIdx);
2239   }
2240 
2241   I.setDesc(TII.get(TargetOpcode::COPY));
2242   return true;
2243 }
2244 
2245 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
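/// For example, a 4-bit mask (0xf = 15) and a full 32-bit mask (-1) are inline
/// immediates, while a 16-bit mask (0xffff) would require a literal.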
2246 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
2247   Mask = maskTrailingOnes<unsigned>(Size);
2248   int SignedMask = static_cast<int>(Mask);
2249   return SignedMask >= -16 && SignedMask <= 64;
2250 }
2251 
2252 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2253 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2254   Register Reg, const MachineRegisterInfo &MRI,
2255   const TargetRegisterInfo &TRI) const {
2256   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2257   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2258     return RB;
2259 
2260   // Ignore the type, since we don't use vcc in artifacts.
2261   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2262     return &RBI.getRegBankFromRegClass(*RC, LLT());
2263   return nullptr;
2264 }
2265 
2266 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2267   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2268   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2269   const DebugLoc &DL = I.getDebugLoc();
2270   MachineBasicBlock &MBB = *I.getParent();
2271   const Register DstReg = I.getOperand(0).getReg();
2272   const Register SrcReg = I.getOperand(1).getReg();
2273 
2274   const LLT DstTy = MRI->getType(DstReg);
2275   const LLT SrcTy = MRI->getType(SrcReg);
2276   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2277     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2278   const unsigned DstSize = DstTy.getSizeInBits();
2279   if (!DstTy.isScalar())
2280     return false;
2281 
2282   // Artifact casts should never use vcc.
2283   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2284 
2285   // FIXME: This should probably be illegal and split earlier.
2286   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2287     if (DstSize <= 32)
2288       return selectCOPY(I);
2289 
2290     const TargetRegisterClass *SrcRC =
2291         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2292     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2293     const TargetRegisterClass *DstRC =
2294         TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2295 
2296     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2297     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2298     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2299       .addReg(SrcReg)
2300       .addImm(AMDGPU::sub0)
2301       .addReg(UndefReg)
2302       .addImm(AMDGPU::sub1);
2303     I.eraseFromParent();
2304 
2305     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2306            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2307   }
2308 
2309   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2310     // 64-bit should have been split up in RegBankSelect
2311 
2312     // Try to use an and with a mask if it will save code size.
2313     unsigned Mask;
2314     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2315       MachineInstr *ExtI =
2316       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2317         .addImm(Mask)
2318         .addReg(SrcReg);
2319       I.eraseFromParent();
2320       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2321     }
2322 
2323     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2324     MachineInstr *ExtI =
2325       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2326       .addReg(SrcReg)
2327       .addImm(0) // Offset
2328       .addImm(SrcSize); // Width
2329     I.eraseFromParent();
2330     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2331   }
2332 
2333   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2334     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2335       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2336     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2337       return false;
2338 
2339     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2340       const unsigned SextOpc = SrcSize == 8 ?
2341         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2342       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2343         .addReg(SrcReg);
2344       I.eraseFromParent();
2345       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2346     }
2347 
2348     // Using a single 32-bit SALU to calculate the high half is smaller than
2349     // S_BFE with a literal constant operand.
2350     if (DstSize > 32 && SrcSize == 32) {
2351       Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2352       unsigned SubReg = InReg ? AMDGPU::sub0 : AMDGPU::NoSubRegister;
2353       if (Signed) {
2354         BuildMI(MBB, I, DL, TII.get(AMDGPU::S_ASHR_I32), HiReg)
2355           .addReg(SrcReg, 0, SubReg)
2356           .addImm(31);
2357       } else {
2358         BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B32), HiReg)
2359           .addImm(0);
2360       }
2361       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2362         .addReg(SrcReg, 0, SubReg)
2363         .addImm(AMDGPU::sub0)
2364         .addReg(HiReg)
2365         .addImm(AMDGPU::sub1);
2366       I.eraseFromParent();
2367       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass,
2368                                           *MRI);
2369     }
2370 
2371     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2372     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2373 
    // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
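    // e.g. extending an 8-bit source encodes offset 0, width 8 as the
    // immediate 8 << 16 = 0x80000.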
2375     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2376       // We need a 64-bit register source, but the high bits don't matter.
2377       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2378       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2379       unsigned SubReg = InReg ? AMDGPU::sub0 : AMDGPU::NoSubRegister;
2380 
2381       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2382       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2383         .addReg(SrcReg, 0, SubReg)
2384         .addImm(AMDGPU::sub0)
2385         .addReg(UndefReg)
2386         .addImm(AMDGPU::sub1);
2387 
2388       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2389         .addReg(ExtReg)
2390         .addImm(SrcSize << 16);
2391 
2392       I.eraseFromParent();
2393       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2394     }
2395 
2396     unsigned Mask;
2397     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2398       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2399         .addReg(SrcReg)
2400         .addImm(Mask);
2401     } else {
2402       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2403         .addReg(SrcReg)
2404         .addImm(SrcSize << 16);
2405     }
2406 
2407     I.eraseFromParent();
2408     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2409   }
2410 
2411   return false;
2412 }
2413 
2414 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2415   MachineBasicBlock *BB = I.getParent();
2416   MachineOperand &ImmOp = I.getOperand(1);
2417   Register DstReg = I.getOperand(0).getReg();
2418   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2419 
2420   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2421   if (ImmOp.isFPImm()) {
2422     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2423     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2424   } else if (ImmOp.isCImm()) {
2425     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2426   } else {
2427     llvm_unreachable("Not supported by g_constants");
2428   }
2429 
2430   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2431   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2432 
2433   unsigned Opcode;
2434   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2435     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2436   } else {
2437     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2438 
2439     // We should never produce s1 values on banks other than VCC. If the user of
2440     // this already constrained the register, we may incorrectly think it's VCC
2441     // if it wasn't originally.
2442     if (Size == 1)
2443       return false;
2444   }
2445 
2446   if (Size != 64) {
2447     I.setDesc(TII.get(Opcode));
2448     I.addImplicitDefUseOperands(*MF);
2449     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2450   }
2451 
2452   const DebugLoc &DL = I.getDebugLoc();
2453 
2454   APInt Imm(Size, I.getOperand(1).getImm());
2455 
2456   MachineInstr *ResInst;
2457   if (IsSgpr && TII.isInlineConstant(Imm)) {
2458     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2459       .addImm(I.getOperand(1).getImm());
2460   } else {
2461     const TargetRegisterClass *RC = IsSgpr ?
2462       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2463     Register LoReg = MRI->createVirtualRegister(RC);
2464     Register HiReg = MRI->createVirtualRegister(RC);
2465 
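    // Split the 64-bit immediate into two 32-bit moves plus a REG_SEQUENCE;
    // e.g. 0x112345678 becomes lo = 0x12345678, hi = 0x1.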
2466     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2467       .addImm(Imm.trunc(32).getZExtValue());
2468 
2469     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2470       .addImm(Imm.ashr(32).getZExtValue());
2471 
2472     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2473       .addReg(LoReg)
2474       .addImm(AMDGPU::sub0)
2475       .addReg(HiReg)
2476       .addImm(AMDGPU::sub1);
2477   }
2478 
  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target-independent opcodes.
2481   I.eraseFromParent();
2482   const TargetRegisterClass *DstRC =
2483     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2484   if (!DstRC)
2485     return true;
2486   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2487 }
2488 
2489 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2490   // Only manually handle the f64 SGPR case.
2491   //
2492   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2493   // the bit ops theoretically have a second result due to the implicit def of
2494   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2495   // that is easy by disabling the check. The result works, but uses a
2496   // nonsensical sreg32orlds_and_sreg_1 regclass.
2497   //
  // The DAG emitter is more problematic, and incorrectly adds both results of
  // the S_XOR_B32 to the variadic REG_SEQUENCE operands.
2500 
2501   Register Dst = MI.getOperand(0).getReg();
2502   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2503   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2504       MRI->getType(Dst) != LLT::scalar(64))
2505     return false;
2506 
2507   Register Src = MI.getOperand(1).getReg();
2508   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2509   if (Fabs)
2510     Src = Fabs->getOperand(1).getReg();
2511 
2512   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2513       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2514     return false;
2515 
2516   MachineBasicBlock *BB = MI.getParent();
2517   const DebugLoc &DL = MI.getDebugLoc();
2518   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2519   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2520   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2521   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2522 
2523   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2524     .addReg(Src, 0, AMDGPU::sub0);
2525   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2526     .addReg(Src, 0, AMDGPU::sub1);
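  // Only the sign bit in the high dword of the f64 needs to change.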
2527   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2528     .addImm(0x80000000);
2529 
2530   // Set or toggle sign bit.
2531   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2532   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2533     .addReg(HiReg)
2534     .addReg(ConstReg);
2535   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2536     .addReg(LoReg)
2537     .addImm(AMDGPU::sub0)
2538     .addReg(OpReg)
2539     .addImm(AMDGPU::sub1);
2540   MI.eraseFromParent();
2541   return true;
2542 }
2543 
2544 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2545 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2546   Register Dst = MI.getOperand(0).getReg();
2547   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2548   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2549       MRI->getType(Dst) != LLT::scalar(64))
2550     return false;
2551 
2552   Register Src = MI.getOperand(1).getReg();
2553   MachineBasicBlock *BB = MI.getParent();
2554   const DebugLoc &DL = MI.getDebugLoc();
2555   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2556   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2557   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2558   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2559 
2560   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2561       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2562     return false;
2563 
2564   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2565     .addReg(Src, 0, AMDGPU::sub0);
2566   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2567     .addReg(Src, 0, AMDGPU::sub1);
2568   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2569     .addImm(0x7fffffff);
2570 
2571   // Clear sign bit.
  // TODO: Should this use S_BITSET0_*?
2573   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2574     .addReg(HiReg)
2575     .addReg(ConstReg);
2576   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2577     .addReg(LoReg)
2578     .addImm(AMDGPU::sub0)
2579     .addReg(OpReg)
2580     .addImm(AMDGPU::sub1);
2581 
2582   MI.eraseFromParent();
2583   return true;
2584 }
2585 
2586 static bool isConstant(const MachineInstr &MI) {
2587   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2588 }
2589 
2590 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2591     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2592 
2593   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2594 
2595   assert(PtrMI);
2596 
2597   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2598     return;
2599 
2600   GEPInfo GEPInfo;
2601 
2602   for (unsigned i = 1; i != 3; ++i) {
2603     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2604     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2605     assert(OpDef);
2606     if (i == 2 && isConstant(*OpDef)) {
2607       // TODO: Could handle constant base + variable offset, but a combine
2608       // probably should have commuted it.
2609       assert(GEPInfo.Imm == 0);
2610       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2611       continue;
2612     }
2613     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2614     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2615       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2616     else
2617       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2618   }
2619 
2620   AddrInfo.push_back(GEPInfo);
2621   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2622 }
2623 
2624 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2625   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2626 }
2627 
2628 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2629   if (!MI.hasOneMemOperand())
2630     return false;
2631 
2632   const MachineMemOperand *MMO = *MI.memoperands_begin();
2633   const Value *Ptr = MMO->getValue();
2634 
2635   // UndefValue means this is a load of a kernel input.  These are uniform.
2636   // Sometimes LDS instructions have constant pointers.
2637   // If Ptr is null, then that means this mem operand contains a
2638   // PseudoSourceValue like GOT.
2639   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2640       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2641     return true;
2642 
2643   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2644     return true;
2645 
2646   const Instruction *I = dyn_cast<Instruction>(Ptr);
2647   return I && I->getMetadata("amdgpu.uniform");
2648 }
2649 
2650 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2651   for (const GEPInfo &GEPInfo : AddrInfo) {
2652     if (!GEPInfo.VgprParts.empty())
2653       return true;
2654   }
2655   return false;
2656 }
2657 
2658 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2659   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2660   unsigned AS = PtrTy.getAddressSpace();
2661   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2662       STI.ldsRequiresM0Init()) {
2663     MachineBasicBlock *BB = I.getParent();
2664 
2665     // If DS instructions require M0 initialization, insert it before selecting.
2666     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2667       .addImm(-1);
2668   }
2669 }
2670 
2671 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2672   MachineInstr &I) const {
2673   initM0(I);
2674   return selectImpl(I, *CoverageInfo);
2675 }
2676 
2677 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2678   if (Reg.isPhysical())
2679     return false;
2680 
2681   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2682   const unsigned Opcode = MI.getOpcode();
2683 
2684   if (Opcode == AMDGPU::COPY)
2685     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2686 
2687   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2688       Opcode == AMDGPU::G_XOR)
2689     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2690            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2691 
2692   if (Opcode == TargetOpcode::G_INTRINSIC)
2693     return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2694 
2695   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2696 }
2697 
2698 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2699   MachineBasicBlock *BB = I.getParent();
2700   MachineOperand &CondOp = I.getOperand(0);
2701   Register CondReg = CondOp.getReg();
2702   const DebugLoc &DL = I.getDebugLoc();
2703 
2704   unsigned BrOpcode;
2705   Register CondPhysReg;
2706   const TargetRegisterClass *ConstrainRC;
2707 
2708   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2709   // whether the branch is uniform when selecting the instruction. In
2710   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2711   // RegBankSelect knows what it's doing if the branch condition is scc, even
2712   // though it currently does not.
2713   if (!isVCC(CondReg, *MRI)) {
2714     if (MRI->getType(CondReg) != LLT::scalar(32))
2715       return false;
2716 
2717     CondPhysReg = AMDGPU::SCC;
2718     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2719     ConstrainRC = &AMDGPU::SReg_32RegClass;
2720   } else {
    // FIXME: Should scc->vcc copies be ANDed with exec?
2722 
    // Unless the value of CondReg is the result of a V_CMP* instruction, we
    // need to insert an AND with exec.
2725     if (!isVCmpResult(CondReg, *MRI)) {
2726       const bool Is64 = STI.isWave64();
2727       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2728       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2729 
2730       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2731       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2732           .addReg(CondReg)
2733           .addReg(Exec);
2734       CondReg = TmpReg;
2735     }
2736 
2737     CondPhysReg = TRI.getVCC();
2738     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2739     ConstrainRC = TRI.getBoolRC();
2740   }
2741 
2742   if (!MRI->getRegClassOrNull(CondReg))
2743     MRI->setRegClass(CondReg, ConstrainRC);
2744 
2745   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2746     .addReg(CondReg);
2747   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2748     .addMBB(I.getOperand(1).getMBB());
2749 
2750   I.eraseFromParent();
2751   return true;
2752 }
2753 
2754 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2755   MachineInstr &I) const {
2756   Register DstReg = I.getOperand(0).getReg();
2757   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2758   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2759   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
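  // setDesc does not add implicit operands, so re-add the EXEC use the VALU
  // mov requires.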
2760   if (IsVGPR)
2761     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2762 
2763   return RBI.constrainGenericRegister(
2764     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2765 }
2766 
2767 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2768   Register DstReg = I.getOperand(0).getReg();
2769   Register SrcReg = I.getOperand(1).getReg();
2770   Register MaskReg = I.getOperand(2).getReg();
2771   LLT Ty = MRI->getType(DstReg);
2772   LLT MaskTy = MRI->getType(MaskReg);
2773   MachineBasicBlock *BB = I.getParent();
2774   const DebugLoc &DL = I.getDebugLoc();
2775 
2776   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2777   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2778   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2779   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
  if (DstRB != SrcRB) // Should only happen for hand-written MIR.
2781     return false;
2782 
2783   // Try to avoid emitting a bit operation when we only need to touch half of
2784   // the 64-bit pointer.
2785   APInt MaskOnes = KB->getKnownOnes(MaskReg).zext(64);
2786   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2787   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2788 
2789   const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2790   const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
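  // e.g. an align-down mask like ~0xfff has all of its high-half bits known
  // set, so only the low 32 bits need a real AND and the high half is simply
  // copied.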
2791 
2792   if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2793       !CanCopyLow32 && !CanCopyHi32) {
2794     auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2795       .addReg(SrcReg)
2796       .addReg(MaskReg);
2797     I.eraseFromParent();
2798     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2799   }
2800 
2801   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2802   const TargetRegisterClass &RegRC
2803     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2804 
2805   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2806   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2807   const TargetRegisterClass *MaskRC =
2808       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2809 
2810   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2811       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2812       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2813     return false;
2814 
2815   if (Ty.getSizeInBits() == 32) {
2816     assert(MaskTy.getSizeInBits() == 32 &&
2817            "ptrmask should have been narrowed during legalize");
2818 
2819     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2820       .addReg(SrcReg)
2821       .addReg(MaskReg);
2822     I.eraseFromParent();
2823     return true;
2824   }
2825 
2826   Register HiReg = MRI->createVirtualRegister(&RegRC);
2827   Register LoReg = MRI->createVirtualRegister(&RegRC);
2828 
2829   // Extract the subregisters from the source pointer.
2830   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2831     .addReg(SrcReg, 0, AMDGPU::sub0);
2832   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2833     .addReg(SrcReg, 0, AMDGPU::sub1);
2834 
2835   Register MaskedLo, MaskedHi;
2836 
2837   if (CanCopyLow32) {
2838     // If all the bits in the low half are 1, we only need a copy for it.
2839     MaskedLo = LoReg;
2840   } else {
    // Extract the mask subregister and apply the AND.
2842     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2843     MaskedLo = MRI->createVirtualRegister(&RegRC);
2844 
2845     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2846       .addReg(MaskReg, 0, AMDGPU::sub0);
2847     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2848       .addReg(LoReg)
2849       .addReg(MaskLo);
2850   }
2851 
2852   if (CanCopyHi32) {
2853     // If all the bits in the high half are 1, we only need a copy for it.
2854     MaskedHi = HiReg;
2855   } else {
2856     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2857     MaskedHi = MRI->createVirtualRegister(&RegRC);
2858 
2859     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2860       .addReg(MaskReg, 0, AMDGPU::sub1);
2861     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2862       .addReg(HiReg)
2863       .addReg(MaskHi);
2864   }
2865 
2866   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2867     .addReg(MaskedLo)
2868     .addImm(AMDGPU::sub0)
2869     .addReg(MaskedHi)
2870     .addImm(AMDGPU::sub1);
2871   I.eraseFromParent();
2872   return true;
2873 }
2874 
2875 /// Return the register to use for the index value, and the subregister to use
2876 /// for the indirectly accessed register.
2877 static std::pair<Register, unsigned>
2878 computeIndirectRegIndex(MachineRegisterInfo &MRI, const SIRegisterInfo &TRI,
2879                         const TargetRegisterClass *SuperRC, Register IdxReg,
2880                         unsigned EltSize, GISelKnownBits &KnownBits) {
2881   Register IdxBaseReg;
2882   int Offset;
2883 
2884   std::tie(IdxBaseReg, Offset) =
2885       AMDGPU::getBaseWithConstantOffset(MRI, IdxReg, &KnownBits);
2886   if (IdxBaseReg == AMDGPU::NoRegister) {
2887     // This will happen if the index is a known constant. This should ordinarily
2888     // be legalized out, but handle it as a register just in case.
2889     assert(Offset == 0);
2890     IdxBaseReg = IdxReg;
2891   }
2892 
2893   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2894 
2895   // Skip out of bounds offsets, or else we would end up using an undefined
2896   // register.
2897   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2898     return std::pair(IdxReg, SubRegs[0]);
2899   return std::pair(IdxBaseReg, SubRegs[Offset]);
2900 }
2901 
2902 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2903   MachineInstr &MI) const {
2904   Register DstReg = MI.getOperand(0).getReg();
2905   Register SrcReg = MI.getOperand(1).getReg();
2906   Register IdxReg = MI.getOperand(2).getReg();
2907 
2908   LLT DstTy = MRI->getType(DstReg);
2909   LLT SrcTy = MRI->getType(SrcReg);
2910 
2911   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2912   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2913   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2914 
  // The index must be scalar. If it wasn't, RegBankSelect should have moved
  // this into a waterfall loop.
2917   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2918     return false;
2919 
2920   const TargetRegisterClass *SrcRC =
2921       TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2922   const TargetRegisterClass *DstRC =
2923       TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2924   if (!SrcRC || !DstRC)
2925     return false;
2926   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2927       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2928       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2929     return false;
2930 
2931   MachineBasicBlock *BB = MI.getParent();
2932   const DebugLoc &DL = MI.getDebugLoc();
2933   const bool Is64 = DstTy.getSizeInBits() == 64;
2934 
2935   unsigned SubReg;
2936   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(
2937       *MRI, TRI, SrcRC, IdxReg, DstTy.getSizeInBits() / 8, *KB);
2938 
2939   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2940     if (DstTy.getSizeInBits() != 32 && !Is64)
2941       return false;
2942 
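    // S_MOVRELS reads the dynamic index from M0.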
2943     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2944       .addReg(IdxReg);
2945 
2946     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2947     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2948       .addReg(SrcReg, 0, SubReg)
2949       .addReg(SrcReg, RegState::Implicit);
2950     MI.eraseFromParent();
2951     return true;
2952   }
2953 
2954   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2955     return false;
2956 
2957   if (!STI.useVGPRIndexMode()) {
2958     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2959       .addReg(IdxReg);
2960     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2961       .addReg(SrcReg, 0, SubReg)
2962       .addReg(SrcReg, RegState::Implicit);
2963     MI.eraseFromParent();
2964     return true;
2965   }
2966 
2967   const MCInstrDesc &GPRIDXDesc =
2968       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2969   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2970       .addReg(SrcReg)
2971       .addReg(IdxReg)
2972       .addImm(SubReg);
2973 
2974   MI.eraseFromParent();
2975   return true;
2976 }
2977 
2978 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2979 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2980   MachineInstr &MI) const {
2981   Register DstReg = MI.getOperand(0).getReg();
2982   Register VecReg = MI.getOperand(1).getReg();
2983   Register ValReg = MI.getOperand(2).getReg();
2984   Register IdxReg = MI.getOperand(3).getReg();
2985 
2986   LLT VecTy = MRI->getType(DstReg);
2987   LLT ValTy = MRI->getType(ValReg);
2988   unsigned VecSize = VecTy.getSizeInBits();
2989   unsigned ValSize = ValTy.getSizeInBits();
2990 
2991   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2992   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2993   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2994 
2995   assert(VecTy.getElementType() == ValTy);
2996 
  // The index must be scalar. If it wasn't, RegBankSelect should have moved
  // this into a waterfall loop.
2999   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
3000     return false;
3001 
3002   const TargetRegisterClass *VecRC =
3003       TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
3004   const TargetRegisterClass *ValRC =
3005       TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
3006 
3007   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
3008       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
3009       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
3010       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
3011     return false;
3012 
3013   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
3014     return false;
3015 
3016   unsigned SubReg;
3017   std::tie(IdxReg, SubReg) =
3018       computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, ValSize / 8, *KB);
3019 
3020   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
3021                          STI.useVGPRIndexMode();
3022 
3023   MachineBasicBlock *BB = MI.getParent();
3024   const DebugLoc &DL = MI.getDebugLoc();
3025 
3026   if (!IndexMode) {
3027     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3028       .addReg(IdxReg);
3029 
3030     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
3031         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
3032     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
3033         .addReg(VecReg)
3034         .addReg(ValReg)
3035         .addImm(SubReg);
3036     MI.eraseFromParent();
3037     return true;
3038   }
3039 
3040   const MCInstrDesc &GPRIDXDesc =
3041       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
3042   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
3043       .addReg(VecReg)
3044       .addReg(ValReg)
3045       .addReg(IdxReg)
3046       .addImm(SubReg);
3047 
3048   MI.eraseFromParent();
3049   return true;
3050 }
3051 
3052 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3053   unsigned Opc;
3054   unsigned Size = MI.getOperand(3).getImm();
3055 
3056   // The struct intrinsic variants add one additional operand over raw.
3057   const bool HasVIndex = MI.getNumOperands() == 9;
3058   Register VIndex;
3059   int OpOffset = 0;
3060   if (HasVIndex) {
3061     VIndex = MI.getOperand(4).getReg();
3062     OpOffset = 1;
3063   }
3064 
3065   Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3066   std::optional<ValueAndVReg> MaybeVOffset =
3067       getIConstantVRegValWithLookThrough(VOffset, *MRI);
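  // A VGPR offset operand is needed unless the offset is a known zero
  // constant.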
3068   const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
3069 
3070   switch (Size) {
3071   default:
3072     return false;
3073   case 1:
3074     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3075                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3076                     : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3077                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3078     break;
3079   case 2:
3080     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3081                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3082                     : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3083                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3084     break;
3085   case 4:
3086     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3087                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3088                     : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3089                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3090     break;
3091   }
3092 
3093   MachineBasicBlock *MBB = MI.getParent();
3094   const DebugLoc &DL = MI.getDebugLoc();
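  // The LDS destination address from the intrinsic is passed in M0.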
3095   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3096     .add(MI.getOperand(2));
3097 
3098   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3099 
3100   if (HasVIndex && HasVOffset) {
3101     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3102     BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3103       .addReg(VIndex)
3104       .addImm(AMDGPU::sub0)
3105       .addReg(VOffset)
3106       .addImm(AMDGPU::sub1);
3107 
3108     MIB.addReg(IdxReg);
3109   } else if (HasVIndex) {
3110     MIB.addReg(VIndex);
3111   } else if (HasVOffset) {
3112     MIB.addReg(VOffset);
3113   }
3114 
3115   MIB.add(MI.getOperand(1));            // rsrc
3116   MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3117   MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
3118   unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3119   MIB.addImm(Aux & AMDGPU::CPol::ALL);  // cpol
3120   MIB.addImm((Aux >> 3) & 1);           // swz
3121 
3122   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3123   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3124   LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3125   MachinePointerInfo StorePtrI = LoadPtrI;
3126   StorePtrI.V = nullptr;
3127   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3128 
3129   auto F = LoadMMO->getFlags() &
3130            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3131   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3132                                      Size, LoadMMO->getBaseAlign());
3133 
3134   MachineMemOperand *StoreMMO =
3135       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3136                                sizeof(int32_t), LoadMMO->getBaseAlign());
3137 
3138   MIB.setMemRefs({LoadMMO, StoreMMO});
3139 
3140   MI.eraseFromParent();
3141   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3142 }
3143 
3144 /// Match a zero extend from a 32-bit value to 64-bits.
3145 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3146   Register ZExtSrc;
3147   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3148     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3149 
3150   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3151   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3152   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3153     return Register();
3154 
3155   assert(Def->getNumOperands() == 3 &&
3156          MRI.getType(Def->getOperand(0).getReg()) == LLT::scalar(64));
3157   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3158     return Def->getOperand(1).getReg();
3159   }
3160 
3161   return Register();
3162 }
3163 
bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const {
3165   unsigned Opc;
3166   unsigned Size = MI.getOperand(3).getImm();
3167 
3168   switch (Size) {
3169   default:
3170     return false;
3171   case 1:
3172     Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3173     break;
3174   case 2:
3175     Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3176     break;
3177   case 4:
3178     Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3179     break;
3180   }
3181 
3182   MachineBasicBlock *MBB = MI.getParent();
3183   const DebugLoc &DL = MI.getDebugLoc();
3184   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3185     .add(MI.getOperand(2));
3186 
3187   Register Addr = MI.getOperand(1).getReg();
3188   Register VOffset;
3189   // Try to split SAddr and VOffset. Global and LDS pointers share the same
3190   // immediate offset, so we cannot use a regular SelectGlobalSAddr().
3191   if (!isSGPR(Addr)) {
3192     auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3193     if (isSGPR(AddrDef->Reg)) {
3194       Addr = AddrDef->Reg;
3195     } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3196       Register SAddr =
3197           getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3198       if (isSGPR(SAddr)) {
3199         Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3200         if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3201           Addr = SAddr;
3202           VOffset = Off;
3203         }
3204       }
3205     }
3206   }
3207 
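  // The SADDR form still takes a VGPR offset operand; materialize a zero
  // VOffset if we did not find one above.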
3208   if (isSGPR(Addr)) {
3209     Opc = AMDGPU::getGlobalSaddrOp(Opc);
3210     if (!VOffset) {
3211       VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3212       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3213         .addImm(0);
3214     }
3215   }
3216 
3217   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3218     .addReg(Addr);
3219 
3220   if (isSGPR(Addr))
3221     MIB.addReg(VOffset);
3222 
3223   MIB.add(MI.getOperand(4))  // offset
3224      .add(MI.getOperand(5)); // cpol
3225 
3226   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3227   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3228   LoadPtrI.Offset = MI.getOperand(4).getImm();
3229   MachinePointerInfo StorePtrI = LoadPtrI;
3230   LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
3231   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3232   auto F = LoadMMO->getFlags() &
3233            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3234   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3235                                      Size, LoadMMO->getBaseAlign());
3236   MachineMemOperand *StoreMMO =
3237       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3238                                sizeof(int32_t), Align(4));
3239 
3240   MIB.setMemRefs({LoadMMO, StoreMMO});
3241 
3242   MI.eraseFromParent();
3243   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3244 }
3245 
bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
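  // Operand 1 holds the concrete target opcode chosen earlier; rewrite the
  // pseudo in place, drop that operand, and add the real opcode's implicit
  // operands.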
3247   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3248   MI.removeOperand(1);
3249   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3250   return true;
3251 }
3252 
3253 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3254   unsigned Opc;
3255   switch (MI.getIntrinsicID()) {
3256   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3257     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3258     break;
3259   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3260     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3261     break;
3262   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3263     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3264     break;
3265   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3266     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3267     break;
3268   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3269     Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3270     break;
3271   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3272     Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3273     break;
3274   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
3275     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_BF8_e64;
3276     break;
3277   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
3278     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_FP8_e64;
3279     break;
3280   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
3281     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_BF8_e64;
3282     break;
3283   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
3284     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_FP8_e64;
3285     break;
3286   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
3287     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_BF8_e64;
3288     break;
3289   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
3290     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_FP8_e64;
3291     break;
3292   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
3293     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_BF8_e64;
3294     break;
3295   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
3296     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_FP8_e64;
3297     break;
3298   default:
3299     llvm_unreachable("unhandled smfmac intrinsic");
3300   }
3301 
3302   auto VDst_In = MI.getOperand(4);
3303 
3304   MI.setDesc(TII.get(Opc));
3305   MI.removeOperand(4); // VDst_In
3306   MI.removeOperand(1); // Intrinsic ID
3307   MI.addOperand(VDst_In); // Readd VDst_In to the end
3308   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3309   return true;
3310 }
3311 
3312 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3313   Register DstReg = MI.getOperand(0).getReg();
3314   Register SrcReg = MI.getOperand(1).getReg();
3315   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3316   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3317   MachineBasicBlock *MBB = MI.getParent();
3318   const DebugLoc &DL = MI.getDebugLoc();
3319 
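  // The wave address is the scratch address divided by the wavefront size;
  // emit the shift with a VALU or SALU instruction depending on the
  // destination bank.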
3320   if (IsVALU) {
3321     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3322       .addImm(Subtarget->getWavefrontSizeLog2())
3323       .addReg(SrcReg);
3324   } else {
3325     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3326       .addReg(SrcReg)
3327       .addImm(Subtarget->getWavefrontSizeLog2());
3328   }
3329 
3330   const TargetRegisterClass &RC =
3331       IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3332   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3333     return false;
3334 
3335   MI.eraseFromParent();
3336   return true;
3337 }
3338 
3339 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3340   if (I.isPHI())
3341     return selectPHI(I);
3342 
3343   if (!I.isPreISelOpcode()) {
3344     if (I.isCopy())
3345       return selectCOPY(I);
3346     return true;
3347   }
3348 
3349   switch (I.getOpcode()) {
3350   case TargetOpcode::G_AND:
3351   case TargetOpcode::G_OR:
3352   case TargetOpcode::G_XOR:
3353     if (selectImpl(I, *CoverageInfo))
3354       return true;
3355     return selectG_AND_OR_XOR(I);
3356   case TargetOpcode::G_ADD:
3357   case TargetOpcode::G_SUB:
3358     if (selectImpl(I, *CoverageInfo))
3359       return true;
3360     return selectG_ADD_SUB(I);
3361   case TargetOpcode::G_UADDO:
3362   case TargetOpcode::G_USUBO:
3363   case TargetOpcode::G_UADDE:
3364   case TargetOpcode::G_USUBE:
3365     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3366   case AMDGPU::G_AMDGPU_MAD_U64_U32:
3367   case AMDGPU::G_AMDGPU_MAD_I64_I32:
3368     return selectG_AMDGPU_MAD_64_32(I);
3369   case TargetOpcode::G_INTTOPTR:
3370   case TargetOpcode::G_BITCAST:
3371   case TargetOpcode::G_PTRTOINT:
3372     return selectCOPY(I);
3373   case TargetOpcode::G_CONSTANT:
3374   case TargetOpcode::G_FCONSTANT:
3375     return selectG_CONSTANT(I);
3376   case TargetOpcode::G_FNEG:
3377     if (selectImpl(I, *CoverageInfo))
3378       return true;
3379     return selectG_FNEG(I);
3380   case TargetOpcode::G_FABS:
3381     if (selectImpl(I, *CoverageInfo))
3382       return true;
3383     return selectG_FABS(I);
3384   case TargetOpcode::G_EXTRACT:
3385     return selectG_EXTRACT(I);
3386   case TargetOpcode::G_MERGE_VALUES:
3387   case TargetOpcode::G_CONCAT_VECTORS:
3388     return selectG_MERGE_VALUES(I);
3389   case TargetOpcode::G_UNMERGE_VALUES:
3390     return selectG_UNMERGE_VALUES(I);
3391   case TargetOpcode::G_BUILD_VECTOR:
3392   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3393     return selectG_BUILD_VECTOR(I);
3394   case TargetOpcode::G_PTR_ADD:
3395     if (selectImpl(I, *CoverageInfo))
3396       return true;
3397     return selectG_PTR_ADD(I);
3398   case TargetOpcode::G_IMPLICIT_DEF:
3399     return selectG_IMPLICIT_DEF(I);
3400   case TargetOpcode::G_FREEZE:
3401     return selectCOPY(I);
3402   case TargetOpcode::G_INSERT:
3403     return selectG_INSERT(I);
3404   case TargetOpcode::G_INTRINSIC:
3405     return selectG_INTRINSIC(I);
3406   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3407     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3408   case TargetOpcode::G_ICMP:
3409     if (selectG_ICMP(I))
3410       return true;
3411     return selectImpl(I, *CoverageInfo);
3412   case TargetOpcode::G_LOAD:
3413   case TargetOpcode::G_STORE:
3414   case TargetOpcode::G_ATOMIC_CMPXCHG:
3415   case TargetOpcode::G_ATOMICRMW_XCHG:
3416   case TargetOpcode::G_ATOMICRMW_ADD:
3417   case TargetOpcode::G_ATOMICRMW_SUB:
3418   case TargetOpcode::G_ATOMICRMW_AND:
3419   case TargetOpcode::G_ATOMICRMW_OR:
3420   case TargetOpcode::G_ATOMICRMW_XOR:
3421   case TargetOpcode::G_ATOMICRMW_MIN:
3422   case TargetOpcode::G_ATOMICRMW_MAX:
3423   case TargetOpcode::G_ATOMICRMW_UMIN:
3424   case TargetOpcode::G_ATOMICRMW_UMAX:
3425   case TargetOpcode::G_ATOMICRMW_UINC_WRAP:
3426   case TargetOpcode::G_ATOMICRMW_UDEC_WRAP:
3427   case TargetOpcode::G_ATOMICRMW_FADD:
3428   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3429   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3430     return selectG_LOAD_STORE_ATOMICRMW(I);
3431   case TargetOpcode::G_SELECT:
3432     return selectG_SELECT(I);
3433   case TargetOpcode::G_TRUNC:
3434     return selectG_TRUNC(I);
3435   case TargetOpcode::G_SEXT:
3436   case TargetOpcode::G_ZEXT:
3437   case TargetOpcode::G_ANYEXT:
3438   case TargetOpcode::G_SEXT_INREG:
    // This is a workaround. For extension from type i1, `selectImpl()` uses
    // patterns from the TD file and generates an illegal VGPR-to-SGPR COPY,
    // since type i1 can only be held in an SGPR class.
3442     if (MRI->getType(I.getOperand(1).getReg()) != LLT::scalar(1) &&
3443         selectImpl(I, *CoverageInfo))
3444       return true;
3445     return selectG_SZA_EXT(I);
3446   case TargetOpcode::G_BRCOND:
3447     return selectG_BRCOND(I);
3448   case TargetOpcode::G_GLOBAL_VALUE:
3449     return selectG_GLOBAL_VALUE(I);
3450   case TargetOpcode::G_PTRMASK:
3451     return selectG_PTRMASK(I);
3452   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3453     return selectG_EXTRACT_VECTOR_ELT(I);
3454   case TargetOpcode::G_INSERT_VECTOR_ELT:
3455     return selectG_INSERT_VECTOR_ELT(I);
3456   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3457   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3458   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3459   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3460     const AMDGPU::ImageDimIntrinsicInfo *Intr
3461       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3462     assert(Intr && "not an image intrinsic with image pseudo");
3463     return selectImageIntrinsic(I, Intr);
3464   }
3465   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3466     return selectBVHIntrinsic(I);
3467   case AMDGPU::G_SBFX:
3468   case AMDGPU::G_UBFX:
3469     return selectG_SBFX_UBFX(I);
3470   case AMDGPU::G_SI_CALL:
3471     I.setDesc(TII.get(AMDGPU::SI_CALL));
3472     return true;
3473   case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3474     return selectWaveAddress(I);
3475   default:
3476     return selectImpl(I, *CoverageInfo);
3477   }
3478   return false;
3479 }
3480 
3481 InstructionSelector::ComplexRendererFns
3482 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3483   return {{
3484       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3485   }};
3486 
3487 }
3488 
3489 std::pair<Register, unsigned>
3490 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3491                                               bool IsCanonicalizing,
3492                                               bool AllowAbs, bool OpSel) const {
3493   Register Src = Root.getReg();
3494   unsigned Mods = 0;
3495   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3496 
3497   if (MI->getOpcode() == AMDGPU::G_FNEG) {
3498     Src = MI->getOperand(1).getReg();
3499     Mods |= SISrcMods::NEG;
3500     MI = getDefIgnoringCopies(Src, *MRI);
3501   } else if (MI->getOpcode() == AMDGPU::G_FSUB && IsCanonicalizing) {
    // Fold fsub [+-]0 into fneg. This may not have been folded depending on
    // the denormal mode, but we're implicitly canonicalizing in a source
    // operand.
3504     const ConstantFP *LHS =
3505         getConstantFPVRegVal(MI->getOperand(1).getReg(), *MRI);
3506     if (LHS && LHS->isZero()) {
3507       Mods |= SISrcMods::NEG;
3508       Src = MI->getOperand(2).getReg();
3509     }
3510   }
3511 
3512   if (AllowAbs && MI->getOpcode() == AMDGPU::G_FABS) {
3513     Src = MI->getOperand(1).getReg();
3514     Mods |= SISrcMods::ABS;
3515   }
3516 
3517   if (OpSel)
3518     Mods |= SISrcMods::OP_SEL_0;
3519 
3520   return std::pair(Src, Mods);
3521 }
3522 
3523 Register AMDGPUInstructionSelector::copyToVGPRIfSrcFolded(
3524     Register Src, unsigned Mods, MachineOperand Root, MachineInstr *InsertPt,
3525     bool ForceVGPR) const {
3526   if ((Mods != 0 || ForceVGPR) &&
3527       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3528 
3529     // If we looked through copies to find source modifiers on an SGPR operand,
3530     // we now have an SGPR register source. To avoid potentially violating the
3531     // constant bus restriction, we need to insert a copy to a VGPR.
3532     Register VGPRSrc = MRI->cloneVirtualRegister(Root.getReg());
3533     BuildMI(*InsertPt->getParent(), InsertPt, InsertPt->getDebugLoc(),
3534             TII.get(AMDGPU::COPY), VGPRSrc)
3535         .addReg(Src);
3536     Src = VGPRSrc;
3537   }
3538 
3539   return Src;
3540 }
3541 
3542 ///
3543 /// This will select either an SGPR or VGPR operand and will save us from
3544 /// having to write an extra tablegen pattern.
3545 InstructionSelector::ComplexRendererFns
3546 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3547   return {{
3548       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3549   }};
3550 }
3551 
3552 InstructionSelector::ComplexRendererFns
3553 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3554   Register Src;
3555   unsigned Mods;
3556   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3557 
3558   return {{
3559       [=](MachineInstrBuilder &MIB) {
3560         MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3561       },
3562       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3563       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3564       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3565   }};
3566 }
3567 
3568 InstructionSelector::ComplexRendererFns
3569 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3570   Register Src;
3571   unsigned Mods;
3572   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3573                                            /*IsCanonicalizing=*/true,
3574                                            /*AllowAbs=*/false);
3575 
3576   return {{
3577       [=](MachineInstrBuilder &MIB) {
3578         MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3579       },
3580       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3581       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3582       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3583   }};
3584 }
3585 
3586 InstructionSelector::ComplexRendererFns
3587 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3588   return {{
3589       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3590       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3591       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3592   }};
3593 }
3594 
3595 InstructionSelector::ComplexRendererFns
3596 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3597   Register Src;
3598   unsigned Mods;
3599   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3600 
3601   return {{
3602       [=](MachineInstrBuilder &MIB) {
3603         MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3604       },
3605       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3606   }};
3607 }
3608 
3609 InstructionSelector::ComplexRendererFns
3610 AMDGPUInstructionSelector::selectVOP3ModsNonCanonicalizing(
3611     MachineOperand &Root) const {
3612   Register Src;
3613   unsigned Mods;
3614   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /*IsCanonicalizing=*/false);
3615 
3616   return {{
3617       [=](MachineInstrBuilder &MIB) {
3618         MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3619       },
3620       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3621   }};
3622 }
3623 
3624 InstructionSelector::ComplexRendererFns
3625 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3626   Register Src;
3627   unsigned Mods;
3628   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /*IsCanonicalizing=*/true,
3629                                            /*AllowAbs=*/false);
3630 
3631   return {{
3632       [=](MachineInstrBuilder &MIB) {
3633         MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3634       },
3635       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3636   }};
3637 }
3638 
3639 InstructionSelector::ComplexRendererFns
3640 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3641   Register Reg = Root.getReg();
3642   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3643   if (Def->getOpcode() == AMDGPU::G_FNEG || Def->getOpcode() == AMDGPU::G_FABS)
3644     return {};
3645   return {{
3646       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3647   }};
3648 }
3649 
3650 std::pair<Register, unsigned>
3651 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3652   Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3653   unsigned Mods = 0;
3654   MachineInstr *MI = MRI.getVRegDef(Src);
3655 
3656   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3657       // It's possible to see an f32 fneg here, but unlikely.
3658       // TODO: Treat f32 fneg as only high bit.
3659       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3660     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3661     Src = MI->getOperand(1).getReg();
3662     MI = MRI.getVRegDef(Src);
3663   }
3664 
3665   // TODO: Handle G_FSUB 0 as fneg
3666 
3667   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3668   (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3669 
3670   // Packed instructions do not have abs modifiers.
3671   Mods |= SISrcMods::OP_SEL_1;
3672 
3673   return std::pair(Src, Mods);
3674 }
3675 
3676 InstructionSelector::ComplexRendererFns
3677 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3678   MachineRegisterInfo &MRI
3679     = Root.getParent()->getParent()->getParent()->getRegInfo();
3680 
3681   Register Src;
3682   unsigned Mods;
3683   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3684 
3685   return {{
3686       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3687       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3688   }};
3689 }
3690 
3691 InstructionSelector::ComplexRendererFns
3692 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3693   MachineRegisterInfo &MRI
3694     = Root.getParent()->getParent()->getParent()->getRegInfo();
3695 
3696   Register Src;
3697   unsigned Mods;
3698   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3699 
3700   return {{
3701       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3702       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3703   }};
3704 }
3705 
3706 InstructionSelector::ComplexRendererFns
3707 AMDGPUInstructionSelector::selectDotIUVOP3PMods(MachineOperand &Root) const {
  // The literal i1 value set in the intrinsic represents the SrcMods for the
  // next operand. The value arrives as an Imm operand, an i1 sign-extended to
  // int64_t: 1 (i.e. -1) promotes packed values to signed, 0 treats them as
  // unsigned.
3711   assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3712          "expected i1 value");
3713   unsigned Mods = SISrcMods::OP_SEL_1;
3714   if (Root.getImm() == -1)
3715     Mods ^= SISrcMods::NEG;
3716   return {{
3717       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3718   }};
3719 }
3720 
3721 InstructionSelector::ComplexRendererFns
3722 AMDGPUInstructionSelector::selectWMMAOpSelVOP3PMods(
3723     MachineOperand &Root) const {
3724   assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3725          "expected i1 value");
3726   unsigned Mods = SISrcMods::OP_SEL_1;
3727   if (Root.getImm() != 0)
3728     Mods |= SISrcMods::OP_SEL_0;
3729 
3730   return {{
3731       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3732   }};
3733 }
3734 
3735 InstructionSelector::ComplexRendererFns
3736 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3737   Register Src;
3738   unsigned Mods;
3739   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3740 
3741   // FIXME: Handle op_sel
3742   return {{
3743       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3744       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3745   }};
3746 }
3747 
3748 InstructionSelector::ComplexRendererFns
3749 AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {
3750   Register Src;
3751   unsigned Mods;
3752   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3753                                            /*IsCanonicalizing=*/true,
3754                                            /*AllowAbs=*/false,
3755                                            /*OpSel=*/false);
3756 
3757   return {{
3758       [=](MachineInstrBuilder &MIB) {
3759         MIB.addReg(
3760             copyToVGPRIfSrcFolded(Src, Mods, Root, MIB, /* ForceVGPR */ true));
3761       },
3762       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3763   }};
3764 }
3765 
3766 InstructionSelector::ComplexRendererFns
3767 AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {
3768   Register Src;
3769   unsigned Mods;
3770   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3771                                            /*IsCanonicalizing=*/true,
3772                                            /*AllowAbs=*/false,
3773                                            /*OpSel=*/true);
3774 
3775   return {{
3776       [=](MachineInstrBuilder &MIB) {
3777         MIB.addReg(
3778             copyToVGPRIfSrcFolded(Src, Mods, Root, MIB, /* ForceVGPR */ true));
3779       },
3780       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3781   }};
3782 }
3783 
3784 bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root,
3785                                                  Register &Base,
3786                                                  Register *SOffset,
3787                                                  int64_t *Offset) const {
3788   MachineInstr *MI = Root.getParent();
3789   MachineBasicBlock *MBB = MI->getParent();
3790 
3791   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3792   // then we can select all ptr + 32-bit offsets.
3793   SmallVector<GEPInfo, 4> AddrInfo;
3794   getAddrModeInfo(*MI, *MRI, AddrInfo);
3795 
3796   if (AddrInfo.empty())
3797     return false;
3798 
3799   const GEPInfo &GEPI = AddrInfo[0];
3800   std::optional<int64_t> EncodedImm =
3801       AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, false);
3802 
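  // When both an SGPR offset and an immediate are requested, combine the
  // encodable immediate from the outer add with a zero-extended SGPR offset
  // from the inner add.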
3803   if (SOffset && Offset) {
3804     if (GEPI.SgprParts.size() == 1 && GEPI.Imm != 0 && EncodedImm &&
3805         AddrInfo.size() > 1) {
3806       const GEPInfo &GEPI2 = AddrInfo[1];
3807       if (GEPI2.SgprParts.size() == 2 && GEPI2.Imm == 0) {
3808         if (Register OffsetReg =
3809                 matchZeroExtendFromS32(*MRI, GEPI2.SgprParts[1])) {
3810           Base = GEPI2.SgprParts[0];
3811           *SOffset = OffsetReg;
3812           *Offset = *EncodedImm;
3813           return true;
3814         }
3815       }
3816     }
3817     return false;
3818   }
3819 
3820   if (Offset && GEPI.SgprParts.size() == 1 && EncodedImm) {
3821     Base = GEPI.SgprParts[0];
3822     *Offset = *EncodedImm;
3823     return true;
3824   }
3825 
3826   // SGPR offset is unsigned.
3827   if (SOffset && GEPI.SgprParts.size() == 1 && isUInt<32>(GEPI.Imm) &&
3828       GEPI.Imm != 0) {
    // If we make it this far we have a load with a 32-bit immediate offset.
    // It is OK to select this using an SGPR offset, because we have already
    // failed trying to select this load into one of the _IMM variants since
    // the _IMM patterns are considered before the _SGPR patterns.
3833     Base = GEPI.SgprParts[0];
3834     *SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3835     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), *SOffset)
3836         .addImm(GEPI.Imm);
3837     return true;
3838   }
3839 
3840   if (SOffset && GEPI.SgprParts.size() && GEPI.Imm == 0) {
3841     if (Register OffsetReg = matchZeroExtendFromS32(*MRI, GEPI.SgprParts[1])) {
3842       Base = GEPI.SgprParts[0];
3843       *SOffset = OffsetReg;
3844       return true;
3845     }
3846   }
3847 
3848   return false;
3849 }
3850 
3851 InstructionSelector::ComplexRendererFns
3852 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3853   Register Base;
3854   int64_t Offset;
3855   if (!selectSmrdOffset(Root, Base, /* SOffset= */ nullptr, &Offset))
3856     return std::nullopt;
3857 
3858   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3859            [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
3860 }
3861 
3862 InstructionSelector::ComplexRendererFns
3863 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3864   SmallVector<GEPInfo, 4> AddrInfo;
3865   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3866 
3867   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3868     return std::nullopt;
3869 
3870   const GEPInfo &GEPInfo = AddrInfo[0];
3871   Register PtrReg = GEPInfo.SgprParts[0];
3872   std::optional<int64_t> EncodedImm =
3873       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3874   if (!EncodedImm)
3875     return std::nullopt;
3876 
3877   return {{
3878     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3879     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3880   }};
3881 }
3882 
3883 InstructionSelector::ComplexRendererFns
3884 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3885   Register Base, SOffset;
3886   if (!selectSmrdOffset(Root, Base, &SOffset, /* Offset= */ nullptr))
3887     return std::nullopt;
3888 
3889   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3890            [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }}};
3891 }
3892 
3893 InstructionSelector::ComplexRendererFns
3894 AMDGPUInstructionSelector::selectSmrdSgprImm(MachineOperand &Root) const {
3895   Register Base, SOffset;
3896   int64_t Offset;
3897   if (!selectSmrdOffset(Root, Base, &SOffset, &Offset))
3898     return std::nullopt;
3899 
3900   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3901            [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); },
3902            [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
3903 }
3904 
3905 std::pair<Register, int>
3906 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3907                                                 uint64_t FlatVariant) const {
3908   MachineInstr *MI = Root.getParent();
3909 
3910   auto Default = std::pair(Root.getReg(), 0);
3911 
3912   if (!STI.hasFlatInstOffsets())
3913     return Default;
3914 
3915   Register PtrBase;
3916   int64_t ConstOffset;
3917   std::tie(PtrBase, ConstOffset) =
3918       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3919   if (ConstOffset == 0 || !isFlatScratchBaseLegal(PtrBase, FlatVariant))
3920     return Default;
3921 
3922   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3923   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3924     return Default;
3925 
3926   return std::pair(PtrBase, ConstOffset);
3927 }
3928 
3929 InstructionSelector::ComplexRendererFns
3930 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3931   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3932 
3933   return {{
3934       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3935       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3936     }};
3937 }
3938 
3939 InstructionSelector::ComplexRendererFns
3940 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3941   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3942 
3943   return {{
3944       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3945       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3946   }};
3947 }
3948 
3949 InstructionSelector::ComplexRendererFns
3950 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3951   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3952 
3953   return {{
3954       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3955       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3956     }};
3957 }
3958 
3959 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3960 InstructionSelector::ComplexRendererFns
3961 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3962   Register Addr = Root.getReg();
3963   Register PtrBase;
3964   int64_t ConstOffset;
3965   int64_t ImmOffset = 0;
3966 
3967   // Match the immediate offset first, which canonically is moved as low as
3968   // possible.
3969   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3970 
3971   if (ConstOffset != 0) {
3972     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3973                               SIInstrFlags::FlatGlobal)) {
3974       Addr = PtrBase;
3975       ImmOffset = ConstOffset;
3976     } else {
3977       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3978       if (isSGPR(PtrBaseDef->Reg)) {
3979         if (ConstOffset > 0) {
3980           // Offset is too large.
3981           //
3982           // saddr + large_offset -> saddr +
3983           //                         (voffset = large_offset & ~MaxOffset) +
3984           //                         (large_offset & MaxOffset);
3985           int64_t SplitImmOffset, RemainderOffset;
3986           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3987               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3988 
3989           if (isUInt<32>(RemainderOffset)) {
3990             MachineInstr *MI = Root.getParent();
3991             MachineBasicBlock *MBB = MI->getParent();
3992             Register HighBits =
3993                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3994 
3995             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3996                     HighBits)
3997                 .addImm(RemainderOffset);
3998 
3999             return {{
4000                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
4001                 [=](MachineInstrBuilder &MIB) {
4002                   MIB.addReg(HighBits);
4003                 }, // voffset
4004                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
4005             }};
4006           }
4007         }
4008 
        // We are adding a 64-bit SGPR and a constant. If the constant bus
        // limit is 1, we would need 1 or 2 extra moves for each half of the
        // constant, so it is better to do a scalar add and then issue a
        // single VALU instruction to materialize zero. Otherwise it takes
        // fewer instructions to perform VALU adds with immediates or inline
        // literals.
4014         unsigned NumLiterals =
4015             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
4016             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
4017         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
4018           return std::nullopt;
4019       }
4020     }
4021   }
4022 
4023   // Match the variable offset.
4024   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4025   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4026     // Look through the SGPR->VGPR copy.
4027     Register SAddr =
4028         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
4029 
4030     if (isSGPR(SAddr)) {
4031       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
4032 
4033       // It's possible voffset is an SGPR here, but the copy to VGPR will be
4034       // inserted later.
4035       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
4036         return {{[=](MachineInstrBuilder &MIB) { // saddr
4037                    MIB.addReg(SAddr);
4038                  },
4039                  [=](MachineInstrBuilder &MIB) { // voffset
4040                    MIB.addReg(VOffset);
4041                  },
4042                  [=](MachineInstrBuilder &MIB) { // offset
4043                    MIB.addImm(ImmOffset);
4044                  }}};
4045       }
4046     }
4047   }
4048 
4049   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
4050   // drop this.
4051   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
4052       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
4053     return std::nullopt;
4054 
4055   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
4056   // moves required to copy a 64-bit SGPR to VGPR.
4057   MachineInstr *MI = Root.getParent();
4058   MachineBasicBlock *MBB = MI->getParent();
4059   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4060 
4061   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
4062       .addImm(0);
4063 
4064   return {{
4065       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
4066       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
4067       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
4068   }};
4069 }
4070 
4071 InstructionSelector::ComplexRendererFns
4072 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
4073   Register Addr = Root.getReg();
4074   Register PtrBase;
4075   int64_t ConstOffset;
4076   int64_t ImmOffset = 0;
4077 
4078   // Match the immediate offset first, which canonically is moved as low as
4079   // possible.
4080   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4081 
4082   if (ConstOffset != 0 && isFlatScratchBaseLegal(PtrBase) &&
4083       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
4084                             SIInstrFlags::FlatScratch)) {
4085     Addr = PtrBase;
4086     ImmOffset = ConstOffset;
4087   }
4088 
4089   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4090   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4091     int FI = AddrDef->MI->getOperand(1).getIndex();
4092     return {{
4093         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4094         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4095     }};
4096   }
4097 
4098   Register SAddr = AddrDef->Reg;
4099 
4100   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4101     Register LHS = AddrDef->MI->getOperand(1).getReg();
4102     Register RHS = AddrDef->MI->getOperand(2).getReg();
4103     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4104     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
4105 
4106     if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
4107         isSGPR(RHSDef->Reg)) {
4108       int FI = LHSDef->MI->getOperand(1).getIndex();
4109       MachineInstr &I = *Root.getParent();
4110       MachineBasicBlock *BB = I.getParent();
4111       const DebugLoc &DL = I.getDebugLoc();
4112       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4113 
4114       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
4115           .addFrameIndex(FI)
4116           .addReg(RHSDef->Reg);
4117     }
4118   }
4119 
4120   if (!isSGPR(SAddr))
4121     return std::nullopt;
4122 
4123   return {{
4124       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
4125       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4126   }};
4127 }
4128 
4129 // Check whether the flat scratch SVS swizzle bug affects this access.
4130 bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
4131     Register VAddr, Register SAddr, uint64_t ImmOffset) const {
4132   if (!Subtarget->hasFlatScratchSVSSwizzleBug())
4133     return false;
4134 
4135   // The bug affects the swizzling of SVS accesses if there is any carry out
4136   // from the two low order bits (i.e. from bit 1 into bit 2) when adding
4137   // voffset to (soffset + inst_offset).
4138   auto VKnown = KB->getKnownBits(VAddr);
4139   auto SKnown = KnownBits::computeForAddSub(
4140       true, false, KB->getKnownBits(SAddr),
4141       KnownBits::makeConstant(APInt(32, ImmOffset)));
4142   uint64_t VMax = VKnown.getMaxValue().getZExtValue();
4143   uint64_t SMax = SKnown.getMaxValue().getZExtValue();
4144   return (VMax & 3) + (SMax & 3) >= 4;
4145 }
4146 
4147 InstructionSelector::ComplexRendererFns
4148 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
4149   Register Addr = Root.getReg();
4150   Register PtrBase;
4151   int64_t ConstOffset;
4152   int64_t ImmOffset = 0;
4153 
4154   // Match the immediate offset first, which canonically is moved as low as
4155   // possible.
4156   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4157 
4158   if (ConstOffset != 0 &&
4159       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
4160     Addr = PtrBase;
4161     ImmOffset = ConstOffset;
4162   }
4163 
4164   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4165   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
4166     return std::nullopt;
4167 
4168   Register RHS = AddrDef->MI->getOperand(2).getReg();
4169   if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
4170     return std::nullopt;
4171 
4172   Register LHS = AddrDef->MI->getOperand(1).getReg();
4173   auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4174 
4175   if (!isFlatScratchBaseLegal(LHS) || !isFlatScratchBaseLegal(RHS))
4176     return std::nullopt;
4177 
4178   if (checkFlatScratchSVSSwizzleBug(RHS, LHS, ImmOffset))
4179     return std::nullopt;
4180 
4181   if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4182     int FI = LHSDef->MI->getOperand(1).getIndex();
4183     return {{
4184         [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4185         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4186         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4187     }};
4188   }
4189 
4190   if (!isSGPR(LHS))
4191     return std::nullopt;
4192 
4193   return {{
4194       [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4195       [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
4196       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4197   }};
4198 }
4199 
4200 InstructionSelector::ComplexRendererFns
4201 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
4202   MachineInstr *MI = Root.getParent();
4203   MachineBasicBlock *MBB = MI->getParent();
4204   MachineFunction *MF = MBB->getParent();
4205   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4206 
4207   int64_t Offset = 0;
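  // A pure constant address (other than the private null value) is split:
  // the high bits are materialized in a VGPR and the low bits are folded
  // into the immediate offset field.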
4208   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
4209       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
4210     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4211 
4212     // TODO: Should this be inside the render function? The iterator seems to
4213     // move.
4214     const uint32_t MaxOffset = SIInstrInfo::getMaxMUBUFImmOffset();
4215     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4216             HighBits)
4217         .addImm(Offset & ~MaxOffset);
4218 
4219     return {{[=](MachineInstrBuilder &MIB) { // rsrc
4220                MIB.addReg(Info->getScratchRSrcReg());
4221              },
4222              [=](MachineInstrBuilder &MIB) { // vaddr
4223                MIB.addReg(HighBits);
4224              },
4225              [=](MachineInstrBuilder &MIB) { // soffset
4226                // Use constant zero for soffset and rely on eliminateFrameIndex
4227                // to choose the appropriate frame register if need be.
4228                MIB.addImm(0);
4229              },
4230              [=](MachineInstrBuilder &MIB) { // offset
4231                MIB.addImm(Offset & MaxOffset);
4232              }}};
4233   }
4234 
4235   assert(Offset == 0 || Offset == -1);
4236 
4237   // Try to fold a frame index directly into the MUBUF vaddr field, and any
4238   // offsets.
4239   std::optional<int> FI;
4240   Register VAddr = Root.getReg();
4241   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
4242     Register PtrBase;
4243     int64_t ConstOffset;
4244     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
4245     if (ConstOffset != 0) {
4246       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
4247           (!STI.privateMemoryResourceIsRangeChecked() ||
4248            KB->signBitIsZero(PtrBase))) {
4249         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
4250         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
4251           FI = PtrBaseDef->getOperand(1).getIndex();
4252         else
4253           VAddr = PtrBase;
4254         Offset = ConstOffset;
4255       }
4256     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4257       FI = RootDef->getOperand(1).getIndex();
4258     }
4259   }
4260 
4261   return {{[=](MachineInstrBuilder &MIB) { // rsrc
4262              MIB.addReg(Info->getScratchRSrcReg());
4263            },
4264            [=](MachineInstrBuilder &MIB) { // vaddr
4265              if (FI)
4266                MIB.addFrameIndex(*FI);
4267              else
4268                MIB.addReg(VAddr);
4269            },
4270            [=](MachineInstrBuilder &MIB) { // soffset
4271              // Use constant zero for soffset and rely on eliminateFrameIndex
4272              // to choose the appropriate frame register if need be.
4273              MIB.addImm(0);
4274            },
4275            [=](MachineInstrBuilder &MIB) { // offset
4276              MIB.addImm(Offset);
4277            }}};
4278 }
4279 
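// A DS offset is usable if it fits the 16-bit unsigned offset field and, on
// subtargets where it matters, the base is known to be non-negative.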
4280 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
4281                                                 int64_t Offset) const {
4282   if (!isUInt<16>(Offset))
4283     return false;
4284 
4285   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4286     return true;
4287 
4288   // On Southern Islands, instructions with a negative base value and an
4289   // offset don't seem to work.
4290   return KB->signBitIsZero(Base);
4291 }
4292 
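// Check that a pair of offsets is usable for a two-element DS access: both
// offsets must be multiples of the element size and, after scaling, fit in
// the 8-bit offset0/offset1 fields.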
4293 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
4294                                                  int64_t Offset1,
4295                                                  unsigned Size) const {
4296   if (Offset0 % Size != 0 || Offset1 % Size != 0)
4297     return false;
4298   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
4299     return false;
4300 
4301   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4302     return true;
4303 
4304   // On Southern Islands, instructions with a negative base value and an
4305   // offset don't seem to work.
4306   return KB->signBitIsZero(Base);
4307 }
4308 
4309 bool AMDGPUInstructionSelector::isFlatScratchBaseLegal(
4310     Register Base, uint64_t FlatVariant) const {
4311   if (FlatVariant != SIInstrFlags::FlatScratch)
4312     return true;
4313 
4314   // When the value in the 32-bit Base can be negative, calculate the scratch
4315   // offset using a 32-bit add instruction; otherwise use Base (unsigned) + offset.
4316   return KB->signBitIsZero(Base);
4317 }
4318 
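// Return true if the G_AND masking a shift amount is redundant: the mask
// (combined with bits already known to be zero in the masked value) keeps at
// least the low ShAmtBits bits, which is all the shift instruction reads.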
4319 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
4320                                                     unsigned ShAmtBits) const {
4321   assert(MI.getOpcode() == TargetOpcode::G_AND);
4322 
4323   std::optional<APInt> RHS =
4324       getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
4325   if (!RHS)
4326     return false;
4327 
4328   if (RHS->countr_one() >= ShAmtBits)
4329     return true;
4330 
4331   const APInt &LHSKnownZeros = KB->getKnownZeroes(MI.getOperand(1).getReg());
4332   return (LHSKnownZeros | *RHS).countr_one() >= ShAmtBits;
4333 }
4334 
4335 // Return the wave level SGPR base address if this is a wave address.
4336 static Register getWaveAddress(const MachineInstr *Def) {
4337   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
4338              ? Def->getOperand(1).getReg()
4339              : Register();
4340 }
4341 
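/// Select a scratch MUBUF access that does not need a VGPR address: the
/// wave-level SGPR base address (optionally plus a legal constant offset)
/// goes in soffset, or the address is a bare constant offset.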
4342 InstructionSelector::ComplexRendererFns
4343 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
4344     MachineOperand &Root) const {
4345   Register Reg = Root.getReg();
4346   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4347 
4348   const MachineInstr *Def = MRI->getVRegDef(Reg);
4349   if (Register WaveBase = getWaveAddress(Def)) {
4350     return {{
4351         [=](MachineInstrBuilder &MIB) { // rsrc
4352           MIB.addReg(Info->getScratchRSrcReg());
4353         },
4354         [=](MachineInstrBuilder &MIB) { // soffset
4355           MIB.addReg(WaveBase);
4356         },
4357         [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
4358     }};
4359   }
4360 
4361   int64_t Offset = 0;
4362 
4363   // FIXME: Copy check is a hack
4364   Register BasePtr;
4365   if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
4366     if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4367       return {};
4368     const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
4369     Register WaveBase = getWaveAddress(BasePtrDef);
4370     if (!WaveBase)
4371       return {};
4372 
4373     return {{
4374         [=](MachineInstrBuilder &MIB) { // rsrc
4375           MIB.addReg(Info->getScratchRSrcReg());
4376         },
4377         [=](MachineInstrBuilder &MIB) { // soffset
4378           MIB.addReg(WaveBase);
4379         },
4380         [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4381     }};
4382   }
4383 
4384   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4385       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4386     return {};
4387 
4388   return {{
4389       [=](MachineInstrBuilder &MIB) { // rsrc
4390         MIB.addReg(Info->getScratchRSrcReg());
4391       },
4392       [=](MachineInstrBuilder &MIB) { // soffset
4393         MIB.addImm(0);
4394       },
4395       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4396   }};
4397 }
4398 
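// Split a DS address into a base register and an immediate that fits the
// 16-bit DS offset field, falling back to the original address with a zero
// offset.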
4399 std::pair<Register, unsigned>
4400 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4401   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4402   if (!RootDef)
4403     return std::pair(Root.getReg(), 0);
4404 
4405   int64_t ConstAddr = 0;
4406 
4407   Register PtrBase;
4408   int64_t Offset;
4409   std::tie(PtrBase, Offset) =
4410     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4411 
4412   if (Offset) {
4413     if (isDSOffsetLegal(PtrBase, Offset)) {
4414       // (add n0, c0)
4415       return std::pair(PtrBase, Offset);
4416     }
4417   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4418     // TODO
4419 
4420 
4421   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4422     // TODO
4423 
4424   }
4425 
4426   return std::pair(Root.getReg(), 0);
4427 }
4428 
4429 InstructionSelector::ComplexRendererFns
4430 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4431   Register Reg;
4432   unsigned Offset;
4433   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4434   return {{
4435       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4436       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4437     }};
4438 }
4439 
4440 InstructionSelector::ComplexRendererFns
4441 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4442   return selectDSReadWrite2(Root, 4);
4443 }
4444 
4445 InstructionSelector::ComplexRendererFns
4446 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4447   return selectDSReadWrite2(Root, 8);
4448 }
4449 
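// Select a base register plus two consecutive element offsets
// (offset1 = offset0 + 1) for two-element DS accesses; the offsets are in
// units of the element size.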
4450 InstructionSelector::ComplexRendererFns
4451 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4452                                               unsigned Size) const {
4453   Register Reg;
4454   unsigned Offset;
4455   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4456   return {{
4457       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4458       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4459       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset + 1); }
4460     }};
4461 }
4462 
4463 std::pair<Register, unsigned>
4464 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4465                                                   unsigned Size) const {
4466   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4467   if (!RootDef)
4468     return std::pair(Root.getReg(), 0);
4469 
4470   int64_t ConstAddr = 0;
4471 
4472   Register PtrBase;
4473   int64_t Offset;
4474   std::tie(PtrBase, Offset) =
4475     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4476 
4477   if (Offset) {
4478     int64_t OffsetValue0 = Offset;
4479     int64_t OffsetValue1 = Offset + Size;
4480     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4481       // (add n0, c0)
4482       return std::pair(PtrBase, OffsetValue0 / Size);
4483     }
4484   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4485     // TODO
4486 
4487   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4488     // TODO
4489 
4490   }
4491 
4492   return std::pair(Root.getReg(), 0);
4493 }
4494 
4495 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4496 /// the base value with the constant offset. There may be intervening copies
4497 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4498 /// not match the pattern.
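/// For example, %p = G_PTR_ADD %base, %c with %c = G_CONSTANT i32 16 yields
/// {%base, 16}.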
4499 std::pair<Register, int64_t>
4500 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4501   Register Root, const MachineRegisterInfo &MRI) const {
4502   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4503   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4504     return {Root, 0};
4505 
4506   MachineOperand &RHS = RootI->getOperand(2);
4507   std::optional<ValueAndVReg> MaybeOffset =
4508       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4509   if (!MaybeOffset)
4510     return {Root, 0};
4511   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4512 }
4513 
4514 static void addZeroImm(MachineInstrBuilder &MIB) {
4515   MIB.addImm(0);
4516 }
4517 
4518 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4519 /// BasePtr is not valid, a null base pointer will be used.
4520 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4521                           uint32_t FormatLo, uint32_t FormatHi,
4522                           Register BasePtr) {
4523   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4524   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4525   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4526   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4527 
4528   B.buildInstr(AMDGPU::S_MOV_B32)
4529     .addDef(RSrc2)
4530     .addImm(FormatLo);
4531   B.buildInstr(AMDGPU::S_MOV_B32)
4532     .addDef(RSrc3)
4533     .addImm(FormatHi);
4534 
4535   // Build the half of the descriptor that holds the constants as its own 64-bit
4536   // subregister before building the full 128-bit register. If we are building
4537   // multiple resource descriptors, this allows CSEing of the 2-component register.
4538   B.buildInstr(AMDGPU::REG_SEQUENCE)
4539     .addDef(RSrcHi)
4540     .addReg(RSrc2)
4541     .addImm(AMDGPU::sub0)
4542     .addReg(RSrc3)
4543     .addImm(AMDGPU::sub1);
4544 
4545   Register RSrcLo = BasePtr;
4546   if (!BasePtr) {
4547     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4548     B.buildInstr(AMDGPU::S_MOV_B64)
4549       .addDef(RSrcLo)
4550       .addImm(0);
4551   }
4552 
4553   B.buildInstr(AMDGPU::REG_SEQUENCE)
4554     .addDef(RSrc)
4555     .addReg(RSrcLo)
4556     .addImm(AMDGPU::sub0_sub1)
4557     .addReg(RSrcHi)
4558     .addImm(AMDGPU::sub2_sub3);
4559 
4560   return RSrc;
4561 }
4562 
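// Addr64 form of the descriptor: the 64-bit pointer becomes the base of the
// resource, and only the high word of the default data format is kept (see
// the FIXME below).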
4563 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4564                                 const SIInstrInfo &TII, Register BasePtr) {
4565   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4566 
4567   // FIXME: Why are half the "default" bits ignored based on the addressing
4568   // mode?
4569   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4570 }
4571 
4572 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4573                                const SIInstrInfo &TII, Register BasePtr) {
4574   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4575 
4576   // FIXME: Why are half the "default" bits ignored based on the addressing
4577   // mode?
4578   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4579 }
4580 
4581 AMDGPUInstructionSelector::MUBUFAddressData
4582 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4583   MUBUFAddressData Data;
4584   Data.N0 = Src;
4585 
4586   Register PtrBase;
4587   int64_t Offset;
4588 
4589   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4590   if (isUInt<32>(Offset)) {
4591     Data.N0 = PtrBase;
4592     Data.Offset = Offset;
4593   }
4594 
4595   if (MachineInstr *InputAdd
4596       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4597     Data.N2 = InputAdd->getOperand(1).getReg();
4598     Data.N3 = InputAdd->getOperand(2).getReg();
4599 
4600     // FIXME: Need to fix the extra SGPR->VGPR copies that get inserted.
4601     // FIXME: We don't actually know that operand 0 is the def here.
4602     //
4603     // TODO: Remove this when we have copy folding optimizations after
4604     // RegBankSelect.
4605     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4606     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4607   }
4608 
4609   return Data;
4610 }
4611 
4612 /// Return true if the addr64 MUBUF mode should be used for the given address.
4613 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4614   // (ptr_add N2, N3) -> addr64, or
4615   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4616   if (Addr.N2)
4617     return true;
4618 
4619   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4620   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4621 }
4622 
4623 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4624 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4625 /// component.
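/// If the offset is not a legal MUBUF immediate, the whole value is moved
/// into a newly created SGPR \p SOffset and \p ImmOffset is cleared.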
4626 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4627   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4628   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4629     return;
4630 
4631   // Illegal offset, store it in soffset.
4632   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4633   B.buildInstr(AMDGPU::S_MOV_B32)
4634     .addDef(SOffset)
4635     .addImm(ImmOffset);
4636   ImmOffset = 0;
4637 }
4638 
4639 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4640   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4641   Register &SOffset, int64_t &Offset) const {
4642   // FIXME: Predicates should stop this from reaching here.
4643   // The addr64 bit was removed for Volcanic Islands.
4644   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4645     return false;
4646 
4647   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4648   if (!shouldUseAddr64(AddrData))
4649     return false;
4650 
4651   Register N0 = AddrData.N0;
4652   Register N2 = AddrData.N2;
4653   Register N3 = AddrData.N3;
4654   Offset = AddrData.Offset;
4655 
4656   // Base pointer for the SRD.
4657   Register SRDPtr;
4658 
4659   if (N2) {
4660     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4661       assert(N3);
4662       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4663         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4664         // addr64, and construct the default resource from a 0 address.
4665         VAddr = N0;
4666       } else {
4667         SRDPtr = N3;
4668         VAddr = N2;
4669       }
4670     } else {
4671       // N2 is not divergent.
4672       SRDPtr = N2;
4673       VAddr = N3;
4674     }
4675   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4676     // Use the default null pointer in the resource
4677     VAddr = N0;
4678   } else {
4679     // N0 -> offset, or
4680     // (N0 + C1) -> offset
4681     SRDPtr = N0;
4682   }
4683 
4684   MachineIRBuilder B(*Root.getParent());
4685   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4686   splitIllegalMUBUFOffset(B, SOffset, Offset);
4687   return true;
4688 }
4689 
4690 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4691   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4692   int64_t &Offset) const {
4693 
4694   // FIXME: Pattern should not reach here.
4695   if (STI.useFlatForGlobal())
4696     return false;
4697 
4698   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4699   if (shouldUseAddr64(AddrData))
4700     return false;
4701 
4702   // N0 -> offset, or
4703   // (N0 + C1) -> offset
4704   Register SRDPtr = AddrData.N0;
4705   Offset = AddrData.Offset;
4706 
4707   // TODO: Look through extensions for 32-bit soffset.
4708   MachineIRBuilder B(*Root.getParent());
4709 
4710   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4711   splitIllegalMUBUFOffset(B, SOffset, Offset);
4712   return true;
4713 }
4714 
4715 InstructionSelector::ComplexRendererFns
4716 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4717   Register VAddr;
4718   Register RSrcReg;
4719   Register SOffset;
4720   int64_t Offset = 0;
4721 
4722   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4723     return {};
4724 
4725   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4726   // pattern.
4727   return {{
4728       [=](MachineInstrBuilder &MIB) {  // rsrc
4729         MIB.addReg(RSrcReg);
4730       },
4731       [=](MachineInstrBuilder &MIB) { // vaddr
4732         MIB.addReg(VAddr);
4733       },
4734       [=](MachineInstrBuilder &MIB) { // soffset
4735         if (SOffset)
4736           MIB.addReg(SOffset);
4737         else
4738           MIB.addImm(0);
4739       },
4740       [=](MachineInstrBuilder &MIB) { // offset
4741         MIB.addImm(Offset);
4742       },
4743       addZeroImm, //  cpol
4744       addZeroImm, //  tfe
4745       addZeroImm  //  swz
4746     }};
4747 }
4748 
4749 InstructionSelector::ComplexRendererFns
4750 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4751   Register RSrcReg;
4752   Register SOffset;
4753   int64_t Offset = 0;
4754 
4755   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4756     return {};
4757 
4758   return {{
4759       [=](MachineInstrBuilder &MIB) {  // rsrc
4760         MIB.addReg(RSrcReg);
4761       },
4762       [=](MachineInstrBuilder &MIB) { // soffset
4763         if (SOffset)
4764           MIB.addReg(SOffset);
4765         else
4766           MIB.addImm(0);
4767       },
4768       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4769       addZeroImm, //  cpol
4770       addZeroImm, //  tfe
4771       addZeroImm, //  swz
4772     }};
4773 }
4774 
4775 /// Get an immediate that must be 32 bits, and is treated as zero extended.
4776 static std::optional<uint64_t>
4777 getConstantZext32Val(Register Reg, const MachineRegisterInfo &MRI) {
4778   // getIConstantVRegVal sign-extends values, so check whether that matters here.
4779   std::optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
4780   if (!OffsetVal || !isInt<32>(*OffsetVal))
4781     return std::nullopt;
4782   return Lo_32(*OffsetVal);
4783 }
4784 
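// Select a constant offset for an SMRD buffer access, encoded for the
// subtarget's buffer offset field.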
4785 InstructionSelector::ComplexRendererFns
4786 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4787   std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4788   if (!OffsetVal)
4789     return {};
4790 
4791   std::optional<int64_t> EncodedImm =
4792       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4793   if (!EncodedImm)
4794     return {};
4795 
4796   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4797 }
4798 
4799 InstructionSelector::ComplexRendererFns
4800 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4801   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4802 
4803   std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4804   if (!OffsetVal)
4805     return {};
4806 
4807   std::optional<int64_t> EncodedImm =
4808       AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4809   if (!EncodedImm)
4810     return {};
4811 
4812   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4813 }
4814 
4815 InstructionSelector::ComplexRendererFns
4816 AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const {
4817   // Match the (soffset + offset) pair as a 32-bit register base and
4818   // an immediate offset.
4819   Register SOffset;
4820   unsigned Offset;
4821   std::tie(SOffset, Offset) =
4822       AMDGPU::getBaseWithConstantOffset(*MRI, Root.getReg(), KB);
4823   if (!SOffset)
4824     return std::nullopt;
4825 
4826   std::optional<int64_t> EncodedOffset =
4827       AMDGPU::getSMRDEncodedOffset(STI, Offset, /* IsBuffer */ true);
4828   if (!EncodedOffset)
4829     return std::nullopt;
4830 
4831   assert(MRI->getType(SOffset) == LLT::scalar(32));
4832   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); },
4833            [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedOffset); }}};
4834 }
4835 
4836 // Variant of stripBitCast that returns the instruction instead of a
4837 // MachineOperand.
4838 static MachineInstr *stripBitCast(MachineInstr *MI, MachineRegisterInfo &MRI) {
4839   if (MI->getOpcode() == AMDGPU::G_BITCAST)
4840     return getDefIgnoringCopies(MI->getOperand(1).getReg(), MRI);
4841   return MI;
4842 }
4843 
4844 // Figure out if this is really an extract of the high 16 bits of a dword;
4845 // returns nullptr if it isn't.
4846 static MachineInstr *isExtractHiElt(MachineInstr *Inst,
4847                                     MachineRegisterInfo &MRI) {
4848   Inst = stripBitCast(Inst, MRI);
4849 
4850   if (Inst->getOpcode() != AMDGPU::G_TRUNC)
4851     return nullptr;
4852 
4853   MachineInstr *TruncOp =
4854       getDefIgnoringCopies(Inst->getOperand(1).getReg(), MRI);
4855   TruncOp = stripBitCast(TruncOp, MRI);
4856 
4857   // G_LSHR x, (G_CONSTANT i32 16)
4858   if (TruncOp->getOpcode() == AMDGPU::G_LSHR) {
4859     auto SrlAmount = getIConstantVRegValWithLookThrough(
4860         TruncOp->getOperand(2).getReg(), MRI);
4861     if (SrlAmount && SrlAmount->Value.getZExtValue() == 16) {
4862       MachineInstr *SrlOp =
4863           getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI);
4864       return stripBitCast(SrlOp, MRI);
4865     }
4866   }
4867 
4868   // G_SHUFFLE_VECTOR x, y, shufflemask(1, 1|0)
4869   //    1, 0 swaps the low/high 16 bits.
4870   //    1, 1 sets the high 16 bits to be the same as the low 16.
4871   // In either case, it selects the high 16 bits.
4872   if (TruncOp->getOpcode() == AMDGPU::G_SHUFFLE_VECTOR) {
4873     assert(MRI.getType(TruncOp->getOperand(0).getReg()) ==
4874            LLT::fixed_vector(2, 16));
4875 
4876     ArrayRef<int> Mask = TruncOp->getOperand(3).getShuffleMask();
4877     assert(Mask.size() == 2);
4878 
4879     if (Mask[0] == 1 && Mask[1] <= 1) {
4880       MachineInstr *LHS =
4881           getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI);
4882       return stripBitCast(LHS, MRI);
4883     }
4884   }
4885 
4886   return nullptr;
4887 }
4888 
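// Match mad_mix/fma_mix source modifiers: either a plain 32-bit source, or an
// f16 source behind a G_FPEXT, optionally taking the high half of a packed
// register via op_sel. Matched is set only when the fp16 form is recognized.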
4889 std::pair<Register, unsigned>
4890 AMDGPUInstructionSelector::selectVOP3PMadMixModsImpl(MachineOperand &Root,
4891                                                      bool &Matched) const {
4892   Matched = false;
4893 
4894   Register Src;
4895   unsigned Mods;
4896   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
4897 
4898   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
4899   if (MI->getOpcode() == AMDGPU::G_FPEXT) {
4900     MachineOperand *MO = &MI->getOperand(1);
4901     Src = MO->getReg();
4902     MI = getDefIgnoringCopies(Src, *MRI);
4903 
4904     assert(MRI->getType(Src) == LLT::scalar(16));
4905 
4906     // See through bitcasts.
4907     // FIXME: Would be nice to use stripBitCast here.
4908     if (MI->getOpcode() == AMDGPU::G_BITCAST) {
4909       MO = &MI->getOperand(1);
4910       Src = MO->getReg();
4911       MI = getDefIgnoringCopies(Src, *MRI);
4912     }
4913 
4914     const auto CheckAbsNeg = [&]() {
4915       // Be careful about folding modifiers if we already have an abs. fneg is
4916       // applied last, so we don't want to apply an earlier fneg.
4917       if ((Mods & SISrcMods::ABS) == 0) {
4918         unsigned ModsTmp;
4919         std::tie(Src, ModsTmp) = selectVOP3ModsImpl(*MO);
4920         MI = getDefIgnoringCopies(Src, *MRI);
4921 
4922         if ((ModsTmp & SISrcMods::NEG) != 0)
4923           Mods ^= SISrcMods::NEG;
4924 
4925         if ((ModsTmp & SISrcMods::ABS) != 0)
4926           Mods |= SISrcMods::ABS;
4927       }
4928     };
4929 
4930     CheckAbsNeg();
4931 
4932     // op_sel/op_sel_hi decide the source type and source.
4933     // If the source's op_sel_hi is set, it indicates to do a conversion from
4934     // fp16. If the source's op_sel is set, it picks the high half of the
4935     // source register.
4936 
4937     Mods |= SISrcMods::OP_SEL_1;
4938 
4939     if (MachineInstr *ExtractHiEltMI = isExtractHiElt(MI, *MRI)) {
4940       Mods |= SISrcMods::OP_SEL_0;
4941       MI = ExtractHiEltMI;
4942       MO = &MI->getOperand(0);
4943       Src = MO->getReg();
4944 
4945       CheckAbsNeg();
4946     }
4947 
4948     Matched = true;
4949   }
4950 
4951   return {Src, Mods};
4952 }
4953 
4954 InstructionSelector::ComplexRendererFns
4955 AMDGPUInstructionSelector::selectVOP3PMadMixModsExt(
4956     MachineOperand &Root) const {
4957   Register Src;
4958   unsigned Mods;
4959   bool Matched;
4960   std::tie(Src, Mods) = selectVOP3PMadMixModsImpl(Root, Matched);
4961   if (!Matched)
4962     return {};
4963 
4964   return {{
4965       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
4966       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
4967   }};
4968 }
4969 
4970 InstructionSelector::ComplexRendererFns
4971 AMDGPUInstructionSelector::selectVOP3PMadMixMods(MachineOperand &Root) const {
4972   Register Src;
4973   unsigned Mods;
4974   bool Matched;
4975   std::tie(Src, Mods) = selectVOP3PMadMixModsImpl(Root, Matched);
4976 
4977   return {{
4978       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
4979       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
4980   }};
4981 }
4982 
4983 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4984                                                  const MachineInstr &MI,
4985                                                  int OpIdx) const {
4986   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4987          "Expected G_CONSTANT");
4988   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4989 }
4990 
4991 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4992                                                 const MachineInstr &MI,
4993                                                 int OpIdx) const {
4994   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4995          "Expected G_CONSTANT");
4996   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4997 }
4998 
4999 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
5000                                                  const MachineInstr &MI,
5001                                                  int OpIdx) const {
5002   assert(OpIdx == -1);
5003 
5004   const MachineOperand &Op = MI.getOperand(1);
5005   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
5006     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
5007   else {
5008     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
5009     MIB.addImm(Op.getCImm()->getSExtValue());
5010   }
5011 }
5012 
5013 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
5014                                                 const MachineInstr &MI,
5015                                                 int OpIdx) const {
5016   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
5017          "Expected G_CONSTANT");
5018   MIB.addImm(MI.getOperand(1).getCImm()->getValue().popcount());
5019 }
5020 
5021 /// This only really exists to satisfy the DAG type checking machinery, so it is
5022 /// a no-op here.
5023 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
5024                                                 const MachineInstr &MI,
5025                                                 int OpIdx) const {
5026   MIB.addImm(MI.getOperand(OpIdx).getImm());
5027 }
5028 
5029 void AMDGPUInstructionSelector::renderOpSelTImm(MachineInstrBuilder &MIB,
5030                                                 const MachineInstr &MI,
5031                                                 int OpIdx) const {
5032   assert(OpIdx >= 0 && "expected to match an immediate operand");
5033   MIB.addImm(MI.getOperand(OpIdx).getImm() ? (int64_t)SISrcMods::OP_SEL_0 : 0);
5034 }
5035 
5036 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
5037                                                   const MachineInstr &MI,
5038                                                   int OpIdx) const {
5039   assert(OpIdx >= 0 && "expected to match an immediate operand");
5040   MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
5041 }
5042 
5043 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
5044                                                  const MachineInstr &MI,
5045                                                  int OpIdx) const {
5046   assert(OpIdx >= 0 && "expected to match an immediate operand");
5047   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
5048 }
5049 
5050 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
5051                                              const MachineInstr &MI,
5052                                              int OpIdx) const {
5053   assert(OpIdx >= 0 && "expected to match an immediate operand");
5054   MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
5055 }
5056 
5057 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
5058                                                  const MachineInstr &MI,
5059                                                  int OpIdx) const {
5060   MIB.addFrameIndex(MI.getOperand(1).getIndex());
5061 }
5062 
5063 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
5064   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
5065 }
5066 
5067 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
5068   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
5069 }
5070 
5071 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
5072   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
5073 }
5074 
5075 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
5076   return TII.isInlineConstant(Imm);
5077 }
5078