1 //===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// SI implementation of the TargetRegisterInfo class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SIRegisterInfo.h"
15 #include "AMDGPURegisterBankInfo.h"
16 #include "AMDGPUSubtarget.h"
17 #include "SIInstrInfo.h"
18 #include "SIMachineFunctionInfo.h"
19 #include "MCTargetDesc/AMDGPUInstPrinter.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "llvm/CodeGen/LiveIntervals.h"
22 #include "llvm/CodeGen/MachineDominators.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/RegisterScavenging.h"
26 #include "llvm/CodeGen/SlotIndexes.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/LLVMContext.h"
29 #include <vector>
30 
31 using namespace llvm;
32 
33 #define GET_REGINFO_TARGET_DESC
34 #include "AMDGPUGenRegisterInfo.inc"
35 
36 static cl::opt<bool> EnableSpillSGPRToVGPR(
37   "amdgpu-spill-sgpr-to-vgpr",
38   cl::desc("Enable spilling SGPRs to VGPRs"),
39   cl::ReallyHidden,
40   cl::init(true));
41 
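// RegSplitParts[N - 1][I] holds the sub-register index covering the I-th
// aligned N x 32-bit piece of a register tuple; e.g. RegSplitParts[1] is
// expected to begin {sub0_sub1, sub2_sub3, ...} once the lazy initialization
// in the constructor below has run.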
42 std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts;
43 
44 SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
45     : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),
46       SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) {
47 
48   assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 &&
49          getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) &&
50          (getSubRegIndexLaneMask(AMDGPU::lo16) |
51           getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() ==
52            getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() &&
53          "getNumCoveredRegs() will not work with generated subreg masks!");
54 
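  // Ignore M0 and all VGPR high-16-bit units when tracking register pressure.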
55   RegPressureIgnoredUnits.resize(getNumRegUnits());
56   RegPressureIgnoredUnits.set(*MCRegUnitIterator(AMDGPU::M0, this));
57   for (auto Reg : AMDGPU::VGPR_HI16RegClass)
58     RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this));
59 
60   // HACK: Until this is fully tablegen'd.
61   static llvm::once_flag InitializeRegSplitPartsFlag;
62 
63   static auto InitializeRegSplitPartsOnce = [this]() {
64     for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) {
65       unsigned Size = getSubRegIdxSize(Idx);
66       if (Size & 31)
67         continue;
68       std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1];
69       unsigned Pos = getSubRegIdxOffset(Idx);
70       if (Pos % Size)
71         continue;
72       Pos /= Size;
73       if (Vec.empty()) {
74         unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits.
75         Vec.resize(MaxNumParts);
76       }
77       Vec[Pos] = Idx;
78     }
79   };
80
82   llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce);
83 }
84 
85 void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
86                                            MCRegister Reg) const {
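  // Mark Reg together with every register that aliases it (including Reg
  // itself) as reserved.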
87   MCRegAliasIterator R(Reg, this, true);
88 
89   for (; R.isValid(); ++R)
90     Reserved.set(*R);
91 }
92 
93 // Forced to be defined here by one of the generated .inc files.
94 const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
95   const MachineFunction *MF) const {
96   CallingConv::ID CC = MF->getFunction().getCallingConv();
97   switch (CC) {
98   case CallingConv::C:
99   case CallingConv::Fast:
100   case CallingConv::Cold:
101     return CSR_AMDGPU_HighRegs_SaveList;
102   default: {
103     // Dummy to not crash RegisterClassInfo.
104     static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;
105     return &NoCalleeSavedReg;
106   }
107   }
108 }
109 
110 const MCPhysReg *
111 SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
112   return nullptr;
113 }
114 
115 const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
116                                                      CallingConv::ID CC) const {
117   switch (CC) {
118   case CallingConv::C:
119   case CallingConv::Fast:
120   case CallingConv::Cold:
121     return CSR_AMDGPU_HighRegs_RegMask;
122   default:
123     return nullptr;
124   }
125 }
126 
127 Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
128   const SIFrameLowering *TFI =
129       MF.getSubtarget<GCNSubtarget>().getFrameLowering();
130   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
131   // During ISel lowering we always reserve the stack pointer in entry
132   // functions, but never actually want to reference it when accessing our own
133   // frame. If we need a frame pointer we use it, but otherwise we can just use
134   // an immediate "0" which we represent by returning NoRegister.
135   if (FuncInfo->isEntryFunction()) {
136     return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register();
137   }
138   return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg()
139                         : FuncInfo->getStackPtrOffsetReg();
140 }
141 
142 bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
143   // When we need stack realignment, we can't reference off of the
144   // stack pointer, so we reserve a base pointer.
145   const MachineFrameInfo &MFI = MF.getFrameInfo();
146   return MFI.getNumFixedObjects() && needsStackRealignment(MF);
147 }
148 
149 Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; }
150 
151 const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const {
152   return CSR_AMDGPU_AllVGPRs_RegMask;
153 }
154 
155 const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const {
156   return CSR_AMDGPU_AllAllocatableSRegs_RegMask;
157 }
158 
159 // FIXME: TableGen should generate something to make this manageable for all
160 // register classes. At a minimum we could use the opposite of
161 // composeSubRegIndices and go up from the base 32-bit subreg.
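// For example, per the table below, getSubRegFromChannel(2, /*NumRegs=*/2)
// returns AMDGPU::sub2_sub3 and getSubRegFromChannel(0, /*NumRegs=*/4)
// returns AMDGPU::sub0_sub1_sub2_sub3.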
162 unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel,
163                                               unsigned NumRegs) {
164   // Table of NumRegs sized pieces at every 32-bit offset.
165   static const uint16_t SubRegFromChannelTable[][32] = {
166       {AMDGPU::sub0,  AMDGPU::sub1,  AMDGPU::sub2,  AMDGPU::sub3,
167        AMDGPU::sub4,  AMDGPU::sub5,  AMDGPU::sub6,  AMDGPU::sub7,
168        AMDGPU::sub8,  AMDGPU::sub9,  AMDGPU::sub10, AMDGPU::sub11,
169        AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
170        AMDGPU::sub16, AMDGPU::sub17, AMDGPU::sub18, AMDGPU::sub19,
171        AMDGPU::sub20, AMDGPU::sub21, AMDGPU::sub22, AMDGPU::sub23,
172        AMDGPU::sub24, AMDGPU::sub25, AMDGPU::sub26, AMDGPU::sub27,
173        AMDGPU::sub28, AMDGPU::sub29, AMDGPU::sub30, AMDGPU::sub31},
174       {AMDGPU::sub0_sub1,   AMDGPU::sub1_sub2,    AMDGPU::sub2_sub3,
175        AMDGPU::sub3_sub4,   AMDGPU::sub4_sub5,    AMDGPU::sub5_sub6,
176        AMDGPU::sub6_sub7,   AMDGPU::sub7_sub8,    AMDGPU::sub8_sub9,
177        AMDGPU::sub9_sub10,  AMDGPU::sub10_sub11,  AMDGPU::sub11_sub12,
178        AMDGPU::sub12_sub13, AMDGPU::sub13_sub14,  AMDGPU::sub14_sub15,
179        AMDGPU::sub15_sub16, AMDGPU::sub16_sub17,  AMDGPU::sub17_sub18,
180        AMDGPU::sub18_sub19, AMDGPU::sub19_sub20,  AMDGPU::sub20_sub21,
181        AMDGPU::sub21_sub22, AMDGPU::sub22_sub23,  AMDGPU::sub23_sub24,
182        AMDGPU::sub24_sub25, AMDGPU::sub25_sub26,  AMDGPU::sub26_sub27,
183        AMDGPU::sub27_sub28, AMDGPU::sub28_sub29,  AMDGPU::sub29_sub30,
184        AMDGPU::sub30_sub31, AMDGPU::NoSubRegister},
185       {AMDGPU::sub0_sub1_sub2,    AMDGPU::sub1_sub2_sub3,
186        AMDGPU::sub2_sub3_sub4,    AMDGPU::sub3_sub4_sub5,
187        AMDGPU::sub4_sub5_sub6,    AMDGPU::sub5_sub6_sub7,
188        AMDGPU::sub6_sub7_sub8,    AMDGPU::sub7_sub8_sub9,
189        AMDGPU::sub8_sub9_sub10,   AMDGPU::sub9_sub10_sub11,
190        AMDGPU::sub10_sub11_sub12, AMDGPU::sub11_sub12_sub13,
191        AMDGPU::sub12_sub13_sub14, AMDGPU::sub13_sub14_sub15,
192        AMDGPU::sub14_sub15_sub16, AMDGPU::sub15_sub16_sub17,
193        AMDGPU::sub16_sub17_sub18, AMDGPU::sub17_sub18_sub19,
194        AMDGPU::sub18_sub19_sub20, AMDGPU::sub19_sub20_sub21,
195        AMDGPU::sub20_sub21_sub22, AMDGPU::sub21_sub22_sub23,
196        AMDGPU::sub22_sub23_sub24, AMDGPU::sub23_sub24_sub25,
197        AMDGPU::sub24_sub25_sub26, AMDGPU::sub25_sub26_sub27,
198        AMDGPU::sub26_sub27_sub28, AMDGPU::sub27_sub28_sub29,
199        AMDGPU::sub28_sub29_sub30, AMDGPU::sub29_sub30_sub31,
200        AMDGPU::NoSubRegister,     AMDGPU::NoSubRegister},
201       {AMDGPU::sub0_sub1_sub2_sub3,     AMDGPU::sub1_sub2_sub3_sub4,
202        AMDGPU::sub2_sub3_sub4_sub5,     AMDGPU::sub3_sub4_sub5_sub6,
203        AMDGPU::sub4_sub5_sub6_sub7,     AMDGPU::sub5_sub6_sub7_sub8,
204        AMDGPU::sub6_sub7_sub8_sub9,     AMDGPU::sub7_sub8_sub9_sub10,
205        AMDGPU::sub8_sub9_sub10_sub11,   AMDGPU::sub9_sub10_sub11_sub12,
206        AMDGPU::sub10_sub11_sub12_sub13, AMDGPU::sub11_sub12_sub13_sub14,
207        AMDGPU::sub12_sub13_sub14_sub15, AMDGPU::sub13_sub14_sub15_sub16,
208        AMDGPU::sub14_sub15_sub16_sub17, AMDGPU::sub15_sub16_sub17_sub18,
209        AMDGPU::sub16_sub17_sub18_sub19, AMDGPU::sub17_sub18_sub19_sub20,
210        AMDGPU::sub18_sub19_sub20_sub21, AMDGPU::sub19_sub20_sub21_sub22,
211        AMDGPU::sub20_sub21_sub22_sub23, AMDGPU::sub21_sub22_sub23_sub24,
212        AMDGPU::sub22_sub23_sub24_sub25, AMDGPU::sub23_sub24_sub25_sub26,
213        AMDGPU::sub24_sub25_sub26_sub27, AMDGPU::sub25_sub26_sub27_sub28,
214        AMDGPU::sub26_sub27_sub28_sub29, AMDGPU::sub27_sub28_sub29_sub30,
215        AMDGPU::sub28_sub29_sub30_sub31, AMDGPU::NoSubRegister,
216        AMDGPU::NoSubRegister,           AMDGPU::NoSubRegister}};
217 
218   const unsigned NumRegIndex = NumRegs - 1;
219 
220   assert(NumRegIndex < array_lengthof(SubRegFromChannelTable) &&
221          "Not implemented");
222   assert(Channel < array_lengthof(SubRegFromChannelTable[0]));
223   return SubRegFromChannelTable[NumRegIndex][Channel];
224 }
225 
226 MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg(
227   const MachineFunction &MF) const {
228   unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
229   MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
230   return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
231 }
232 
233 BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
234   BitVector Reserved(getNumRegs());
235   Reserved.set(AMDGPU::MODE);
236 
237   // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
238   // this seems likely to result in bugs, so mark them as reserved.
239   reserveRegisterTuples(Reserved, AMDGPU::EXEC);
240   reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);
241 
242   // M0 has to be reserved so that LLVM accepts it as a live-in to a block.
243   reserveRegisterTuples(Reserved, AMDGPU::M0);
244 
245   // Reserve src_vccz, src_execz, src_scc.
246   reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
247   reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
248   reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);
249 
250   // Reserve the memory aperture registers.
251   reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
252   reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
253   reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
254   reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);
255 
256   // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
257   reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);
258 
259   // Reserve xnack_mask registers - support is not implemented in Codegen.
260   reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);
261 
262   // Reserve lds_direct register - support is not implemented in Codegen.
263   reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);
264 
265   // Reserve Trap Handler registers - support is not implemented in Codegen.
266   reserveRegisterTuples(Reserved, AMDGPU::TBA);
267   reserveRegisterTuples(Reserved, AMDGPU::TMA);
268   reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
269   reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
270   reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
271   reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
272   reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
273   reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
274   reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
275   reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);
276 
277   // Reserve null register - it shall never be allocated
278   reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);
279 
280   // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
281   // will result in bugs.
282   if (isWave32) {
283     Reserved.set(AMDGPU::VCC);
284     Reserved.set(AMDGPU::VCC_HI);
285   }
286 
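  // Reserve every SGPR, VGPR and AGPR above the per-function limits returned
  // by getMaxNumSGPRs/getMaxNumVGPRs below.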
287   unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
288   unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
289   for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
290     unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
291     reserveRegisterTuples(Reserved, Reg);
292   }
293 
294   unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
295   unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
296   for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
297     unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
298     reserveRegisterTuples(Reserved, Reg);
299     Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
300     reserveRegisterTuples(Reserved, Reg);
301   }
302 
303   for (auto Reg : AMDGPU::SReg_32RegClass) {
304     Reserved.set(getSubReg(Reg, AMDGPU::hi16));
305     Register Low = getSubReg(Reg, AMDGPU::lo16);
306     // This is to prevent basic block VCC liveness errors.
307     if (!AMDGPU::SGPR_LO16RegClass.contains(Low))
308       Reserved.set(Low);
309   }
310 
311   for (auto Reg : AMDGPU::AGPR_32RegClass) {
312     Reserved.set(getSubReg(Reg, AMDGPU::hi16));
313   }
314 
315   // Reserve all remaining AGPRs if there are no instructions to use them.
316   if (!ST.hasMAIInsts()) {
317     for (unsigned i = 0; i < MaxNumVGPRs; ++i) {
318       unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
319       reserveRegisterTuples(Reserved, Reg);
320     }
321   }
322 
323   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
324 
325   unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
326   if (ScratchRSrcReg != AMDGPU::NoRegister) {
327     // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we need
328     // to spill.
329     // TODO: May need to reserve a VGPR if doing LDS spilling.
330     reserveRegisterTuples(Reserved, ScratchRSrcReg);
331   }
332 
333   // We have to assume the SP is needed in case there are calls in the function,
334   // which is detected after the function is lowered. If we aren't really going
335   // to need SP, don't bother reserving it.
336   MCRegister StackPtrReg = MFI->getStackPtrOffsetReg();
337 
338   if (StackPtrReg) {
339     reserveRegisterTuples(Reserved, StackPtrReg);
340     assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
341   }
342 
343   MCRegister FrameReg = MFI->getFrameOffsetReg();
344   if (FrameReg) {
345     reserveRegisterTuples(Reserved, FrameReg);
346     assert(!isSubRegister(ScratchRSrcReg, FrameReg));
347   }
348 
349   if (hasBasePointer(MF)) {
350     MCRegister BasePtrReg = getBaseRegister();
351     reserveRegisterTuples(Reserved, BasePtrReg);
352     assert(!isSubRegister(ScratchRSrcReg, BasePtrReg));
353   }
354 
355   for (MCRegister Reg : MFI->WWMReservedRegs) {
356     reserveRegisterTuples(Reserved, Reg);
357   }
358 
359   // FIXME: Stop using reserved registers for this.
360   for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
361     reserveRegisterTuples(Reserved, Reg);
362 
363   for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
364     reserveRegisterTuples(Reserved, Reg);
365 
366   if (MFI->VGPRReservedForSGPRSpill)
367     for (auto SSpill : MFI->getSGPRSpillVGPRs())
368       reserveRegisterTuples(Reserved, SSpill.VGPR);
369 
370   return Reserved;
371 }
372 
373 bool SIRegisterInfo::canRealignStack(const MachineFunction &MF) const {
374   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
375   // On entry, the base address is 0, so it can't possibly need any more
376   // alignment.
377 
378   // FIXME: Should be able to specify the entry frame alignment per calling
379   // convention instead.
380   if (Info->isEntryFunction())
381     return false;
382 
383   return TargetRegisterInfo::canRealignStack(MF);
384 }
385 
386 bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
387   const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
388   if (Info->isEntryFunction()) {
389     const MachineFrameInfo &MFI = Fn.getFrameInfo();
390     return MFI.hasStackObjects() || MFI.hasCalls();
391   }
392 
393   // May need scavenger for dealing with callee saved registers.
394   return true;
395 }
396 
397 bool SIRegisterInfo::requiresFrameIndexScavenging(
398   const MachineFunction &MF) const {
399   // Do not use frame virtual registers. They used to be used for SGPRs, but
400   // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the
401   // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a
402   // spill.
403   return false;
404 }
405 
406 bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
407   const MachineFunction &MF) const {
408   const MachineFrameInfo &MFI = MF.getFrameInfo();
409   return MFI.hasStackObjects();
410 }
411 
412 bool SIRegisterInfo::requiresVirtualBaseRegisters(
413   const MachineFunction &) const {
414   // There are no special dedicated stack or frame pointers.
415   return true;
416 }
417 
418 int64_t SIRegisterInfo::getMUBUFInstrOffset(const MachineInstr *MI) const {
419   assert(SIInstrInfo::isMUBUF(*MI));
420 
421   int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
422                                           AMDGPU::OpName::offset);
423   return MI->getOperand(OffIdx).getImm();
424 }
425 
426 int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
427                                                  int Idx) const {
428   if (!SIInstrInfo::isMUBUF(*MI))
429     return 0;
430 
431   assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
432                                            AMDGPU::OpName::vaddr) &&
433          "Should never see frame index on non-address operand");
434 
435   return getMUBUFInstrOffset(MI);
436 }
437 
438 bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
439   if (!MI->mayLoadOrStore())
440     return false;
441 
442   int64_t FullOffset = Offset + getMUBUFInstrOffset(MI);
443 
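  // MUBUF instructions only have a 12-bit unsigned immediate offset, so a
  // frame base register is needed whenever the combined offset does not fit.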
444   return !isUInt<12>(FullOffset);
445 }
446 
447 void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
448                                                   Register BaseReg,
449                                                   int FrameIdx,
450                                                   int64_t Offset) const {
451   MachineBasicBlock::iterator Ins = MBB->begin();
452   DebugLoc DL; // Defaults to "unknown"
453 
454   if (Ins != MBB->end())
455     DL = Ins->getDebugLoc();
456 
457   MachineFunction *MF = MBB->getParent();
458   const SIInstrInfo *TII = ST.getInstrInfo();
459 
460   if (Offset == 0) {
461     BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
462       .addFrameIndex(FrameIdx);
463     return;
464   }
465 
466   MachineRegisterInfo &MRI = MF->getRegInfo();
467   Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
468 
469   Register FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
470 
471   BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
472     .addImm(Offset);
473   BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), FIReg)
474     .addFrameIndex(FrameIdx);
475 
476   TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
477     .addReg(OffsetReg, RegState::Kill)
478     .addReg(FIReg)
479     .addImm(0); // clamp bit
480 }
481 
482 void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
483                                        int64_t Offset) const {
484   const SIInstrInfo *TII = ST.getInstrInfo();
485 
486 #ifndef NDEBUG
487   // FIXME: Is it possible to be storing a frame index to itself?
488   bool SeenFI = false;
489   for (const MachineOperand &MO: MI.operands()) {
490     if (MO.isFI()) {
491       if (SeenFI)
492         llvm_unreachable("should not see multiple frame indices");
493 
494       SeenFI = true;
495     }
496   }
497 #endif
498 
499   MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
500 #ifndef NDEBUG
501   MachineBasicBlock *MBB = MI.getParent();
502   MachineFunction *MF = MBB->getParent();
503 #endif
504   assert(FIOp && FIOp->isFI() && "frame index must be address operand");
505   assert(TII->isMUBUF(MI));
506   assert(TII->getNamedOperand(MI, AMDGPU::OpName::soffset)->getReg() ==
507          MF->getInfo<SIMachineFunctionInfo>()->getStackPtrOffsetReg() &&
508          "should only be seeing stack pointer offset relative FrameIndex");
509 
510   MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
511   int64_t NewOffset = OffsetOp->getImm() + Offset;
512   assert(isUInt<12>(NewOffset) && "offset should be legal");
513 
514   FIOp->ChangeToRegister(BaseReg, false);
515   OffsetOp->setImm(NewOffset);
516 }
517 
518 bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
519                                         Register BaseReg,
520                                         int64_t Offset) const {
521   if (!SIInstrInfo::isMUBUF(*MI))
522     return false;
523 
524   int64_t NewOffset = Offset + getMUBUFInstrOffset(MI);
525 
526   return isUInt<12>(NewOffset);
527 }
528 
529 const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
530   const MachineFunction &MF, unsigned Kind) const {
531   // This is inaccurate. It depends on the instruction and address space. The
532   // only place where we should hit this is for dealing with frame indexes /
533   // private accesses, so this is correct in that case.
534   return &AMDGPU::VGPR_32RegClass;
535 }
536 
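// Returns the number of 32-bit sub-registers covered by a spill pseudo, i.e.
// the pseudo's bit width divided by 32.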
537 static unsigned getNumSubRegsForSpillOp(unsigned Op) {
538 
539   switch (Op) {
540   case AMDGPU::SI_SPILL_S1024_SAVE:
541   case AMDGPU::SI_SPILL_S1024_RESTORE:
542   case AMDGPU::SI_SPILL_V1024_SAVE:
543   case AMDGPU::SI_SPILL_V1024_RESTORE:
544   case AMDGPU::SI_SPILL_A1024_SAVE:
545   case AMDGPU::SI_SPILL_A1024_RESTORE:
546     return 32;
547   case AMDGPU::SI_SPILL_S512_SAVE:
548   case AMDGPU::SI_SPILL_S512_RESTORE:
549   case AMDGPU::SI_SPILL_V512_SAVE:
550   case AMDGPU::SI_SPILL_V512_RESTORE:
551   case AMDGPU::SI_SPILL_A512_SAVE:
552   case AMDGPU::SI_SPILL_A512_RESTORE:
553     return 16;
554   case AMDGPU::SI_SPILL_S256_SAVE:
555   case AMDGPU::SI_SPILL_S256_RESTORE:
556   case AMDGPU::SI_SPILL_V256_SAVE:
557   case AMDGPU::SI_SPILL_V256_RESTORE:
558     return 8;
559   case AMDGPU::SI_SPILL_S192_SAVE:
560   case AMDGPU::SI_SPILL_S192_RESTORE:
561   case AMDGPU::SI_SPILL_V192_SAVE:
562   case AMDGPU::SI_SPILL_V192_RESTORE:
563     return 6;
564   case AMDGPU::SI_SPILL_S160_SAVE:
565   case AMDGPU::SI_SPILL_S160_RESTORE:
566   case AMDGPU::SI_SPILL_V160_SAVE:
567   case AMDGPU::SI_SPILL_V160_RESTORE:
568     return 5;
569   case AMDGPU::SI_SPILL_S128_SAVE:
570   case AMDGPU::SI_SPILL_S128_RESTORE:
571   case AMDGPU::SI_SPILL_V128_SAVE:
572   case AMDGPU::SI_SPILL_V128_RESTORE:
573   case AMDGPU::SI_SPILL_A128_SAVE:
574   case AMDGPU::SI_SPILL_A128_RESTORE:
575     return 4;
576   case AMDGPU::SI_SPILL_S96_SAVE:
577   case AMDGPU::SI_SPILL_S96_RESTORE:
578   case AMDGPU::SI_SPILL_V96_SAVE:
579   case AMDGPU::SI_SPILL_V96_RESTORE:
580     return 3;
581   case AMDGPU::SI_SPILL_S64_SAVE:
582   case AMDGPU::SI_SPILL_S64_RESTORE:
583   case AMDGPU::SI_SPILL_V64_SAVE:
584   case AMDGPU::SI_SPILL_V64_RESTORE:
585   case AMDGPU::SI_SPILL_A64_SAVE:
586   case AMDGPU::SI_SPILL_A64_RESTORE:
587     return 2;
588   case AMDGPU::SI_SPILL_S32_SAVE:
589   case AMDGPU::SI_SPILL_S32_RESTORE:
590   case AMDGPU::SI_SPILL_V32_SAVE:
591   case AMDGPU::SI_SPILL_V32_RESTORE:
592   case AMDGPU::SI_SPILL_A32_SAVE:
593   case AMDGPU::SI_SPILL_A32_RESTORE:
594     return 1;
595   default: llvm_unreachable("Invalid spill opcode");
596   }
597 }
598 
599 static int getOffsetMUBUFStore(unsigned Opc) {
600   switch (Opc) {
601   case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
602     return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
603   case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
604     return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
605   case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
606     return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
607   case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
608     return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
609   case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
610     return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
611   case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
612     return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
613   case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
614     return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
615   default:
616     return -1;
617   }
618 }
619 
620 static int getOffsetMUBUFLoad(unsigned Opc) {
621   switch (Opc) {
622   case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
623     return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
624   case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
625     return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
626   case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
627     return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
628   case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
629     return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
630   case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
631     return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
632   case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
633     return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
634   case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
635     return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
636   case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
637     return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
638   case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
639     return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
640   case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
641     return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
642   case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
643     return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
644   case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
645     return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
646   case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
647     return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
648   default:
649     return -1;
650   }
651 }
652 
653 static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST,
654                                            MachineBasicBlock::iterator MI,
655                                            int Index,
656                                            unsigned Lane,
657                                            unsigned ValueReg,
658                                            bool IsKill) {
659   MachineBasicBlock *MBB = MI->getParent();
660   MachineFunction *MF = MI->getParent()->getParent();
661   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
662   const SIInstrInfo *TII = ST.getInstrInfo();
663 
664   MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);
665 
666   if (Reg == AMDGPU::NoRegister)
667     return MachineInstrBuilder();
668 
669   bool IsStore = MI->mayStore();
670   MachineRegisterInfo &MRI = MF->getRegInfo();
671   auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
672 
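  // Copy between the value register and the assigned spill register: a store
  // normally writes the value into an AGPR with V_ACCVGPR_WRITE_B32 and a load
  // reads it back with V_ACCVGPR_READ_B32; if the assigned register is itself
  // a VGPR (an AGPR value spilled to a VGPR) the direction is flipped.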
673   unsigned Dst = IsStore ? Reg : ValueReg;
674   unsigned Src = IsStore ? ValueReg : Reg;
675   unsigned Opc = (IsStore ^ TRI->isVGPR(MRI, Reg)) ? AMDGPU::V_ACCVGPR_WRITE_B32
676                                                    : AMDGPU::V_ACCVGPR_READ_B32;
677 
678   return BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(Opc), Dst)
679            .addReg(Src, getKillRegState(IsKill));
680 }
681 
682 // This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
683 // need to handle the case where an SGPR may need to be spilled while spilling.
684 static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
685                                       MachineFrameInfo &MFI,
686                                       MachineBasicBlock::iterator MI,
687                                       int Index,
688                                       int64_t Offset) {
689   const SIInstrInfo *TII = ST.getInstrInfo();
690   MachineBasicBlock *MBB = MI->getParent();
691   const DebugLoc &DL = MI->getDebugLoc();
692   bool IsStore = MI->mayStore();
693 
694   unsigned Opc = MI->getOpcode();
695   int LoadStoreOp = IsStore ?
696     getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
697   if (LoadStoreOp == -1)
698     return false;
699 
700   const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
701   if (spillVGPRtoAGPR(ST, MI, Index, 0, Reg->getReg(), false).getInstr())
702     return true;
703 
704   MachineInstrBuilder NewMI =
705       BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
706           .add(*Reg)
707           .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
708           .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
709           .addImm(Offset)
710           .addImm(0) // glc
711           .addImm(0) // slc
712           .addImm(0) // tfe
713           .addImm(0) // dlc
714           .addImm(0) // swz
715           .cloneMemRefs(*MI);
716 
717   const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
718                                                        AMDGPU::OpName::vdata_in);
719   if (VDataIn)
720     NewMI.add(*VDataIn);
721   return true;
722 }
723 
724 void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
725                                          unsigned LoadStoreOp,
726                                          int Index,
727                                          Register ValueReg,
728                                          bool IsKill,
729                                          MCRegister ScratchRsrcReg,
730                                          MCRegister ScratchOffsetReg,
731                                          int64_t InstOffset,
732                                          MachineMemOperand *MMO,
733                                          RegScavenger *RS) const {
734   MachineBasicBlock *MBB = MI->getParent();
735   MachineFunction *MF = MI->getParent()->getParent();
736   const SIInstrInfo *TII = ST.getInstrInfo();
737   const MachineFrameInfo &MFI = MF->getFrameInfo();
738   const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
739 
740   const MCInstrDesc &Desc = TII->get(LoadStoreOp);
741   const DebugLoc &DL = MI->getDebugLoc();
742   bool IsStore = Desc.mayStore();
743 
744   bool Scavenged = false;
745   MCRegister SOffset = ScratchOffsetReg;
746 
747   const unsigned EltSize = 4;
748   const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
749   unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / (EltSize * CHAR_BIT);
750   unsigned Size = NumSubRegs * EltSize;
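  // The value is transferred EltSize (4) bytes at a time in the loop below,
  // one 32-bit sub-register per buffer instruction, so e.g. a 128-bit tuple
  // takes four loads or stores.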
751   int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
752   int64_t ScratchOffsetRegDelta = 0;
753 
754   Align Alignment = MFI.getObjectAlign(Index);
755   const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();
756 
757   Register TmpReg =
758     hasAGPRs(RC) ? TII->getNamedOperand(*MI, AMDGPU::OpName::tmp)->getReg()
759                  : Register();
760 
761   assert((Offset % EltSize) == 0 && "unexpected VGPR spill offset");
762 
763   if (!isUInt<12>(Offset + Size - EltSize)) {
764     SOffset = MCRegister();
765 
766     // We currently only support spilling VGPRs to EltSize boundaries, meaning
767     // we can simplify the adjustment of Offset here to just scale with
768     // WavefrontSize.
769     Offset *= ST.getWavefrontSize();
770 
771     // We don't have access to the register scavenger if this function is called
772     // during PEI::scavengeFrameVirtualRegs().
773     if (RS)
774       SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
775 
776     if (!SOffset) {
777       // There are no free SGPRs, and we are in the process of spilling VGPRs
778       // too. Since we need a VGPR in order to spill SGPRs (this is true on
779       // SI/CI, and on VI it is true until we implement spilling using scalar
780       // stores), we have no way to free up an SGPR. Our solution here is to
781       // add the offset directly to the ScratchOffset or StackPtrOffset
782       // register, and then subtract the offset after the spill to return the
783       // register to its original value.
784       if (!ScratchOffsetReg)
785         ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
786       SOffset = ScratchOffsetReg;
787       ScratchOffsetRegDelta = Offset;
788     } else {
789       Scavenged = true;
790     }
791 
792     if (!SOffset)
793       report_fatal_error("could not scavenge SGPR to spill in entry function");
794 
795     if (ScratchOffsetReg == AMDGPU::NoRegister) {
796       BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset)
797           .addImm(Offset);
798     } else {
799       BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
800           .addReg(ScratchOffsetReg)
801           .addImm(Offset);
802     }
803 
804     Offset = 0;
805   }
806 
807   for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) {
808     Register SubReg = NumSubRegs == 1
809                           ? Register(ValueReg)
810                           : getSubReg(ValueReg, getSubRegFromChannel(i));
811 
812     unsigned SOffsetRegState = 0;
813     unsigned SrcDstRegState = getDefRegState(!IsStore);
814     if (i + 1 == e) {
815       SOffsetRegState |= getKillRegState(Scavenged);
816       // The last implicit use carries the "Kill" flag.
817       SrcDstRegState |= getKillRegState(IsKill);
818     }
819 
820     auto MIB = spillVGPRtoAGPR(ST, MI, Index, i, SubReg, IsKill);
821 
822     if (!MIB.getInstr()) {
823       unsigned FinalReg = SubReg;
824       if (TmpReg != AMDGPU::NoRegister) {
825         if (IsStore)
826           BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_READ_B32), TmpReg)
827             .addReg(SubReg, getKillRegState(IsKill));
828         SubReg = TmpReg;
829       }
830 
831       MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i);
832       MachineMemOperand *NewMMO =
833           MF->getMachineMemOperand(PInfo, MMO->getFlags(), EltSize,
834                                    commonAlignment(Alignment, EltSize * i));
835 
836       MIB = BuildMI(*MBB, MI, DL, Desc)
837                 .addReg(SubReg,
838                         getDefRegState(!IsStore) | getKillRegState(IsKill))
839                 .addReg(ScratchRsrcReg);
840       if (SOffset == AMDGPU::NoRegister) {
841         MIB.addImm(0);
842       } else {
843         MIB.addReg(SOffset, SOffsetRegState);
844       }
845       MIB.addImm(Offset)
846           .addImm(0) // glc
847           .addImm(0) // slc
848           .addImm(0) // tfe
849           .addImm(0) // dlc
850           .addImm(0) // swz
851           .addMemOperand(NewMMO);
852 
853       if (!IsStore && TmpReg != AMDGPU::NoRegister)
854         MIB = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32),
855                       FinalReg)
856           .addReg(TmpReg, RegState::Kill);
857     }
858 
859     if (NumSubRegs > 1)
860       MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
861   }
862 
863   if (ScratchOffsetRegDelta != 0) {
864     // Subtract the offset we added to the ScratchOffset register.
865     BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), SOffset)
866         .addReg(SOffset)
867         .addImm(ScratchOffsetRegDelta);
868   }
869 }
870 
871 // Generate a VMEM access which loads or stores the VGPR containing an SGPR
872 // spill such that all the lanes set in VGPRLanes are loaded or stored.
873 // This generates exec mask manipulation and will use SGPRs available in MI
874 // or lanes of the spill VGPR to save and restore the exec mask.
875 void SIRegisterInfo::buildSGPRSpillLoadStore(MachineBasicBlock::iterator MI,
876                                              int Index, int Offset,
877                                              unsigned EltSize, Register VGPR,
878                                              int64_t VGPRLanes,
879                                              RegScavenger *RS,
880                                              bool IsLoad) const {
881   MachineBasicBlock *MBB = MI->getParent();
882   MachineFunction *MF = MBB->getParent();
883   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
884   const SIInstrInfo *TII = ST.getInstrInfo();
885 
886   Register SuperReg = MI->getOperand(0).getReg();
887   const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
888   ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
889   unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
890   unsigned FirstPart = Offset * 32;
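  // Offset counts whole VGPRs (32 lanes each), so FirstPart is the index of
  // the first 32-bit split part handled by this call.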
891   unsigned ExecLane = 0;
892 
893   bool IsKill = MI->getOperand(0).isKill();
894   const DebugLoc &DL = MI->getDebugLoc();
895 
896   // Cannot handle load/store to EXEC
897   assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
898          SuperReg != AMDGPU::EXEC && "exec should never spill");
899 
900   // On Wave32 only handle EXEC_LO.
901   // On Wave64 only update EXEC_HI if there is sufficient space for a copy.
902   bool OnlyExecLo = isWave32 || NumSubRegs == 1 || SuperReg == AMDGPU::EXEC_HI;
903 
904   unsigned ExecMovOpc = OnlyExecLo ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
905   Register ExecReg = OnlyExecLo ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
906   Register SavedExecReg;
907 
908   // Backup EXEC
909   if (OnlyExecLo) {
910     SavedExecReg = NumSubRegs == 1
911                        ? SuperReg
912                        : getSubReg(SuperReg, SplitParts[FirstPart + ExecLane]);
913   } else {
914     // If src/dst is an odd size it is possible subreg0 is not aligned.
915     for (; ExecLane < (NumSubRegs - 1); ++ExecLane) {
916       SavedExecReg = getMatchingSuperReg(
917           getSubReg(SuperReg, SplitParts[FirstPart + ExecLane]), AMDGPU::sub0,
918           &AMDGPU::SReg_64_XEXECRegClass);
919       if (SavedExecReg)
920         break;
921     }
922   }
923   assert(SavedExecReg);
924   BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), SavedExecReg).addReg(ExecReg);
925 
926   // Setup EXEC
927   BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), ExecReg).addImm(VGPRLanes);
928 
929   // Load/store VGPR
930   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
931   assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill);
932 
933   Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
934                           ? getBaseRegister()
935                           : getFrameRegister(*MF);
936 
937   Align Alignment = FrameInfo.getObjectAlign(Index);
938   MachinePointerInfo PtrInfo =
939       MachinePointerInfo::getFixedStack(*MF, Index);
940   MachineMemOperand *MMO = MF->getMachineMemOperand(
941       PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore,
942       EltSize, Alignment);
943 
944   if (IsLoad) {
945     buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
946           Index,
947           VGPR, false,
948           MFI->getScratchRSrcReg(), FrameReg,
949           Offset * EltSize, MMO,
950           RS);
951   } else {
952     buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET, Index, VGPR,
953                         IsKill, MFI->getScratchRSrcReg(), FrameReg,
954                         Offset * EltSize, MMO, RS);
955     // This only ever adds one VGPR spill
956     MFI->addToSpilledVGPRs(1);
957   }
958 
959   // Restore EXEC
960   BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), ExecReg)
961       .addReg(SavedExecReg, getKillRegState(IsLoad || IsKill));
962 
963   // Restore clobbered SGPRs
964   if (IsLoad) {
965     // Nothing to do; register will be overwritten
966   } else if (!IsKill) {
967     // Restore SGPRs from appropriate VGPR lanes
968     if (!OnlyExecLo) {
969       BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
970               getSubReg(SuperReg, SplitParts[FirstPart + ExecLane + 1]))
971           .addReg(VGPR)
972           .addImm(ExecLane + 1);
973     }
974     BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
975             NumSubRegs == 1
976                 ? SavedExecReg
977                 : getSubReg(SuperReg, SplitParts[FirstPart + ExecLane]))
978         .addReg(VGPR, RegState::Kill)
979         .addImm(ExecLane);
980   }
981 }
982 
983 bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
984                                int Index,
985                                RegScavenger *RS,
986                                bool OnlyToVGPR) const {
987   MachineBasicBlock *MBB = MI->getParent();
988   MachineFunction *MF = MBB->getParent();
989   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
990   DenseSet<Register> SGPRSpillVGPRDefinedSet;
991 
992   ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
993     = MFI->getSGPRToVGPRSpills(Index);
994   bool SpillToVGPR = !VGPRSpills.empty();
995   if (OnlyToVGPR && !SpillToVGPR)
996     return false;
997 
998   const SIInstrInfo *TII = ST.getInstrInfo();
999 
1000   Register SuperReg = MI->getOperand(0).getReg();
1001   bool IsKill = MI->getOperand(0).isKill();
1002   const DebugLoc &DL = MI->getDebugLoc();
1003 
1004   assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() &&
1005                          SuperReg != MFI->getFrameOffsetReg()));
1006 
1007   assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
1008   assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1009          SuperReg != AMDGPU::EXEC && "exec should never spill");
1010 
1011   unsigned EltSize = 4;
1012   const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
1013 
1014   ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
1015   unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
1016 
1017   if (SpillToVGPR) {
1018     for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
1019       Register SubReg =
1020           NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);
1021       SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1022 
1023       // During SGPR spilling to VGPR, determine if the VGPR is defined. The
1024       // only circumstance in which we say it is undefined is when it is the
1025       // first spill to this VGPR in the first basic block.
1026       bool VGPRDefined = true;
1027       if (MBB == &MF->front())
1028         VGPRDefined = !SGPRSpillVGPRDefinedSet.insert(Spill.VGPR).second;
1029 
1030       // Mark the "old value of vgpr" input undef only if this is the first sgpr
1031       // spill to this specific vgpr in the first basic block.
1032       BuildMI(*MBB, MI, DL,
1033               TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
1034               Spill.VGPR)
1035         .addReg(SubReg, getKillRegState(IsKill))
1036         .addImm(Spill.Lane)
1037         .addReg(Spill.VGPR, VGPRDefined ? 0 : RegState::Undef);
1038 
1039       // FIXME: Since this spills to another register instead of an actual
1040       // frame index, we should delete the frame index when all references to
1041       // it are fixed.
1042     }
1043   } else {
1044     // Scavenged temporary VGPR to use. It must be scavenged once for any number
1045     // of spilled subregs.
1046     Register TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1047     RS->setRegUsed(TmpVGPR);
1048 
1049     // SubReg carries the "Kill" flag when SubReg == SuperReg.
1050     unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
1051 
1052     unsigned PerVGPR = 32;
1053     unsigned NumVGPRs = (NumSubRegs + (PerVGPR - 1)) / PerVGPR;
1054     int64_t VGPRLanes = (1LL << std::min(PerVGPR, NumSubRegs)) - 1LL;
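    // For example, spilling a 64-bit SGPR pair (NumSubRegs == 2) takes one
    // VGPR with VGPRLanes == 0x3, while a 1024-bit tuple (NumSubRegs == 32)
    // fills all 32 lanes of a single VGPR.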
1055 
1056     for (unsigned Offset = 0; Offset < NumVGPRs; ++Offset) {
1057       unsigned TmpVGPRFlags = RegState::Undef;
1058 
1059       // Write sub registers into the VGPR
1060       for (unsigned i = Offset * PerVGPR,
1061                     e = std::min((Offset + 1) * PerVGPR, NumSubRegs);
1062            i < e; ++i) {
1063         Register SubReg =
1064             NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);
1065 
1066         MachineInstrBuilder WriteLane =
1067             BuildMI(*MBB, MI, DL,
1068                     TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
1069                     TmpVGPR)
1070                 .addReg(SubReg, SubKillState)
1071                 .addImm(i % PerVGPR)
1072                 .addReg(TmpVGPR, TmpVGPRFlags);
1073         TmpVGPRFlags = 0;
1074 
1075         // There could be undef components of a spilled super register.
1076         // TODO: Can we detect this and skip the spill?
1077         if (NumSubRegs > 1) {
1078           // The last implicit use of the SuperReg carries the "Kill" flag.
1079           unsigned SuperKillState = 0;
1080           if (i + 1 == NumSubRegs)
1081             SuperKillState |= getKillRegState(IsKill);
1082           WriteLane.addReg(SuperReg, RegState::Implicit | SuperKillState);
1083         }
1084       }
1085 
1086       // Write out VGPR
1087       buildSGPRSpillLoadStore(MI, Index, Offset, EltSize, TmpVGPR, VGPRLanes,
1088                               RS, false);
1089     }
1090   }
1091 
1092   MI->eraseFromParent();
1093   MFI->addToSpilledSGPRs(NumSubRegs);
1094   return true;
1095 }
1096 
1097 bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
1098                                  int Index,
1099                                  RegScavenger *RS,
1100                                  bool OnlyToVGPR) const {
1101   MachineFunction *MF = MI->getParent()->getParent();
1102   MachineBasicBlock *MBB = MI->getParent();
1103   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1104 
1105   ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
1106     = MFI->getSGPRToVGPRSpills(Index);
1107   bool SpillToVGPR = !VGPRSpills.empty();
1108   if (OnlyToVGPR && !SpillToVGPR)
1109     return false;
1110 
1111   const SIInstrInfo *TII = ST.getInstrInfo();
1112   const DebugLoc &DL = MI->getDebugLoc();
1113 
1114   Register SuperReg = MI->getOperand(0).getReg();
1115 
1116   assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
1117   assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1118          SuperReg != AMDGPU::EXEC && "exec should never spill");
1119 
1120   unsigned EltSize = 4;
1121 
1122   const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
1123 
1124   ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
1125   unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
1126 
1127   if (SpillToVGPR) {
1128     for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
1129       Register SubReg =
1130           NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);
1131 
1132       SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1133       auto MIB =
1134         BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
1135                 SubReg)
1136         .addReg(Spill.VGPR)
1137         .addImm(Spill.Lane);
1138       if (NumSubRegs > 1 && i == 0)
1139         MIB.addReg(SuperReg, RegState::ImplicitDefine);
1140     }
1141   } else {
1142     Register TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1143     RS->setRegUsed(TmpVGPR);
1144 
1145     unsigned PerVGPR = 32;
1146     unsigned NumVGPRs = (NumSubRegs + (PerVGPR - 1)) / PerVGPR;
1147     int64_t VGPRLanes = (1LL << std::min(PerVGPR, NumSubRegs)) - 1LL;
1148 
1149     for (unsigned Offset = 0; Offset < NumVGPRs; ++Offset) {
1150       // Load in VGPR data
1151       buildSGPRSpillLoadStore(MI, Index, Offset, EltSize, TmpVGPR, VGPRLanes,
1152                               RS, true);
1153 
1154       // Unpack lanes
1155       for (unsigned i = Offset * PerVGPR,
1156                     e = std::min((Offset + 1) * PerVGPR, NumSubRegs);
1157            i < e; ++i) {
1158         Register SubReg =
1159             NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);
1160 
1161         bool LastSubReg = (i + 1 == e);
1162         auto MIB =
1163             BuildMI(*MBB, MI, DL,
1164                     TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32), SubReg)
1165                 .addReg(TmpVGPR, getKillRegState(LastSubReg))
1166                 .addImm(i);
1167         if (NumSubRegs > 1 && i == 0)
1168           MIB.addReg(SuperReg, RegState::ImplicitDefine);
1169       }
1170     }
1171   }
1172 
1173   MI->eraseFromParent();
1174   return true;
1175 }
1176 
1177 /// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
1178 /// a VGPR and the stack slot can be safely eliminated when all other users are
1179 /// handled.
1180 bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
1181   MachineBasicBlock::iterator MI,
1182   int FI,
1183   RegScavenger *RS) const {
1184   switch (MI->getOpcode()) {
1185   case AMDGPU::SI_SPILL_S1024_SAVE:
1186   case AMDGPU::SI_SPILL_S512_SAVE:
1187   case AMDGPU::SI_SPILL_S256_SAVE:
1188   case AMDGPU::SI_SPILL_S192_SAVE:
1189   case AMDGPU::SI_SPILL_S160_SAVE:
1190   case AMDGPU::SI_SPILL_S128_SAVE:
1191   case AMDGPU::SI_SPILL_S96_SAVE:
1192   case AMDGPU::SI_SPILL_S64_SAVE:
1193   case AMDGPU::SI_SPILL_S32_SAVE:
1194     return spillSGPR(MI, FI, RS, true);
1195   case AMDGPU::SI_SPILL_S1024_RESTORE:
1196   case AMDGPU::SI_SPILL_S512_RESTORE:
1197   case AMDGPU::SI_SPILL_S256_RESTORE:
1198   case AMDGPU::SI_SPILL_S192_RESTORE:
1199   case AMDGPU::SI_SPILL_S160_RESTORE:
1200   case AMDGPU::SI_SPILL_S128_RESTORE:
1201   case AMDGPU::SI_SPILL_S96_RESTORE:
1202   case AMDGPU::SI_SPILL_S64_RESTORE:
1203   case AMDGPU::SI_SPILL_S32_RESTORE:
1204     return restoreSGPR(MI, FI, RS, true);
1205   default:
1206     llvm_unreachable("not an SGPR spill instruction");
1207   }
1208 }
1209 
1210 void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
1211                                         int SPAdj, unsigned FIOperandNum,
1212                                         RegScavenger *RS) const {
1213   MachineFunction *MF = MI->getParent()->getParent();
1214   MachineBasicBlock *MBB = MI->getParent();
1215   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1216   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1217   const SIInstrInfo *TII = ST.getInstrInfo();
1218   DebugLoc DL = MI->getDebugLoc();
1219 
1220   assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");
1221 
1222   MachineOperand &FIOp = MI->getOperand(FIOperandNum);
1223   int Index = MI->getOperand(FIOperandNum).getIndex();
1224 
1225   Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
1226                           ? getBaseRegister()
1227                           : getFrameRegister(*MF);
1228 
1229   switch (MI->getOpcode()) {
1230     // SGPR register spill
1231     case AMDGPU::SI_SPILL_S1024_SAVE:
1232     case AMDGPU::SI_SPILL_S512_SAVE:
1233     case AMDGPU::SI_SPILL_S256_SAVE:
1234     case AMDGPU::SI_SPILL_S192_SAVE:
1235     case AMDGPU::SI_SPILL_S160_SAVE:
1236     case AMDGPU::SI_SPILL_S128_SAVE:
1237     case AMDGPU::SI_SPILL_S96_SAVE:
1238     case AMDGPU::SI_SPILL_S64_SAVE:
1239     case AMDGPU::SI_SPILL_S32_SAVE: {
1240       spillSGPR(MI, Index, RS);
1241       break;
1242     }
1243 
1244     // SGPR register restore
1245     case AMDGPU::SI_SPILL_S1024_RESTORE:
1246     case AMDGPU::SI_SPILL_S512_RESTORE:
1247     case AMDGPU::SI_SPILL_S256_RESTORE:
1248     case AMDGPU::SI_SPILL_S192_RESTORE:
1249     case AMDGPU::SI_SPILL_S160_RESTORE:
1250     case AMDGPU::SI_SPILL_S128_RESTORE:
1251     case AMDGPU::SI_SPILL_S96_RESTORE:
1252     case AMDGPU::SI_SPILL_S64_RESTORE:
1253     case AMDGPU::SI_SPILL_S32_RESTORE: {
1254       restoreSGPR(MI, Index, RS);
1255       break;
1256     }
1257 
1258     // VGPR register spill
1259     case AMDGPU::SI_SPILL_V1024_SAVE:
1260     case AMDGPU::SI_SPILL_V512_SAVE:
1261     case AMDGPU::SI_SPILL_V256_SAVE:
1262     case AMDGPU::SI_SPILL_V160_SAVE:
1263     case AMDGPU::SI_SPILL_V128_SAVE:
1264     case AMDGPU::SI_SPILL_V96_SAVE:
1265     case AMDGPU::SI_SPILL_V64_SAVE:
1266     case AMDGPU::SI_SPILL_V32_SAVE:
1267     case AMDGPU::SI_SPILL_A1024_SAVE:
1268     case AMDGPU::SI_SPILL_A512_SAVE:
1269     case AMDGPU::SI_SPILL_A128_SAVE:
1270     case AMDGPU::SI_SPILL_A64_SAVE:
1271     case AMDGPU::SI_SPILL_A32_SAVE: {
1272       const MachineOperand *VData = TII->getNamedOperand(*MI,
1273                                                          AMDGPU::OpName::vdata);
1274       assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1275              MFI->getStackPtrOffsetReg());
1276 
1277       buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
1278             Index,
1279             VData->getReg(), VData->isKill(),
1280             TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
1281             FrameReg,
1282             TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1283             *MI->memoperands_begin(),
1284             RS);
1285       MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
1286       MI->eraseFromParent();
1287       break;
1288     }
1289     case AMDGPU::SI_SPILL_V32_RESTORE:
1290     case AMDGPU::SI_SPILL_V64_RESTORE:
1291     case AMDGPU::SI_SPILL_V96_RESTORE:
1292     case AMDGPU::SI_SPILL_V128_RESTORE:
1293     case AMDGPU::SI_SPILL_V160_RESTORE:
1294     case AMDGPU::SI_SPILL_V256_RESTORE:
1295     case AMDGPU::SI_SPILL_V512_RESTORE:
1296     case AMDGPU::SI_SPILL_V1024_RESTORE:
1297     case AMDGPU::SI_SPILL_A32_RESTORE:
1298     case AMDGPU::SI_SPILL_A64_RESTORE:
1299     case AMDGPU::SI_SPILL_A128_RESTORE:
1300     case AMDGPU::SI_SPILL_A512_RESTORE:
1301     case AMDGPU::SI_SPILL_A1024_RESTORE: {
1302       const MachineOperand *VData = TII->getNamedOperand(*MI,
1303                                                          AMDGPU::OpName::vdata);
1304       assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1305              MFI->getStackPtrOffsetReg());
1306 
1307       buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
1308             Index,
1309             VData->getReg(), VData->isKill(),
1310             TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
1311             FrameReg,
1312             TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1313             *MI->memoperands_begin(),
1314             RS);
1315       MI->eraseFromParent();
1316       break;
1317     }
1318 
1319     default: {
1320       const DebugLoc &DL = MI->getDebugLoc();
1321       bool IsMUBUF = TII->isMUBUF(*MI);
1322 
1323       if (!IsMUBUF && !MFI->isEntryFunction()) {
1324         // Convert to a swizzled stack address by scaling by the wave size.
1325         //
1326         // In an entry function/kernel the offset is already swizzled.
1327 
1328         bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
1329         Register ResultReg =
1330             IsCopy ? MI->getOperand(0).getReg()
1331                    : RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1332 
1333         int64_t Offset = FrameInfo.getObjectOffset(Index);
1334         if (Offset == 0) {
1335           // XXX - This never happens because of the emergency scavenging slot at 0?
1336           BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
1337             .addImm(ST.getWavefrontSizeLog2())
1338             .addReg(FrameReg);
1339         } else {
1340           if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) {
1341             // Reuse ResultReg in intermediate step.
1342             Register ScaledReg = ResultReg;
1343 
1344             BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
1345                     ScaledReg)
1346               .addImm(ST.getWavefrontSizeLog2())
1347               .addReg(FrameReg);
1348 
1349             const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;
1350 
1351             // TODO: Fold if use instruction is another add of a constant.
1352             if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
1353               // FIXME: This can fail
1354               MIB.addImm(Offset);
1355               MIB.addReg(ScaledReg, RegState::Kill);
1356               if (!IsVOP2)
1357                 MIB.addImm(0); // clamp bit
1358             } else {
1359               assert(MIB->getOpcode() == AMDGPU::V_ADD_I32_e64 &&
1360                      "Need to reuse carry out register");
1361 
1362               // Use scavenged unused carry out as offset register.
1363               Register ConstOffsetReg;
1364               if (!isWave32)
1365                 ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0);
1366               else
1367                 ConstOffsetReg = MIB.getReg(1);
1368 
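              // Materialize the constant in the unused carry-out SGPR (the low
              // half of the 64-bit pair on wave64); the VOP3 add then consumes
              // it as a register operand.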
              BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
                .addImm(Offset);
              MIB.addReg(ConstOffsetReg, RegState::Kill);
              MIB.addReg(ScaledReg, RegState::Kill);
              MIB.addImm(0); // clamp bit
            }
          } else {
            // We have to produce a carry out, and there isn't a free SGPR pair
            // for it. We can keep the whole computation on the SALU to avoid
            // clobbering an additional register at the cost of an extra mov.

            // We may have 1 free scratch SGPR even though a carry out is
            // unavailable. Only one additional mov is needed.
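            //
            // Roughly the sequence emitted below:
            //   s_lshr_b32 s_tmp, fp, wave_size_log2
            //   s_add_u32  s_tmp, s_tmp, offset
            //   copy       v_result, s_tmp
            // where s_tmp is the scavenged SGPR, or the frame register itself
            // if none is free (see the undo below).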
            Register TmpScaledReg =
                RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
            Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg;

            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
              .addReg(FrameReg)
              .addImm(ST.getWavefrontSizeLog2());
            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), ScaledReg)
              .addReg(ScaledReg, RegState::Kill)
              .addImm(Offset);
            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
              .addReg(ScaledReg, RegState::Kill);

            // If there were truly no free SGPRs, we need to undo everything.
            if (!TmpScaledReg.isValid()) {
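              // ScaledReg aliases FrameReg here, so undo the add and the
              // shift to restore the original frame register value.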
              BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScaledReg)
                .addReg(ScaledReg, RegState::Kill)
                .addImm(Offset);
              BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
                .addReg(FrameReg)
                .addImm(ST.getWavefrontSizeLog2());
            }
          }
        }

        // Don't introduce an extra copy if we're just materializing the
        // address in a mov.
        if (IsCopy)
          MI->eraseFromParent();
        else
          FIOp.ChangeToRegister(ResultReg, false, false, true);
        return;
      }

      if (IsMUBUF) {
        // Disable offen so we don't need a 0 vgpr base.
        assert(static_cast<int>(FIOperandNum) ==
               AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::vaddr));

        auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset);
        assert((SOffset.isReg() &&
                SOffset.getReg() == MFI->getStackPtrOffsetReg()) ||
               (SOffset.isImm() && SOffset.getImm() == 0));
        if (SOffset.isReg()) {
          if (FrameReg == AMDGPU::NoRegister) {
            SOffset.ChangeToImmediate(0);
          } else {
            SOffset.setReg(FrameReg);
          }
        }

        int64_t Offset = FrameInfo.getObjectOffset(Index);
        int64_t OldImm
          = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
        int64_t NewOffset = OldImm + Offset;

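        // The folded offset is only usable if it fits in the unsigned 12-bit
        // MUBUF offset field, hence the isUInt<12> check.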
        if (isUInt<12>(NewOffset) &&
            buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) {
          MI->eraseFromParent();
          return;
        }
      }

      // If the offset is simply too big, don't convert to a scratch wave offset
      // relative index.

      int64_t Offset = FrameInfo.getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
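      // If the offset is not a legal immediate for this operand, materialize
      // it in a scavenged VGPR instead.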
      if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
        Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
          .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false, false, true);
      }
    }
  }
}

StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const {
  return AMDGPUInstPrinter::getRegisterName(Reg);
}

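// Map a value size in bits to the narrowest VGPR-based register class that
// can hold it; widths above 1024 bits have no class and yield a null result.
// The AGPR and SGPR variants below follow the same pattern.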
const TargetRegisterClass *
SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) {
  if (BitWidth == 1)
    return &AMDGPU::VReg_1RegClass;
  if (BitWidth <= 16)
    return &AMDGPU::VGPR_LO16RegClass;
  if (BitWidth <= 32)
    return &AMDGPU::VGPR_32RegClass;
  if (BitWidth <= 64)
    return &AMDGPU::VReg_64RegClass;
  if (BitWidth <= 96)
    return &AMDGPU::VReg_96RegClass;
  if (BitWidth <= 128)
    return &AMDGPU::VReg_128RegClass;
  if (BitWidth <= 160)
    return &AMDGPU::VReg_160RegClass;
  if (BitWidth <= 192)
    return &AMDGPU::VReg_192RegClass;
  if (BitWidth <= 256)
    return &AMDGPU::VReg_256RegClass;
  if (BitWidth <= 512)
    return &AMDGPU::VReg_512RegClass;
  if (BitWidth <= 1024)
    return &AMDGPU::VReg_1024RegClass;

  return nullptr;
}

const TargetRegisterClass *
SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) {
  if (BitWidth <= 16)
    return &AMDGPU::AGPR_LO16RegClass;
  if (BitWidth <= 32)
    return &AMDGPU::AGPR_32RegClass;
  if (BitWidth <= 64)
    return &AMDGPU::AReg_64RegClass;
  if (BitWidth <= 96)
    return &AMDGPU::AReg_96RegClass;
  if (BitWidth <= 128)
    return &AMDGPU::AReg_128RegClass;
  if (BitWidth <= 160)
    return &AMDGPU::AReg_160RegClass;
  if (BitWidth <= 192)
    return &AMDGPU::AReg_192RegClass;
  if (BitWidth <= 256)
    return &AMDGPU::AReg_256RegClass;
  if (BitWidth <= 512)
    return &AMDGPU::AReg_512RegClass;
  if (BitWidth <= 1024)
    return &AMDGPU::AReg_1024RegClass;

  return nullptr;
}

const TargetRegisterClass *
SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
  if (BitWidth <= 16)
    return &AMDGPU::SGPR_LO16RegClass;
  if (BitWidth <= 32)
    return &AMDGPU::SReg_32RegClass;
  if (BitWidth <= 64)
    return &AMDGPU::SReg_64RegClass;
  if (BitWidth <= 96)
    return &AMDGPU::SGPR_96RegClass;
  if (BitWidth <= 128)
    return &AMDGPU::SGPR_128RegClass;
  if (BitWidth <= 160)
    return &AMDGPU::SGPR_160RegClass;
  if (BitWidth <= 192)
    return &AMDGPU::SGPR_192RegClass;
  if (BitWidth <= 256)
    return &AMDGPU::SGPR_256RegClass;
  if (BitWidth <= 512)
    return &AMDGPU::SGPR_512RegClass;
  if (BitWidth <= 1024)
    return &AMDGPU::SGPR_1024RegClass;

  return nullptr;
}

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *
SIRegisterInfo::getPhysRegClass(MCRegister Reg) const {
  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_LO16RegClass,
    &AMDGPU::VGPR_HI16RegClass,
    &AMDGPU::SReg_LO16RegClass,
    &AMDGPU::AGPR_LO16RegClass,
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::AGPR_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::AReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::SReg_96RegClass,
    &AMDGPU::AReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::AReg_128RegClass,
    &AMDGPU::VReg_160RegClass,
    &AMDGPU::SReg_160RegClass,
    &AMDGPU::AReg_160RegClass,
    &AMDGPU::VReg_192RegClass,
    &AMDGPU::SReg_192RegClass,
    &AMDGPU::AReg_192RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::AReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass,
    &AMDGPU::AReg_512RegClass,
    &AMDGPU::SReg_1024RegClass,
    &AMDGPU::VReg_1024RegClass,
    &AMDGPU::AReg_1024RegClass,
    &AMDGPU::SCC_CLASSRegClass,
    &AMDGPU::Pseudo_SReg_32RegClass,
    &AMDGPU::Pseudo_SReg_128RegClass,
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

// TODO: It might be helpful to have some target-specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  if (Size == 16) {
    return getCommonSubClass(&AMDGPU::VGPR_LO16RegClass, RC) != nullptr ||
           getCommonSubClass(&AMDGPU::VGPR_HI16RegClass, RC) != nullptr;
  }
  const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
  if (!VRC) {
    assert(Size < 32 && "Invalid register class size");
    return false;
  }
  return getCommonSubClass(VRC, RC) != nullptr;
}

bool SIRegisterInfo::hasAGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  if (Size < 16)
    return false;
  const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
  if (!ARC) {
    assert(getVGPRClassForBitWidth(Size) && "Invalid register class size");
    return false;
  }
  return getCommonSubClass(ARC, RC) != nullptr;
}

const TargetRegisterClass *
SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const {
  unsigned Size = getRegSizeInBits(*SRC);
  const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
  assert(VRC && "Invalid register class size");
  return VRC;
}

const TargetRegisterClass *
SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const {
  unsigned Size = getRegSizeInBits(*SRC);
  const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
  assert(ARC && "Invalid register class size");
  return ARC;
}

const TargetRegisterClass *
SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const {
  unsigned Size = getRegSizeInBits(*VRC);
  if (Size == 32)
    return &AMDGPU::SGPR_32RegClass;
  const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size);
  assert(SRC && "Invalid register class size");
  return SRC;
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // We can assume that each lane corresponds to one 32-bit register.
  unsigned Size = getNumChannelsFromSubReg(SubIdx) * 32;
  if (isSGPRClass(RC)) {
    if (Size == 32)
      RC = &AMDGPU::SGPR_32RegClass;
    else
      RC = getSGPRClassForBitWidth(Size);
  } else if (hasAGPRs(RC)) {
    RC = getAGPRClassForBitWidth(Size);
  } else {
    RC = getVGPRClassForBitWidth(Size);
  }
  assert(RC && "Invalid sub-register class size");
  return RC;
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
      OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return !ST.hasMFMAInlineLiteralBug();

  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want to
  // stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so we
  // only want to stop on the most basic of copies between the same register
  // class.
  //
  // e.g. if we have something like
  // %0 = ...
  // %1 = ...
  // %2 = REG_SEQUENCE %0, sub0, %1, sub1
  // %3 = COPY %2, sub0
  //
  // We want to look through the COPY to find:
  //  => %3 = COPY %0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}

/// Returns the lowest register that is not used at any point in the function.
/// If all registers are used, then this function will return
/// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, then return the
/// highest unused register.
MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                              const TargetRegisterClass *RC,
                                              const MachineFunction &MF,
                                              bool ReserveHighestVGPR) const {
  if (ReserveHighestVGPR) {
    for (MCRegister Reg : reverse(*RC))
      if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
        return Reg;
  } else {
    for (MCRegister Reg : *RC)
      if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
        return Reg;
  }
  return MCRegister();
}

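// Returns the sub-register indices that split a register of class \p RC into
// pieces of \p EltSize bytes, e.g. splitting a 128-bit register into 64-bit
// elements gives {sub0_sub1, sub2_sub3}. RegSplitParts is indexed by the
// element size in dwords minus one.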
ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
                                                   unsigned EltSize) const {
  const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC);
  assert(RegBitWidth >= 32 && RegBitWidth <= 1024);

  const unsigned RegDWORDs = RegBitWidth / 32;
  const unsigned EltDWORDs = EltSize / 4;
  assert(RegSplitParts.size() + 1 >= EltDWORDs);

  const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1];
  const unsigned NumParts = RegDWORDs / EltDWORDs;

  return makeArrayRef(Parts.data(), NumParts);
}

const TargetRegisterClass*
SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
                                  Register Reg) const {
  return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegClass(Reg);
}

bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
                            Register Reg) const {
  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
  // Registers without classes are unaddressable, SGPR-like registers.
  return RC && hasVGPRs(RC);
}

bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
                            Register Reg) const {
  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);

  // Registers without classes are unaddressable, SGPR-like registers.
  return RC && hasAGPRs(RC);
}

bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SubReg,
                                    const TargetRegisterClass *DstRC,
                                    unsigned DstSubReg,
                                    const TargetRegisterClass *NewRC,
                                    LiveIntervals &LIS) const {
  unsigned SrcSize = getRegSizeInBits(*SrcRC);
  unsigned DstSize = getRegSizeInBits(*DstRC);
  unsigned NewSize = getRegSizeInBits(*NewRC);

  // Do not increase the size of registers beyond a dword: we would need to
  // allocate adjacent registers and constrain regalloc more than needed.

  // Always allow dword coalescing.
  if (SrcSize <= 32 || DstSize <= 32)
    return true;

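  // Otherwise only reject the coalesce when the combined register would be
  // wider than both of the original registers.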
  return NewSize <= DstSize || NewSize <= SrcSize;
}

unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
                                                       MF.getFunction());
  switch (RC->getID()) {
  default:
    return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF);
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VGPR_LO16RegClassID:
  case AMDGPU::VGPR_HI16RegClassID:
    return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::SGPR_LO16RegClassID:
    return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
  }
}

unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 ||
      Idx == AMDGPU::RegisterPressureSets::AGPR_32)
    return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  if (Idx == AMDGPU::RegisterPressureSets::SReg_32)
    return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  llvm_unreachable("Unexpected register pressure set!");
}

const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
  static const int Empty[] = { -1 };

  if (RegPressureIgnoredUnits[RegUnit])
    return Empty;

  return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit);
}

MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
  // Not a callee-saved register.
  return AMDGPU::SGPR30_SGPR31;
}

const TargetRegisterClass *
SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
                                         const RegisterBank &RB,
                                         const MachineRegisterInfo &MRI) const {
  switch (RB.getID()) {
  case AMDGPU::VGPRRegBankID:
    return getVGPRClassForBitWidth(std::max(32u, Size));
  case AMDGPU::VCCRegBankID:
    assert(Size == 1);
    return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
                    : &AMDGPU::SReg_64_XEXECRegClass;
  case AMDGPU::SGPRRegBankID:
    return getSGPRClassForBitWidth(std::max(32u, Size));
  case AMDGPU::AGPRRegBankID:
    return getAGPRClassForBitWidth(std::max(32u, Size));
  default:
    llvm_unreachable("unknown register bank");
  }
}

const TargetRegisterClass *
SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
                                         const MachineRegisterInfo &MRI) const {
  const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
  if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>())
    return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI);

  const TargetRegisterClass *RC = RCOrRB.get<const TargetRegisterClass*>();
  return getAllocatableClass(RC);
}

MCRegister SIRegisterInfo::getVCC() const {
  return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
}

const TargetRegisterClass *
SIRegisterInfo::getRegClass(unsigned RCID) const {
  switch ((int)RCID) {
  case AMDGPU::SReg_1RegClassID:
    return getBoolRC();
  case AMDGPU::SReg_1_XEXECRegClassID:
    return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
      : &AMDGPU::SReg_64_XEXECRegClass;
  case -1:
    return nullptr;
  default:
    return AMDGPUGenRegisterInfo::getRegClass(RCID);
  }
}

// Find the reaching definition of a register.
MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg,
                                              MachineInstr &Use,
                                              MachineRegisterInfo &MRI,
                                              LiveIntervals *LIS) const {
  auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
  SlotIndex UseIdx = LIS->getInstructionIndex(Use);
  SlotIndex DefIdx;

  if (Reg.isVirtual()) {
    if (!LIS->hasInterval(Reg))
      return nullptr;
    LiveInterval &LI = LIS->getInterval(Reg);
    LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
                                  : MRI.getMaxLaneMaskForVReg(Reg);
    VNInfo *V = nullptr;
    if (LI.hasSubRanges()) {
      for (auto &S : LI.subranges()) {
        if ((S.LaneMask & SubLanes) == SubLanes) {
          V = S.getVNInfoAt(UseIdx);
          break;
        }
      }
    } else {
      V = LI.getVNInfoAt(UseIdx);
    }
    if (!V)
      return nullptr;
    DefIdx = V->def;
  } else {
    // Find last def.
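    // A physical register may span several register units; every unit must
    // have a reaching value at the use, and we keep the latest such def (each
    // new candidate replaces the old one when the old def dominates it).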
    for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units) {
      LiveRange &LR = LIS->getRegUnit(*Units);
      if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
        if (!DefIdx.isValid() ||
            MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
                          LIS->getInstructionFromIndex(V->def)))
          DefIdx = V->def;
      } else {
        return nullptr;
      }
    }
  }

  MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);

  if (!Def || !MDT.dominates(Def, &Use))
    return nullptr;

  assert(Def->modifiesRegister(Reg, this));

  return Def;
}

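// Given a 16-bit physical register, return the 32-bit register that contains
// it as its lo16 (or, for VGPRs, hi16) sub-register, or NoRegister if there
// is none.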
MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const {
  assert(getRegSizeInBits(*getPhysRegClass(Reg)) <= 32);

  for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass,
                                         AMDGPU::SReg_32RegClass,
                                         AMDGPU::AGPR_32RegClass } ) {
    if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC))
      return Super;
  }
  if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16,
                                            &AMDGPU::VGPR_32RegClass)) {
    return Super;
  }

  return AMDGPU::NoRegister;
}

bool SIRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
  switch (PhysReg) {
  case AMDGPU::SGPR_NULL:
  case AMDGPU::SRC_SHARED_BASE:
  case AMDGPU::SRC_PRIVATE_BASE:
  case AMDGPU::SRC_SHARED_LIMIT:
  case AMDGPU::SRC_PRIVATE_LIMIT:
    return true;
  default:
    return false;
  }
}

ArrayRef<MCPhysReg>
SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

ArrayRef<MCPhysReg>
SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));
}

ArrayRef<MCPhysReg>
SIRegisterInfo::getAllVGPR32(const MachineFunction &MF) const {
  return makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), ST.getMaxNumVGPRs(MF));
}