1f4a2713aSLionel Sambuc //===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
2f4a2713aSLionel Sambuc //
3f4a2713aSLionel Sambuc //                     The LLVM Compiler Infrastructure
4f4a2713aSLionel Sambuc //
5f4a2713aSLionel Sambuc // This file is distributed under the University of Illinois Open Source
6f4a2713aSLionel Sambuc // License. See LICENSE.TXT for details.
7f4a2713aSLionel Sambuc //
8f4a2713aSLionel Sambuc //===----------------------------------------------------------------------===//
9f4a2713aSLionel Sambuc //
10f4a2713aSLionel Sambuc /// \file
11f4a2713aSLionel Sambuc /// \brief Implementation of the TargetInstrInfo class that is common to all
12f4a2713aSLionel Sambuc /// AMD GPUs.
13f4a2713aSLionel Sambuc //
14f4a2713aSLionel Sambuc //===----------------------------------------------------------------------===//
15f4a2713aSLionel Sambuc 
16f4a2713aSLionel Sambuc #include "AMDGPUInstrInfo.h"
17f4a2713aSLionel Sambuc #include "AMDGPURegisterInfo.h"
18f4a2713aSLionel Sambuc #include "AMDGPUTargetMachine.h"
19f4a2713aSLionel Sambuc #include "llvm/CodeGen/MachineFrameInfo.h"
20f4a2713aSLionel Sambuc #include "llvm/CodeGen/MachineInstrBuilder.h"
21f4a2713aSLionel Sambuc #include "llvm/CodeGen/MachineRegisterInfo.h"
22f4a2713aSLionel Sambuc 
23*0a6a1f1dSLionel Sambuc using namespace llvm;
24*0a6a1f1dSLionel Sambuc 
25f4a2713aSLionel Sambuc #define GET_INSTRINFO_CTOR_DTOR
26f4a2713aSLionel Sambuc #define GET_INSTRINFO_NAMED_OPS
27f4a2713aSLionel Sambuc #define GET_INSTRMAP_INFO
28f4a2713aSLionel Sambuc #include "AMDGPUGenInstrInfo.inc"
29f4a2713aSLionel Sambuc 
// Pin the vtable to this file.  Out-of-line definition of the anchor keeps
// the class's vtable (and debug info) emitted in this translation unit only.
void AMDGPUInstrInfo::anchor() {}
32f4a2713aSLionel Sambuc 
// Construct the common AMDGPU instruction info.  The -1/-1 arguments tell the
// TableGen'd base class there are no call-frame setup/destroy opcodes; the
// register info and subtarget references are retained for later queries.
AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &st)
  : AMDGPUGenInstrInfo(-1,-1), RI(st), ST(st) { }
35f4a2713aSLionel Sambuc 
// Accessor for the register info object owned by this instruction info.
const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}
39f4a2713aSLionel Sambuc 
// TargetInstrInfo hook.  Not implemented for AMDGPU: no opcode is reported as
// a coalescable extension, so SrcReg/DstReg/SubIdx are never written.
bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                           unsigned &SrcReg, unsigned &DstReg,
                                           unsigned &SubIdx) const {
// TODO: Implement this function
  return false;
}
46f4a2713aSLionel Sambuc 
// TargetInstrInfo hook.  Not implemented: returning 0 reports that MI is
// never recognized as a simple stack-slot load, and FrameIndex is untouched.
unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                             int &FrameIndex) const {
// TODO: Implement this function
  return 0;
}
52f4a2713aSLionel Sambuc 
// Post-frame-index-elimination variant of isLoadFromStackSlot.  Not
// implemented; always reports "not a stack-slot load" (0).
unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                   int &FrameIndex) const {
// TODO: Implement this function
  return 0;
}
58f4a2713aSLionel Sambuc 
// TargetInstrInfo hook.  Not implemented: never identifies a stack-slot load
// buried inside MI, so MMO/FrameIndex are left unmodified.
bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
// TODO: Implement this function
  return false;
}
// TargetInstrInfo hook.  Not implemented; 0 means MI is never treated as a
// simple stack-slot store.
unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
// TODO: Implement this function
  return 0;
}
// Post-frame-index-elimination variant of isStoreFromStackSlot.  Not
// implemented; always reports "not a stack-slot store" (0).
unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
// TODO: Implement this function
  return 0;
}
// TargetInstrInfo hook.  Not implemented: never identifies a stack-slot store
// buried inside MI, so MMO/FrameIndex are left unmodified.
bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
// TODO: Implement this function
  return false;
}
81f4a2713aSLionel Sambuc 
82f4a2713aSLionel Sambuc MachineInstr *
convertToThreeAddress(MachineFunction::iterator & MFI,MachineBasicBlock::iterator & MBBI,LiveVariables * LV) const83f4a2713aSLionel Sambuc AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
84f4a2713aSLionel Sambuc                                       MachineBasicBlock::iterator &MBBI,
85f4a2713aSLionel Sambuc                                       LiveVariables *LV) const {
86f4a2713aSLionel Sambuc // TODO: Implement this function
87*0a6a1f1dSLionel Sambuc   return nullptr;
88f4a2713aSLionel Sambuc }
89f4a2713aSLionel Sambuc 
90f4a2713aSLionel Sambuc void
storeRegToStackSlot(MachineBasicBlock & MBB,MachineBasicBlock::iterator MI,unsigned SrcReg,bool isKill,int FrameIndex,const TargetRegisterClass * RC,const TargetRegisterInfo * TRI) const91f4a2713aSLionel Sambuc AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
92f4a2713aSLionel Sambuc                                     MachineBasicBlock::iterator MI,
93f4a2713aSLionel Sambuc                                     unsigned SrcReg, bool isKill,
94f4a2713aSLionel Sambuc                                     int FrameIndex,
95f4a2713aSLionel Sambuc                                     const TargetRegisterClass *RC,
96f4a2713aSLionel Sambuc                                     const TargetRegisterInfo *TRI) const {
97*0a6a1f1dSLionel Sambuc   llvm_unreachable("Not Implemented");
98f4a2713aSLionel Sambuc }
99f4a2713aSLionel Sambuc 
100f4a2713aSLionel Sambuc void
loadRegFromStackSlot(MachineBasicBlock & MBB,MachineBasicBlock::iterator MI,unsigned DestReg,int FrameIndex,const TargetRegisterClass * RC,const TargetRegisterInfo * TRI) const101f4a2713aSLionel Sambuc AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
102f4a2713aSLionel Sambuc                                      MachineBasicBlock::iterator MI,
103f4a2713aSLionel Sambuc                                      unsigned DestReg, int FrameIndex,
104f4a2713aSLionel Sambuc                                      const TargetRegisterClass *RC,
105f4a2713aSLionel Sambuc                                      const TargetRegisterInfo *TRI) const {
106*0a6a1f1dSLionel Sambuc   llvm_unreachable("Not Implemented");
107f4a2713aSLionel Sambuc }
108f4a2713aSLionel Sambuc 
// Expand the RegisterLoad/RegisterStore pseudos (used for indirect register
// addressing) after register allocation.  Returns true and erases MI when the
// pseudo was lowered; returns false for any other instruction.
bool AMDGPUInstrInfo::expandPostRAPseudo (MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                               AMDGPU::OpName::addr);
   // addr is a custom operand with multiple MI operands, and only the
   // first MI operand is given a name.
  int RegOpIdx = OffsetOpIdx + 1;
  int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                             AMDGPU::OpName::chan);
  if (isRegisterLoad(*MI)) {
    int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::dst);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    // Combine register index and channel into a flat indirect address.
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      // Offset is the indirect base itself, so the source is a statically
      // known register of the indirect address class: emit a plain move.
      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                    getIndirectAddrRegClass()->getRegister(Address));
    } else {
      // Dynamic offset: emit a real indirect read.
      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                        Address, OffsetReg);
    }
  } else if (isRegisterStore(*MI)) {
    int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::val);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      // Statically known destination register: emit a plain move.
      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                    MI->getOperand(ValOpIdx).getReg());
    } else {
      // Dynamic offset: emit a real indirect write.
      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                         calculateIndirectAddress(RegIndex, Channel),
                         OffsetReg);
    }
  } else {
    // Not one of the pseudos this hook handles.
    return false;
  }

  // The pseudo has been replaced by the instructions built above.
  MBB->erase(MI);
  return true;
}
154f4a2713aSLionel Sambuc 
155f4a2713aSLionel Sambuc 
156f4a2713aSLionel Sambuc MachineInstr *
foldMemoryOperandImpl(MachineFunction & MF,MachineInstr * MI,const SmallVectorImpl<unsigned> & Ops,int FrameIndex) const157f4a2713aSLionel Sambuc AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
158f4a2713aSLionel Sambuc                                       MachineInstr *MI,
159f4a2713aSLionel Sambuc                                       const SmallVectorImpl<unsigned> &Ops,
160f4a2713aSLionel Sambuc                                       int FrameIndex) const {
161f4a2713aSLionel Sambuc // TODO: Implement this function
162*0a6a1f1dSLionel Sambuc   return nullptr;
163f4a2713aSLionel Sambuc }
164f4a2713aSLionel Sambuc MachineInstr*
foldMemoryOperandImpl(MachineFunction & MF,MachineInstr * MI,const SmallVectorImpl<unsigned> & Ops,MachineInstr * LoadMI) const165f4a2713aSLionel Sambuc AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
166f4a2713aSLionel Sambuc                                       MachineInstr *MI,
167f4a2713aSLionel Sambuc                                       const SmallVectorImpl<unsigned> &Ops,
168f4a2713aSLionel Sambuc                                       MachineInstr *LoadMI) const {
169f4a2713aSLionel Sambuc   // TODO: Implement this function
170*0a6a1f1dSLionel Sambuc   return nullptr;
171f4a2713aSLionel Sambuc }
172f4a2713aSLionel Sambuc bool
canFoldMemoryOperand(const MachineInstr * MI,const SmallVectorImpl<unsigned> & Ops) const173f4a2713aSLionel Sambuc AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
174f4a2713aSLionel Sambuc                                      const SmallVectorImpl<unsigned> &Ops) const {
175f4a2713aSLionel Sambuc   // TODO: Implement this function
176f4a2713aSLionel Sambuc   return false;
177f4a2713aSLionel Sambuc }
178f4a2713aSLionel Sambuc bool
unfoldMemoryOperand(MachineFunction & MF,MachineInstr * MI,unsigned Reg,bool UnfoldLoad,bool UnfoldStore,SmallVectorImpl<MachineInstr * > & NewMIs) const179f4a2713aSLionel Sambuc AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
180f4a2713aSLionel Sambuc                                  unsigned Reg, bool UnfoldLoad,
181f4a2713aSLionel Sambuc                                  bool UnfoldStore,
182f4a2713aSLionel Sambuc                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
183f4a2713aSLionel Sambuc   // TODO: Implement this function
184f4a2713aSLionel Sambuc   return false;
185f4a2713aSLionel Sambuc }
186f4a2713aSLionel Sambuc 
187f4a2713aSLionel Sambuc bool
unfoldMemoryOperand(SelectionDAG & DAG,SDNode * N,SmallVectorImpl<SDNode * > & NewNodes) const188f4a2713aSLionel Sambuc AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
189f4a2713aSLionel Sambuc                                     SmallVectorImpl<SDNode*> &NewNodes) const {
190f4a2713aSLionel Sambuc   // TODO: Implement this function
191f4a2713aSLionel Sambuc   return false;
192f4a2713aSLionel Sambuc }
193f4a2713aSLionel Sambuc 
194f4a2713aSLionel Sambuc unsigned
getOpcodeAfterMemoryUnfold(unsigned Opc,bool UnfoldLoad,bool UnfoldStore,unsigned * LoadRegIndex) const195f4a2713aSLionel Sambuc AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
196f4a2713aSLionel Sambuc                                            bool UnfoldLoad, bool UnfoldStore,
197f4a2713aSLionel Sambuc                                            unsigned *LoadRegIndex) const {
198f4a2713aSLionel Sambuc   // TODO: Implement this function
199f4a2713aSLionel Sambuc   return 0;
200f4a2713aSLionel Sambuc }
201f4a2713aSLionel Sambuc 
// Always let the scheduler try to cluster loads on AMD GPUs.
bool AMDGPUInstrInfo::enableClusterLoads() const {
  return true;
}
205*0a6a1f1dSLionel Sambuc 
206*0a6a1f1dSLionel Sambuc // FIXME: This behaves strangely. If, for example, you have 32 load + stores,
207*0a6a1f1dSLionel Sambuc // the first 16 loads will be interleaved with the stores, and the next 16 will
208*0a6a1f1dSLionel Sambuc // be clustered as expected. It should really split into 2 16 store batches.
209*0a6a1f1dSLionel Sambuc //
210*0a6a1f1dSLionel Sambuc // Loads are clustered until this returns false, rather than trying to schedule
211*0a6a1f1dSLionel Sambuc // groups of stores. This also means we have to deal with saying different
212*0a6a1f1dSLionel Sambuc // address space loads should be clustered, and ones which might cause bank
213*0a6a1f1dSLionel Sambuc // conflicts.
214*0a6a1f1dSLionel Sambuc //
215*0a6a1f1dSLionel Sambuc // This might be deprecated so it might not be worth that much effort to fix.
shouldScheduleLoadsNear(SDNode * Load0,SDNode * Load1,int64_t Offset0,int64_t Offset1,unsigned NumLoads) const216*0a6a1f1dSLionel Sambuc bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
217*0a6a1f1dSLionel Sambuc                                               int64_t Offset0, int64_t Offset1,
218f4a2713aSLionel Sambuc                                               unsigned NumLoads) const {
219*0a6a1f1dSLionel Sambuc   assert(Offset1 > Offset0 &&
220*0a6a1f1dSLionel Sambuc          "Second offset should be larger than first offset!");
221*0a6a1f1dSLionel Sambuc   // If we have less than 16 loads in a row, and the offsets are within 64
222*0a6a1f1dSLionel Sambuc   // bytes, then schedule together.
223*0a6a1f1dSLionel Sambuc 
224*0a6a1f1dSLionel Sambuc   // A cacheline is 64 bytes (for global memory).
225*0a6a1f1dSLionel Sambuc   return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
226f4a2713aSLionel Sambuc }
227f4a2713aSLionel Sambuc 
228f4a2713aSLionel Sambuc bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> & Cond) const229f4a2713aSLionel Sambuc AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
230f4a2713aSLionel Sambuc   const {
231f4a2713aSLionel Sambuc   // TODO: Implement this function
232f4a2713aSLionel Sambuc   return true;
233f4a2713aSLionel Sambuc }
// TargetInstrInfo hook.  Not implemented: no noop instruction is emitted.
void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}
238f4a2713aSLionel Sambuc 
// TargetInstrInfo hook.  Not implemented: no instruction is reported as
// currently predicated.
bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}
243f4a2713aSLionel Sambuc bool
SubsumesPredicate(const SmallVectorImpl<MachineOperand> & Pred1,const SmallVectorImpl<MachineOperand> & Pred2) const244f4a2713aSLionel Sambuc AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
245f4a2713aSLionel Sambuc                                   const SmallVectorImpl<MachineOperand> &Pred2)
246f4a2713aSLionel Sambuc   const {
247f4a2713aSLionel Sambuc   // TODO: Implement this function
248f4a2713aSLionel Sambuc   return false;
249f4a2713aSLionel Sambuc }
250f4a2713aSLionel Sambuc 
// TargetInstrInfo hook.  Not implemented: no instruction is reported as
// defining a predicate, and Pred is never populated.
bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                      std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}
256f4a2713aSLionel Sambuc 
// TargetInstrInfo hook.  Defers entirely to the isPredicable flag set by
// TableGen on the instruction description.
bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}
261f4a2713aSLionel Sambuc 
262f4a2713aSLionel Sambuc bool
isSafeToMoveRegClassDefs(const TargetRegisterClass * RC) const263f4a2713aSLionel Sambuc AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
264f4a2713aSLionel Sambuc   // TODO: Implement this function
265f4a2713aSLionel Sambuc   return true;
266f4a2713aSLionel Sambuc }
267f4a2713aSLionel Sambuc 
// Test the TableGen-set TSFlags bit marking MI as a register-store pseudo.
bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}
271f4a2713aSLionel Sambuc 
// Test the TableGen-set TSFlags bit marking MI as a register-load pseudo.
bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}
275f4a2713aSLionel Sambuc 
getIndirectIndexBegin(const MachineFunction & MF) const276f4a2713aSLionel Sambuc int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
277f4a2713aSLionel Sambuc   const MachineRegisterInfo &MRI = MF.getRegInfo();
278f4a2713aSLionel Sambuc   const MachineFrameInfo *MFI = MF.getFrameInfo();
279f4a2713aSLionel Sambuc   int Offset = -1;
280f4a2713aSLionel Sambuc 
281f4a2713aSLionel Sambuc   if (MFI->getNumObjects() == 0) {
282f4a2713aSLionel Sambuc     return -1;
283f4a2713aSLionel Sambuc   }
284f4a2713aSLionel Sambuc 
285f4a2713aSLionel Sambuc   if (MRI.livein_empty()) {
286f4a2713aSLionel Sambuc     return 0;
287f4a2713aSLionel Sambuc   }
288f4a2713aSLionel Sambuc 
289f4a2713aSLionel Sambuc   const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
290f4a2713aSLionel Sambuc   for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
291f4a2713aSLionel Sambuc                                             LE = MRI.livein_end();
292f4a2713aSLionel Sambuc                                             LI != LE; ++LI) {
293f4a2713aSLionel Sambuc     unsigned Reg = LI->first;
294f4a2713aSLionel Sambuc     if (TargetRegisterInfo::isVirtualRegister(Reg) ||
295f4a2713aSLionel Sambuc         !IndirectRC->contains(Reg))
296f4a2713aSLionel Sambuc       continue;
297f4a2713aSLionel Sambuc 
298f4a2713aSLionel Sambuc     unsigned RegIndex;
299f4a2713aSLionel Sambuc     unsigned RegEnd;
300f4a2713aSLionel Sambuc     for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
301f4a2713aSLionel Sambuc                                                           ++RegIndex) {
302f4a2713aSLionel Sambuc       if (IndirectRC->getRegister(RegIndex) == Reg)
303f4a2713aSLionel Sambuc         break;
304f4a2713aSLionel Sambuc     }
305f4a2713aSLionel Sambuc     Offset = std::max(Offset, (int)RegIndex);
306f4a2713aSLionel Sambuc   }
307f4a2713aSLionel Sambuc 
308f4a2713aSLionel Sambuc   return Offset + 1;
309f4a2713aSLionel Sambuc }
310f4a2713aSLionel Sambuc 
getIndirectIndexEnd(const MachineFunction & MF) const311f4a2713aSLionel Sambuc int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
312f4a2713aSLionel Sambuc   int Offset = 0;
313f4a2713aSLionel Sambuc   const MachineFrameInfo *MFI = MF.getFrameInfo();
314f4a2713aSLionel Sambuc 
315f4a2713aSLionel Sambuc   // Variable sized objects are not supported
316f4a2713aSLionel Sambuc   assert(!MFI->hasVarSizedObjects());
317f4a2713aSLionel Sambuc 
318f4a2713aSLionel Sambuc   if (MFI->getNumObjects() == 0) {
319f4a2713aSLionel Sambuc     return -1;
320f4a2713aSLionel Sambuc   }
321f4a2713aSLionel Sambuc 
322*0a6a1f1dSLionel Sambuc   Offset = MF.getTarget()
323*0a6a1f1dSLionel Sambuc                .getSubtargetImpl()
324*0a6a1f1dSLionel Sambuc                ->getFrameLowering()
325*0a6a1f1dSLionel Sambuc                ->getFrameIndexOffset(MF, -1);
326f4a2713aSLionel Sambuc 
327f4a2713aSLionel Sambuc   return getIndirectIndexBegin(MF) + Offset;
328f4a2713aSLionel Sambuc }
329f4a2713aSLionel Sambuc 
getMaskedMIMGOp(uint16_t Opcode,unsigned Channels) const330f4a2713aSLionel Sambuc int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
331f4a2713aSLionel Sambuc   switch (Channels) {
332f4a2713aSLionel Sambuc   default: return Opcode;
333f4a2713aSLionel Sambuc   case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
334f4a2713aSLionel Sambuc   case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
335f4a2713aSLionel Sambuc   case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
336f4a2713aSLionel Sambuc   }
337f4a2713aSLionel Sambuc }
338*0a6a1f1dSLionel Sambuc 
// Wrapper for Tablegen'd function.  enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
namespace llvm {
namespace AMDGPU {
// Translate a pseudo opcode into the MC opcode for the given generation.
static int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, (enum Subtarget)Gen);
}
}
}
349*0a6a1f1dSLionel Sambuc 
350*0a6a1f1dSLionel Sambuc // This must be kept in sync with the SISubtarget class in SIInstrInfo.td
351*0a6a1f1dSLionel Sambuc enum SISubtarget {
352*0a6a1f1dSLionel Sambuc   SI = 0,
353*0a6a1f1dSLionel Sambuc   VI = 1
354*0a6a1f1dSLionel Sambuc };
355*0a6a1f1dSLionel Sambuc 
AMDGPUSubtargetToSISubtarget(unsigned Gen)356*0a6a1f1dSLionel Sambuc enum SISubtarget AMDGPUSubtargetToSISubtarget(unsigned Gen) {
357*0a6a1f1dSLionel Sambuc   switch (Gen) {
358*0a6a1f1dSLionel Sambuc   default:
359*0a6a1f1dSLionel Sambuc     return SI;
360*0a6a1f1dSLionel Sambuc   case AMDGPUSubtarget::VOLCANIC_ISLANDS:
361*0a6a1f1dSLionel Sambuc     return VI;
362*0a6a1f1dSLionel Sambuc   }
363*0a6a1f1dSLionel Sambuc }
364*0a6a1f1dSLionel Sambuc 
pseudoToMCOpcode(int Opcode) const365*0a6a1f1dSLionel Sambuc int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
366*0a6a1f1dSLionel Sambuc   int MCOp = AMDGPU::getMCOpcode(Opcode,
367*0a6a1f1dSLionel Sambuc                         AMDGPUSubtargetToSISubtarget(RI.ST.getGeneration()));
368*0a6a1f1dSLionel Sambuc 
369*0a6a1f1dSLionel Sambuc   // -1 means that Opcode is already a native instruction.
370*0a6a1f1dSLionel Sambuc   if (MCOp == -1)
371*0a6a1f1dSLionel Sambuc     return Opcode;
372*0a6a1f1dSLionel Sambuc 
373*0a6a1f1dSLionel Sambuc   // (uint16_t)-1 means that Opcode is a pseudo instruction that has
374*0a6a1f1dSLionel Sambuc   // no encoding in the given subtarget generation.
375*0a6a1f1dSLionel Sambuc   if (MCOp == (uint16_t)-1)
376*0a6a1f1dSLionel Sambuc     return -1;
377*0a6a1f1dSLionel Sambuc 
378*0a6a1f1dSLionel Sambuc   return MCOp;
379*0a6a1f1dSLionel Sambuc }
380